diff --git a/.gitignore b/.gitignore
index 9d612c1122..1d7f52cf87 100644
--- a/.gitignore
+++ b/.gitignore
@@ -32,3 +32,7 @@ gha-creds-*.json
# macOS
.DS_Store
gitops-server.dockerfile
+
+# Ignore social cards cache
+userdocs/.cache/*
+userdocs/site/*
\ No newline at end of file
diff --git a/userdocs/README.md b/userdocs/README.md
new file mode 100644
index 0000000000..6bcffd32f5
--- /dev/null
+++ b/userdocs/README.md
@@ -0,0 +1,18 @@
+# Writing and publishing user docs
+
+The user docs are written in [MkDocs](https://www.mkdocs.org/) using [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/). Install `mkdocs` by following the instructions [here](https://squidfunk.github.io/mkdocs-material/getting-started/).
+
+## Commands
+
+From the `userdocs` directory (`cd userdocs`) you can run:
+
+* `mkdocs serve` - Start the live-reloading docs server.
+* `mkdocs build` - Build the documentation site.
+* `mkdocs -h` - Print help message and exit.
+
+## Project layout
+
+ mkdocs.yml # The configuration file.
+ docs/
+ index.md # The documentation homepage.
+ ... # Other markdown pages, images and other files.
diff --git a/userdocs/docs/assets/dashboards/explorer.json b/userdocs/docs/assets/dashboards/explorer.json
new file mode 100644
index 0000000000..9d27ee6930
--- /dev/null
+++ b/userdocs/docs/assets/dashboards/explorer.json
@@ -0,0 +1,1200 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "grafana",
+ "uid": "-- Grafana --"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "description": "weave gitops explorer metrics",
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 3,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 16,
+ "panels": [],
+ "title": "SLOs",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "prometheus"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "red",
+ "value": null
+ },
+ {
+ "color": "green",
+ "value": 99
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 24,
+ "x": 0,
+ "y": 1
+ },
+ "id": 17,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.2",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P1809F7CD0C75ACF3"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(http_request_duration_seconds_count{handler=\"/v1/query\", code=\"200\"}[30m])) * 100 / sum(rate(http_request_duration_seconds_count{handler=\"/v1/query\"}[30m]))",
+ "legendFormat": "total",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Availability",
+ "type": "stat"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 6
+ },
+ "id": 6,
+ "panels": [],
+ "title": "Query",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "prometheus"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 7
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.0.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P1809F7CD0C75ACF3"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(http_request_duration_seconds_count{handler=\"/v1/query\"}[2m]))",
+ "legendFormat": "total",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "prometheus"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(http_request_duration_seconds_count{handler=\"/v1/query\",code!~\"2..\"}[2m]))",
+ "hide": false,
+ "legendFormat": "errors",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Query Requests Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:1215",
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ },
+ {
+ "$$hashKey": "object:1216",
+ "format": "short",
+ "logBase": 1
+ }
+ ],
+ "yaxis": {
+ "align": true
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "prometheus"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 7
+ },
+ "hiddenSeries": false,
+ "id": 1,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.0.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P1809F7CD0C75ACF3"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(http_request_duration_seconds_sum{code=\"200\",handler=\"/v1/query\",method=\"POST\"}[2m])) / sum(rate(http_request_duration_seconds_count{code=\"200\",handler=\"/v1/query\",method=\"POST\"}[2m]))",
+ "legendFormat": "200s",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Query Requests Duration",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:923",
+ "format": "s",
+ "label": "Latency",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:924",
+ "format": "short"
+ }
+ ],
+ "yaxis": {
+ "align": true
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "prometheus"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 12
+ },
+ "hiddenSeries": false,
+ "id": 10,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.0.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P1809F7CD0C75ACF3"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(datastore_latency_seconds_count{action=~\"Get.*\"}[2m])) by (action)",
+ "legendFormat": "{{action}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Datastore Read Request Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short"
+ },
+ {
+ "format": "short"
+ }
+ ],
+ "yaxis": {
+ "align": true
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "prometheus"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 12
+ },
+ "hiddenSeries": false,
+ "id": 11,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": true,
+ "min": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.0.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P1809F7CD0C75ACF3"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(datastore_latency_seconds_sum{action=~\"Get.*\",status=\"success\"}[2m])) / sum(rate(datastore_latency_seconds_count{action=~\"Get.*\",status=\"success\"}[2m]))\n",
+ "legendFormat": "success",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Datastore Read Requests Duration",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:725",
+ "format": "s",
+ "label": "Latency",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:726",
+ "format": "short"
+ }
+ ],
+ "yaxis": {
+ "align": true
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "prometheus"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 17
+ },
+ "hiddenSeries": false,
+ "id": 13,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.0.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P1809F7CD0C75ACF3"
+ },
+ "editorMode": "code",
+ "expr": "sum(irate(indexer_latency_seconds_count[2m])) by (action)",
+ "legendFormat": "{{action}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Indexer Read Request Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short"
+ },
+ {
+ "format": "short"
+ }
+ ],
+ "yaxis": {
+ "align": true
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "prometheus"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 17
+ },
+ "hiddenSeries": false,
+ "id": 19,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.0.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P1809F7CD0C75ACF3"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(indexer_latency_seconds_sum{status=\"success\"}[2m])) / sum(rate(indexer_latency_seconds_count{status=\"success\"}[2m]))\n",
+ "legendFormat": "success",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Indexer Read Requests Duration",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:725",
+ "format": "s",
+ "label": "Latency",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:726",
+ "format": "short"
+ }
+ ],
+ "yaxis": {
+ "align": true
+ }
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 22
+ },
+ "id": 7,
+ "panels": [],
+ "title": "Collector",
+ "type": "row"
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "prometheus"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 23
+ },
+ "hiddenSeries": false,
+ "id": 20,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.0.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P1809F7CD0C75ACF3"
+ },
+ "editorMode": "code",
+ "expr": "collector_cluster_watcher{collector=\"objects\"}",
+ "legendFormat": "{{status}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+      "title": "Objects Cluster Watchers",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:1215",
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ },
+ {
+ "$$hashKey": "object:1216",
+ "format": "short",
+ "logBase": 1
+ }
+ ],
+ "yaxis": {
+ "align": true
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+      "datasource": { "type": "prometheus", "uid": "prometheus" },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 23
+ },
+ "hiddenSeries": false,
+ "id": 21,
+ "legend": {
+ "alignAsTable": true,
+ "avg": false,
+ "current": true,
+ "max": false,
+ "min": false,
+ "rightSide": true,
+ "show": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.0.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P1809F7CD0C75ACF3"
+ },
+ "editorMode": "code",
+ "expr": "collector_cluster_watcher{collector=\"roles\"}",
+ "legendFormat": "{{status}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "RBAC Cluster Watchers",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:1215",
+ "format": "short",
+ "logBase": 1,
+ "show": true
+ },
+ {
+ "$$hashKey": "object:1216",
+ "format": "short",
+ "logBase": 1
+ }
+ ],
+ "yaxis": {
+ "align": true
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "prometheus"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 28
+ },
+ "hiddenSeries": false,
+ "id": 12,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.0.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P1809F7CD0C75ACF3"
+ },
+ "editorMode": "code",
+ "expr": "sum(irate(datastore_latency_seconds_count{action=~\"Store.*\"}[2m])) by (action)",
+ "legendFormat": "{{action}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Datastore Write Request Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:384",
+ "format": "short"
+ },
+ {
+ "$$hashKey": "object:385",
+ "format": "short"
+ }
+ ],
+ "yaxis": {
+ "align": true
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "prometheus"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 28
+ },
+ "hiddenSeries": false,
+ "id": 14,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.0.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P1809F7CD0C75ACF3"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(datastore_latency_seconds_sum{action=~\"Store.*\",status=\"success\"}[2m])) / sum(rate(datastore_latency_seconds_count{action=~\"Store.*\",status=\"success\"}[2m]))\n",
+ "legendFormat": "success",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Datastore Write Requests Duration",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:725",
+ "format": "s",
+ "label": "Latency",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:726",
+ "format": "short"
+ }
+ ],
+ "yaxis": {
+ "align": true
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "prometheus"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 33
+ },
+ "hiddenSeries": false,
+ "id": 22,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.0.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P1809F7CD0C75ACF3"
+ },
+ "editorMode": "code",
+ "expr": "sum(irate(indexer_latency_seconds_count{action=~\"Add|Remove.*\"}[2m])) by (action)",
+ "legendFormat": "{{action}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Indexer Write Request Rate",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:384",
+ "format": "short"
+ },
+ {
+ "$$hashKey": "object:385",
+ "format": "short"
+ }
+ ],
+ "yaxis": {
+ "align": true
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "prometheus"
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 33
+ },
+ "hiddenSeries": false,
+ "id": 23,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "10.0.2",
+ "pointradius": 2,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P1809F7CD0C75ACF3"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(indexer_latency_seconds_sum{action=~\"Add|Remove.*\",status=\"success\"}[2m])) / sum(rate(indexer_latency_seconds_count{action=~\"Add|Remove.*\",status=\"success\"}[2m]))\n",
+ "legendFormat": "success",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeRegions": [],
+ "title": "Indexer Write Requests Duration",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "mode": "time",
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "$$hashKey": "object:725",
+ "format": "s",
+ "label": "Latency",
+ "show": true
+ },
+ {
+ "$$hashKey": "object:726",
+ "format": "short"
+ }
+ ],
+ "yaxis": {
+ "align": true
+ }
+ }
+ ],
+ "refresh": "5s",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-15m",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "Explorer",
+ "uid": "Lp7_c9UVk",
+ "version": 2,
+ "weekStart": ""
+}
diff --git a/userdocs/docs/assets/example-enterprise-helm.yaml b/userdocs/docs/assets/example-enterprise-helm.yaml
new file mode 100644
index 0000000000..c5107f22e4
--- /dev/null
+++ b/userdocs/docs/assets/example-enterprise-helm.yaml
@@ -0,0 +1,48 @@
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+ name: weave-gitops-enterprise-charts
+ namespace: flux-system
+spec:
+ interval: 60m
+ secretRef:
+ name: weave-gitops-enterprise-credentials
+ url: https://charts.dev.wkp.weave.works/releases/charts-v3
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+ name: weave-gitops-enterprise
+ namespace: flux-system
+spec:
+ chart:
+ spec:
+ interval: 65m
+ chart: mccp
+ sourceRef:
+ kind: HelmRepository
+ name: weave-gitops-enterprise-charts
+ namespace: flux-system
+ version: 0.x.x
+ install:
+ crds: CreateReplace
+ upgrade:
+ crds: CreateReplace
+ interval: 50m
+ values:
+ # -- Configure TLS settings if needed
+ # tls:
+ # -- Can be disabled if TLS is handled by a user-provided ingress controller
+ # enabled: true
+ # -- optionally specify a TLS secret
+ # secretName: null
+ config:
+ capi:
+ repositoryURL: https://github.com/$GITHUB_USER/fleet-infra
+ # -- Can be changed depending on your git repo structure
+ # repositoryPath: ./clusters/management/clusters
+ # repositoryClustersPath: ./cluster
+ git:
+ type: github
+ # -- Change if using on-prem github/gitlab
+ # hostname: https://github.com
diff --git a/userdocs/docs/assets/templates/capd-template.yaml b/userdocs/docs/assets/templates/capd-template.yaml
new file mode 100644
index 0000000000..96e687afbe
--- /dev/null
+++ b/userdocs/docs/assets/templates/capd-template.yaml
@@ -0,0 +1,162 @@
+apiVersion: templates.weave.works/v1alpha2
+kind: GitOpsTemplate
+metadata:
+ name: cluster-template-development
+ namespace: default
+ annotations:
+ templates.weave.works/add-common-bases: "true"
+ templates.weave.works/inject-prune-annotation: "true"
+ labels:
+ weave.works/template-type: cluster
+spec:
+ description: A simple CAPD template
+ params:
+ - name: CLUSTER_NAME
+ required: true
+ description: This is used for the cluster naming.
+ - name: NAMESPACE
+ description: Namespace to create the cluster in
+ - name: KUBERNETES_VERSION
+ description: Kubernetes version to use for the cluster
+ options: ["1.19.11", "1.21.1", "1.22.0", "1.23.3"]
+ - name: CONTROL_PLANE_MACHINE_COUNT
+ description: Number of control planes
+ options: ["1", "2", "3"]
+ - name: WORKER_MACHINE_COUNT
+ description: Number of worker machines
+ resourcetemplates:
+ - content:
+ - apiVersion: gitops.weave.works/v1alpha1
+ kind: GitopsCluster
+ metadata:
+ name: "${CLUSTER_NAME}"
+ namespace: "${NAMESPACE}"
+ labels:
+ weave.works/capi: bootstrap
+ spec:
+ capiClusterRef:
+ name: "${CLUSTER_NAME}"
+ - apiVersion: cluster.x-k8s.io/v1beta1
+ kind: Cluster
+ metadata:
+ name: "${CLUSTER_NAME}"
+ namespace: "${NAMESPACE}"
+ labels:
+ cni: calico
+ spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ serviceDomain: cluster.local
+ services:
+ cidrBlocks:
+ - 10.128.0.0/12
+ controlPlaneRef:
+ apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ name: "${CLUSTER_NAME}-control-plane"
+ namespace: "${NAMESPACE}"
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ kind: DockerCluster
+ name: "${CLUSTER_NAME}"
+ namespace: "${NAMESPACE}"
+ - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ kind: DockerCluster
+ metadata:
+ name: "${CLUSTER_NAME}"
+ namespace: "${NAMESPACE}"
+ - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ kind: DockerMachineTemplate
+ metadata:
+ name: "${CLUSTER_NAME}-control-plane"
+ namespace: "${NAMESPACE}"
+ spec:
+ template:
+ spec:
+ extraMounts:
+ - containerPath: /var/run/docker.sock
+ hostPath: /var/run/docker.sock
+ - apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+ kind: KubeadmControlPlane
+ metadata:
+ name: "${CLUSTER_NAME}-control-plane"
+ namespace: "${NAMESPACE}"
+ spec:
+ kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ certSANs:
+ - localhost
+ - 127.0.0.1
+ - 0.0.0.0
+ controllerManager:
+ extraArgs:
+ enable-hostpath-provisioner: "true"
+ initConfiguration:
+ nodeRegistration:
+ criSocket: /var/run/containerd/containerd.sock
+ kubeletExtraArgs:
+ cgroup-driver: cgroupfs
+ eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+ joinConfiguration:
+ nodeRegistration:
+ criSocket: /var/run/containerd/containerd.sock
+ kubeletExtraArgs:
+ cgroup-driver: cgroupfs
+ eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+ machineTemplate:
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ kind: DockerMachineTemplate
+ name: "${CLUSTER_NAME}-control-plane"
+ namespace: "${NAMESPACE}"
+ replicas: "${CONTROL_PLANE_MACHINE_COUNT}"
+ version: "${KUBERNETES_VERSION}"
+ - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ kind: DockerMachineTemplate
+ metadata:
+ name: "${CLUSTER_NAME}-md-0"
+ namespace: "${NAMESPACE}"
+ spec:
+ template:
+ spec: {}
+ - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ metadata:
+ name: "${CLUSTER_NAME}-md-0"
+ namespace: "${NAMESPACE}"
+ spec:
+ template:
+ spec:
+ joinConfiguration:
+ nodeRegistration:
+ kubeletExtraArgs:
+ cgroup-driver: cgroupfs
+ eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+ - apiVersion: cluster.x-k8s.io/v1beta1
+ kind: MachineDeployment
+ metadata:
+ name: "${CLUSTER_NAME}-md-0"
+ namespace: "${NAMESPACE}"
+ spec:
+ clusterName: "${CLUSTER_NAME}"
+ replicas: "${WORKER_MACHINE_COUNT}"
+ selector:
+ matchLabels: null
+ template:
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+ kind: KubeadmConfigTemplate
+ name: "${CLUSTER_NAME}-md-0"
+ namespace: "${NAMESPACE}"
+ clusterName: "${CLUSTER_NAME}"
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+ kind: DockerMachineTemplate
+ name: "${CLUSTER_NAME}-md-0"
+ namespace: "${NAMESPACE}"
+ version: "${KUBERNETES_VERSION}"
diff --git a/userdocs/docs/backstage.md b/userdocs/docs/backstage.md
new file mode 100644
index 0000000000..7de3f227e4
--- /dev/null
+++ b/userdocs/docs/backstage.md
@@ -0,0 +1,135 @@
+---
+title: Backstage Plugin for Flux
+---
+
+Are you running [Backstage](https://backstage.io) and [Flux](https://fluxcd.io)? Do you want to expose the state of your Flux resources in your Backstage portal?
+
+The `@weaveworksoss/backstage-plugin-flux` Backstage plugin provides a set of components that you can add to your existing Backstage app to display the state of Flux resources.
+
+## Installation
+
+We provide the full installation instructions in the plugin [repository](https://github.com/weaveworks/weaveworks-backstage/tree/main/plugins/backstage-plugin-flux). But first you will need to install the [Kubernetes plugin](https://backstage.io/docs/features/kubernetes/) and configure it to access the clusters you want to query Flux resources from.
+
+You will need to install the plugin to your frontend app:
+
+```console
+# From your Backstage root directory
+yarn add --cwd packages/app @weaveworksoss/backstage-plugin-flux
+```
+
+Then add the components you want to your [EntityPage](https://backstage.io/docs/plugins/integrating-plugin-into-software-catalog/#import-your-plugin-and-embed-in-the-entities-page).
+
+Currently, the Backstage plugin provides the following components:
+
+- EntityFluxDeploymentsCard - shows a combined view of HelmReleases and Kustomizations
+- EntityFluxSourcesCard - shows a combined view of GitRepositories, OCIRepositories and HelmRepositories
+- EntityFluxHelmReleasesCard
+- EntityFluxKustomizationsCard
+- EntityFluxGitRepositoriesCard
+- EntityFluxOCIRepositoriesCard
+- EntityFluxHelmRepositoriesCard
+
+For example, to add the `EntityFluxHelmReleasesCard` to your Entity home page for components with the `backstage.io/kubernetes-id` entity annotation:
+
+```tsx title="packages/app/src/components/catalog/EntityPage.tsx"
+import {
+ EntityFluxHelmReleasesCard,
+} from '@weaveworksoss/backstage-plugin-flux';
+import { isKubernetesAvailable } from '@backstage/plugin-kubernetes';
+
+const overviewContent = (
+ Packages: Package v1alpha1 contains API Schema definitions for the gitopssets v1alpha1 API group GitOpsSet is the Schema for the gitopssets API Suspend tells the controller to suspend the reconciliation of this
+GitOpsSet. Generators generate the data to be inserted into the provided templates. Templates are a set of YAML templates that are rendered into resources
+from the data supplied by the generators. The name of the Kubernetes service account to impersonate
+when reconciling this Kustomization.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ APIClientGenerator defines a generator that queries an API endpoint and uses
+that to generate data. The interval at which to poll the API endpoint. This is the API endpoint to use. Method defines the HTTP method to use to talk to the endpoint. JSONPath is string that is used to modify the result of the API
+call. This can be used to extract a repeating element from a response.
+https://kubernetes.io/docs/reference/kubectl/jsonpath/ HeadersRef allows optional configuration of a Secret or ConfigMap to add
+additional headers to an outgoing request. For example, a Secret with a key Authorization: Bearer abc123 could be
+used to configure an authorization header. Body is set as the body in a POST request. If set, this will configure the Method to be POST automatically. SingleElement means generate a single element with the result of the API
+call. When true, the response must be a JSON object and will be returned as a
+single element, i.e. only one element will be generated containing the
+entire object. Reference to Secret in the same namespace with a field “caFile” which
+provides the Certificate Authority to trust when making API calls.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ ClusterGenerator defines a generator that queries the cluster API for
+relevant clusters. Selector is used to filter the clusters that you want to target. If no selector is provided, no clusters will be matched.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ ConfigGenerator loads a referenced ConfigMap or
+Secret from the Cluster and makes it available as a resource. Kind of the referent. Name of the referent.
+(Appears on:
+GitOpsSetSpec)
+ GitOpsSetGenerator is the top-level set of generators for this GitOpsSet.
+(Appears on:
+MatrixGenerator)
+ GitOpsSetNestedGenerator describes the generators usable by the MatrixGenerator.
+This is a subset of the generators allowed by the GitOpsSetGenerator because the CRD format doesn’t support recursive declarations. Name is an optional field that will be used to prefix the values generated
+by the nested generators, this allows multiple generators of the same
+type in a single Matrix generator.
+(Appears on:
+GitOpsSet)
+ GitOpsSetSpec defines the desired state of GitOpsSet Suspend tells the controller to suspend the reconciliation of this
+GitOpsSet. Generators generate the data to be inserted into the provided templates. Templates are a set of YAML templates that are rendered into resources
+from the data supplied by the generators. The name of the Kubernetes service account to impersonate
+when reconciling this Kustomization.
+(Appears on:
+GitOpsSet)
+ GitOpsSetStatus defines the observed state of GitOpsSet
+(Members of ObservedGeneration is the last observed generation of the HelmRepository
+object. Conditions holds the conditions for the GitOpsSet Inventory contains the list of Kubernetes resource object references that
+have been successfully applied
+(Appears on:
+GitOpsSetSpec)
+ GitOpsSetTemplate describes a resource to create Repeat is a JSONPath string defining that the template content should be
+repeated for each of the matching elements in the JSONPath expression.
+https://kubernetes.io/docs/reference/kubectl/jsonpath/ Content is the YAML to be templated and generated.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ GitRepositoryGenerator generates from files in a Flux GitRepository resource. RepositoryRef is the name of a GitRepository resource to be generated from. Files is a set of rules for identifying files to be parsed. Directories is a set of rules for identifying directories to be
+generated.
+(Appears on:
+APIClientGenerator)
+ HeadersReference references either a Secret or ConfigMap to be used for
+additional request headers. The resource kind to get headers from. Name of the resource in the same namespace to apply headers from.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ ImagePolicyGenerator generates from the ImagePolicy. PolicyRef is the name of an ImagePolicy resource to be generated from.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ ListGenerator generates from a hard-coded list.
+(Appears on:
+GitOpsSetGenerator)
+ MatrixGenerator defines a matrix that combines generators.
+The matrix is a cartesian product of the generators. Generators is a list of generators to be combined. SingleElement means generate a single element with the result of the
+merged generator elements. When true, the matrix elements will be merged to a single element, with
+whatever prefixes they have.
+It’s recommended that you use the Name field to separate out elements.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ OCIRepositoryGenerator generates from files in a Flux OCIRepository resource. RepositoryRef is the name of an OCIRepository resource to be generated from. Files is a set of rules for identifying files to be parsed. Directories is a set of rules for identifying directories to be
+generated.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ PullRequestGenerator defines a generator that queries a Git hosting service
+for relevant PRs. The interval at which to check for repository updates. Determines which git-api protocol to use. This is the API endpoint to use. This should be the Repo you want to query.
+e.g. my-org/my-repo Reference to Secret in the same namespace with a field “password” which is an
+auth token that can query the Git Provider API. Labels is used to filter the PRs that you want to target.
+This may be applied on the server. Fork is used to filter out forks from the target PRs if false,
+or to include forks if true
+(Appears on:
+GitRepositoryGenerator,
+OCIRepositoryGenerator)
+ RepositoryGeneratorDirectoryItem stores the information about a specific
+directory to be generated from.
+(Appears on:
+GitRepositoryGenerator,
+OCIRepositoryGenerator)
+ RepositoryGeneratorFileItem defines a path to a file to be parsed when generating. Path is the name of a file to read and generate from can be JSON or YAML.
+(Appears on:
+GitOpsSetStatus)
+ ResourceInventory contains a list of Kubernetes resource object references that have been applied by a Kustomization. Entries of Kubernetes resource object references.
+(Appears on:
+ResourceInventory)
+ ResourceRef contains the information necessary to locate a resource within a cluster. ID is the string representation of the Kubernetes resource object’s metadata,
+in the format ‘namespace_name_group_kind’. Version is the API version of the Kubernetes resource object’s kind. This page was automatically generated with Are you running Backstage and Flux? Do you want to expose the state of your Flux resources in your Backstage portal? The We provide the full installation instructions in the plugin repository. But first you will need to install the Kubernetes plugin and configure it to access the clusters you want to query Flux resources from. You will need to install the plugin to your frontend app: Then add the components you want to your EntityPage. Currently, the Backstage plugin provides the following components: For example, to add the When you view components with the correct annotation: This will query across your configured clusters for Instead of displaying the state on the overview page, it's possible to compose a page displaying the state of resources. For example, to add a page You can connect the plugin to your Weave GitOps installation through your config: NOTE: The plugin will generate URLs relative to this URL and link to them from the displayed resources. Weave GitOps templates describe the properties of your cluster—how many nodes, what version of Kubernetes, etc. The identity refers to which account will be used to create the cluster. When you render a template, you may want to set the credentials to be used for this cluster—for example, if the cost is allocated to a specific team. The rendered resource can be automatically configured with the selected credentials. Credentials are injected into the following resources: * AWSCluster, AWSManagedControlPlane * AzureCluster, AzureManagedCluster * VSphereCluster If no credentials are selected, no changes will be applied, and the credentials used by your CAPI controller will be used as the default. 
In our cluster we have the template: and the identity We can select Weave GitOps to use the The resulting definition will have the identity injected into the appropriate place in the template, for this example: The supported providers implement multi-tenancy by setting an Weave GitOps will search all namespaces in the cluster for potential identities that can be used to create a cluster. The following identity We'll use this page to help you move past common troublesome situations. To authenticate using Git during the pull request creation, you will need to select the Git repository where you'll create the pull request. Depending on the action performed on the resource (creation/deletion/editing), the default Git repository selected in the UI is determined in the following order: the repository used to initially create the resource found in the the first repository found with a the flux-system repository the first repository in the list of Git repositories that the user has access to. In the case of deletion and editing, if the resource repository is found amongst the Git repositories that the user has access to, it will be preselected and the selection will be disabled. If it is not found, you can choose a new repository. In the case of tenants, we recommend adding the The system will try and automatically calculate the correct HTTPS API endpoint to create a pull request against. For example, if the Git repository URL is However, it is not always possible to accurately derive this URL. An override can be specified to set the correct URL instead. For example, the SSH URL may be In this case, we set the override via the The pull request will then be created against the correct HTTPS API. The above also applies to application creation. Weave GitOps Enterprise can leverage Cluster API providers to enable leaf cluster creation. 
Cluster API provides declarative APIs, controllers, and tooling to manage the lifecycle of Kubernetes clusters across a large number of infrastructure providers. Cluster API custom resource definitions (CRDs) are platform-independent as each provider implementation handles the creation of virtual machines, VPCs, networks, and other required infrastructure parts—enabling consistent and repeatable cluster deployments. As an AWS advanced technology partner, Weaveworks has been working tirelessly to ensure that deploying EKS anywhere is smooth and removes the barriers to application modernization. You'll need to install the following software before continuing with these instructions: Some Cluster API providers allow you to choose the account or identity that the new cluster will be created with. This is often referred to as Multi-tenancy in the CAPI world. Weave GitOps currently supports: When a cluster is provisioned, by default it will reconcile all the manifests in To display Applications and Sources in the UI we need to give the logged in user permissions to inspect the new cluster. Adding common RBAC rules to import WegoAdmin from "!!raw-loader!./assets/rbac/wego-admin.yaml"; <CodeBlock title="clusters/bases/rbac/wego-admin.yaml" className="language-yaml" {WegoAdmin} To do this, go to Weaveworks' Profiles Catalog. See CAPI Templates page for more details on this topic. Once we load a template we can use it in the UI to create clusters! import CapaTemplate from "!!raw-loader!./assets/templates/capa-template.yaml"; Download the template below to your config repository path, then commit and push to your Git origin. <CodeBlock title="clusters/management/apps/capi/templates/capa-template.yaml" className="language-yaml" {CapaTemplate} This step ensures that Flux gets installed into your cluster. 
Create a cluster bootstrap config as follows: import CapiGitopsCDC from "!!raw-loader!./assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml"; Download the config with: Then update the <CodeBlock title="clusters/management/capi/boostrap/capi-gitops-cluster-bootstrap-config.yaml" className="language-yaml" {CapiGitopsCDC} Here are the steps: Note that you can't apply an empty repository to a cluster. If you have Cluster API clusters and other manifests committed to this repository, and then delete all of them so there are zero manifests left, then the apply will fail and the resources will not be removed from the cluster. A workaround is to add a dummy ConfigMap back to the Git repository after deleting everything else so that there is at least one manifest to apply. If you do not need CAPI-based cluster management support, you can disable CAPI via the Helm Chart values. Update your Weave GitOps Enterprise And that's it! In line with the mantra “cattle, not pets,” Weave GitOps Enterprise (WGE) simplifies managing cluster lifecycle at scale—even massive scale. Through pull requests, which make every action recorded and auditable, WGE makes it possible for teams to create, update, and delete clusters across entire fleets. Breaking things is harder, and recovery is easier. WGE further simplifies the cluster lifecycle management process by providing both a user interface (UI) and a command line interface (CLI) to interact with and manage clusters on-prem, across clouds, and in hybrid environments. You can even use our UI to delete clusters—all it takes is the press of a button that spins up a pull request. 
WGE fully supports a range of options, including: - Crossplane integration - Terraform integration, with a Terraform Controller that follows the patterns established by Flux - Cluster API The Weave GitOps Enterprise UI enables you to install software packages to your bootstrapped cluster via the Applications view of our user interface, using a Helm chart (via a HelmRelease) or Kustomization. First, find the "Add an Application" button: A form will appear, asking you to select the target cluster where you want to add your Application. Select the source type of either your Git repository or your Helm repository from the selected cluster: If you select Git repository as the source type, you will be able to add the Application from Kustomization: If you select Helm repository as the source type, you will be able to add Application from HelmRelease. And if you choose the profiles Helm chart repository URL, you can select a profile from our Profiles list. Finally, you can create a pull request to your target cluster and see it on your GitOps repository. Our user guide provides two pathways to deployment: Just click the option you want to get started with, and let's go. import CodeBlock from "@theme/CodeBlock"; import BrowserOnly from "@docusaurus/BrowserOnly"; You do not need Cluster API to add your Kubernetes cluster to Weave GitOps Enterprise. The only thing you need is a secret containing a valid If you already have a If you have a kubeconfig, but it is not yet stored in your management cluster, load it into the cluster using this command: Here's how to create a kubeconfig secret. This will allow WGE to introspect the cluster for available namespaces. Once we know what namespaces are available we can test whether the logged in user can access them via impersonation. ( Then, run the following command to get the service account token: Obtain the cluster certificate (CA). How you do this depends on your cluster. AKS: Visit the Azure user docs for more information. 
You'll need to copy the contents of the certificate into the Update the following fields: CLUSTER_NAME: insert the name of your cluster—i.e., TOKEN: add the token of the service account retrieved in the previous step Finally, create a secret for the generated kubeconfig in the WGE management cluster: This step ensures that Flux gets installed into your cluster. Create a cluster bootstrap config as follows: import CapiGitopsCDC from "!!raw-loader!./assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml"; Download the config with: Then update the <CodeBlock title="clusters/management/capi/boostrap/capi-gitops-cluster-bootstrap-config.yaml" className="language-yaml" {CapiGitopsCDC} To connect your cluster, you need to add some common RBAC rules into the To display Applications and Sources in the UI, we need to give the logged-in user the permission to inspect the new cluster. Adding common RBAC rules to import WegoAdmin from "!!raw-loader!./assets/rbac/wego-admin.yaml"; <CodeBlock title="clusters/bases/rbac/wego-admin.yaml" className="language-yaml" {WegoAdmin} When a To use the Weave GitOps Enterprise user interface (UI) to inspect the Applications and Sources running on the new cluster, you'll need permissions. We took care of this above when we stored your RBAC rules in Save these two files in your Git repository, then commit and push. Once Flux has reconciled the cluster, you can inspect your Flux resources via the UI! To test that your kubeconfig secret is correctly set up, apply the following manifest and check the logs after the job completes: In the manifest above, Other documentation that you might find useful: BEFORE YOU START The following instructions require you to make minor changes to the content of your own hosted Helm repository. To put it simply, Profiles are Helm charts. To create a Profile, you need to add an annotation to a Helm chart. 
A very simple Helm chart marked up as a Profile looks like this: Alternatively, you can annotate a Flux This will ensure that all charts in the Profile layers are a mechanism for loosely defining dependencies between Profiles. To add a layer to a Profile chart: When multiple Profiles are specified in an API call, with layers in the API request then the set of layers is sorted, reversed, and configured as dependencies using Flux's dependsOn mechanism. The scope of the If only one chart is being installed, no If several charts are installed in the same layer, then the preceding layer charts will be configured to depend on all the charts in the succeeding layer. You can add your Profiles to a remote repository that can be referenced using a HelmRepository resource. The repository can be either public or private. Using a private repo requires a few extra steps. In this example, a public repo and branch is referenced directly where the Helm releases are: To use private repositories with restricted access, you can use a secret synced to the target leaf cluster. SecretSync references the secret as Once the SecretSync and Secret are available, the secret can be directly referenced in the HelmRepository object: Note: The WGE inspects the namespace in the management cluster where it is deployed, and looks for a When creating a cluster from the UI using a CAPI template, these Profiles are available for selection in the As shown above, some Profiles are optional, while others are required. This is determined when the template is authored and allows for operations teams to control which Helm packages should be installed on new clusters by default. To enable editing of the yaml values for required Profiles, add the Ready for more GitOps? To purchase an entitlement to Weave GitOps Enterprise, please contact sales@weave.works. Weave GitOps Enterprise provides ops teams with an easy way to assess the health of multiple clusters in a single place. 
It shows cluster information such as Kubernetes version and number of nodes and provides details about the GitOps operations on those clusters, such as Git repositories and recent commits. Additionally, it aggregates Prometheus alerts to assist with troubleshooting. If you have already purchased your entitlement, head to the installation page. In addition to the features in the OSS edition, Weave GitOps Enterprise offers the following capabilities, taking your delivery from simple Continuous Delivery to Internal Developer Platform: Weave GitOps Enterprise (WGE) simplifies cluster lifecycle management at scale—even massive scale. Through pull requests, which make every action recorded and auditable, WGE makes it possible for teams to create, update, and delete clusters across entire fleets. WGE further simplifies the process by providing both a user interface (UI) and a command line interface (CLI) for teams to interact with and manage clusters on-prem, across clouds, and in hybrid environments. WGE works with Terraform, Crossplane, and any Cluster API provider. Add policy as code to GitOps pipelines and enforce security and compliance, application resilience and coding standards from source to production. Validate policy conformance at every step in the software delivery pipeline: commit, build, deploy and run time. Deploy into production environments safely using canary, blue/green deployment, and A/B strategies. Simple, single-file configuration defines success rollback. Measure Service Level Objectives (SLOs) using observability metrics from Prometheus, Datadog, New Relic, and others. Rollout new software from development to production. Environment rollouts that work with your existing CI system. Allow DevOps teams to work seamlessly together with multi-tenancy, total RBAC control, and policy enforcement, with integration to enterprise IAM. Component profiles enable teams to deploy standard services quickly, consistently and reliably. 
Teams can curate the profiles that are available within their estate ensuring there is consistency everywhere. Using GitOps it's easy to guarantee the latest, secure versions of any component are deployed in all production systems. Gain a single view of the health and state of the cluster and its workloads. Monitor deployments and alert on policy violations across apps and clusters. Reduce complexity with GitOps and install across all major target environments including support for on-premise, edge, hybrid, and multi-cloud Kubernetes clusters. Your business and workloads operate around the clock, and so do we. Whenever you have a problem, our experts are there to help. We’ve got your back! From wikipedia An air gap, air wall, air gapping or disconnected network is a network security measure employed on one or more computers to ensure that a secure computer network is physically isolated from unsecured networks, such as the public Internet or an unsecured local area network... This document guides on how to install Weave GitOps Enterprise (WGE) in a restricted environment. There are multiple restrictions that could happen within an air-gapped environment. This guide assumes that you have egress network restrictions. In order to install WGE, the required artifacts must be loaded from a private registry. This guide helps you with the task to identity the Helm charts and container images required to install WGE and to load them into your private registry. It also assumes that you could prepare the installation from a proxy host. A proxy host is defined here as a computer that is able to access to both the public and private network. It could take different shapes, for example, it could be a bastion host, a corp laptop, etc. Access to both public and private network is required during the airgap installation but not simultaneously. 
It is expected to have an online stage to gather the artifacts first, and an offline stage later, to load the artifacts in the private network. Finally, we aim to provide an end to end example to use it as a guidance more than a recipe. Feel free to adapt the details that do not fit within your context. There are different variations of the following stages and conditions. We consider that installing WGE in an air-gapped environment could follow the following stages. The main goal of this stage is to recreate a local WGE within your context, to collect the container images and Helm charts, that will be required in your private registry for the offline installation. A three-step setup is followed. There are many possible configurations for this host. This guide will assume that the host has installed the following: Create a kind cluster with registry following this guide You could just use We are going to install ChartMuseum via Flux. Remember to also install helm plugin cm-push. Set up access from your host. At this stage you have already a private registry for container images and helm charts. This step is to gather the artifacts and images in your local environment to push to the private registry. This would vary depending on the provider, given that we target an offline environment, most likely we are in a private cloud environment, so we will be using liquidmetal. Export these environment variables to configure your CAPI experience. Adjust them to your context. Execute the following script to generate Apply the following example manifest to deploy the Terraform Controller: Update the following manifest to your context. At this stage you should have a local management cluster with Weave GitOps Enterprise installed. You can observe the installed Helm Charts with As well as the container images: This section guides you to push installed artifacts to your private registry. 
Here's a Makefile to help you with each stage: The Skopeo allows you to configure a range of security features to meet your requirements. For example, configuring trust policies before pulling or signing containers before making them available in your private network. Feel free to adapt the previous script to meet your security needs. At this stage you have in your private registry both the Helm charts and container images required to install Weave GitOps Enterprise. Now you are ready to install WGE from your private registry. Follow the instructions to install WGE with the following considerations: An example of how it would look for Weave GitOps Enterprise is shown below. Indicate in the Cluster API configuration file Once you successfully create your Kubernetes cluster in Azure Marketplace, follow these steps to Install Weave GitOps Enterprise. These instructions apply to both Azure AKS and Azure ARC clusters—they'll behave in the same way. Tip If you have already installed Flux, then Azure Flux will refuse to install. Search for Weave GitOps Enterprise in the "Extensions + Applications" of the Azure Marketplace. Click the "GitOps" option. This will take you to a screen that presents a first-class item called Click GitOps => Create. Add the config name, namespace (default), scope: cluster, type (Flux v2), and continuous reconciliation option. Your entries should look like this: All of the displayed properties for the Flux objects screen are the same as what you'd supply to Flux bootstrap. If you are planning to manage or connect CAPI clusters to the WE service make sure you first install the CAPI provider. Then during the WE installation process be sure to select the "Enable CAPI support" checkbox. Contact sales@weave.works for a valid entitlements secret. This will come in the form of a file “entitlements.yaml”. Apply it to the cluster: (This section is the same as what you'll find in the main WGE install documentation.) 
Here we provide guidance for GitHub, GitLab, BitBucket Server, and Azure DevOps. Create a GitLab OAuth application that will request Follow the GitLab docs. The application should have at least these scopes: Add callback URLs to the application for each address the UI will be exposed on, e.g.: Save your application, taking note of the Client ID and Client Secret. Save them into the Replace values in this snippet and run: Create a new incoming application link from the BitBucket administration dashboard. You will be asked to enter a unique name and the redirect URL for the external application. The redirect URL should be set to Save your application and take note of the Client ID and Client Secret. Save them into the Replace values in this snippet and run: If the secret is already present, use the following command to update it using your default editor: Info If BitBucket Server is running on the default port (7990), make sure you include the port number in the values of the secret. For example: Navigate to VisualStudio and register a new application, as explained in the docs. Set the authorization callback URL and select which scopes to grant. Set the callback URL to Select the After creating your application, you will be presented with the application settings. Take note of the In your cluster, create a secret named Replace values in this snippet and run: WGE is now configured to ask users for authorization the next time a pull request must be created as part of using a template. Note that each user can view and manage which applications they have authorized by navigating to https://app.vsaex.visualstudio.com/me. First, install the Weave GitOps Enterprise CLI tool. To do this, you can use either brew or curl. Now, to login to the WGE UI, generate a bcrypt hash for your chosen password and store it as a secret in the Kubernetes cluster. There are several different ways to generate a bcrypt hash. 
Here, we'll use A validation to know it’s working: First, you'll get taken to the Weaveworks portal on the Azure platform, which provides your subscription details. Search for Weave GitOps. Pick "View private products" and choose WGE. Fill out the forms, selecting your cluster, then choose "Review and Create". Additional configuration is done through an optional ConfigMap: Apply the configuration with: Go to the "services and ingresses" tab in the Azure portal and look for signs that the UI installed. WGE will try and automatically install Flux on a new cluster. If this fails for some reason, or if you need a custom Flux installation, you can manually install it before installing WGE. Click "Next" and add: And under the "Authentication" section: Click "Next". You'll see an option to create a Kustomisation, which is optional. To create one: Click "Save". Then clicking "Next", which will give you a summary so you can review your input. Then click "Create". It will take about five minutes to deploy. You'll get to a new screen, which at the top-right shows "Notifications" and will display creation of the Flux configuration. When your deployment succeeds, go to the resource and pin to your dashboard. Then go to your terminal to see if it works in kubectl. In the terminal you'll get the GitRepository and Kustomizations. You should then get a green "succeeded" checkmark. The Kustomisations screen does not provide an option to inspect the path/target namespace—you have to supply the target Namespace in the Kustomization object. From this point, you can follow our generalized WGE installation instructions to configure TLS and log into the UI. Installing the Azure Marketplace product installs the Helm chart. Warning This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments. 
You could install Weave GitOps Enterprise via Each scenario is supported by an operation modes: For those seeking other scenarios or fine-grain customisation Weave GitOps Enterprise manual install would be the recommended. Before you start make sure the following requirements are met: Weave GitOps Enterprise Bootstrap functionality is available on Weave GitOps Enterprise CLI starting from version v0.35. If you haven't already, please install the latest Please use the following command to start the installation wizard of Weave GitOps Enterprise. You could run the bootstrap command in non-interactive mode by providing the required configurations as flags. The following gives you an example to get started that you could adapt to your own context For more information about the CLI configurations, check the below sections here Weave GitOps Enterprise runs on top of flux, the bootstrap CLI will check if flux is installed on the management cluster, and it will verify that it has the right version with valid git repository setup, and it is able to reconcile flux components properly. If flux is installed, but doesn't have a valid installation, the bootstrap CLI will terminate pending the fix or uninstall of current flux installation. Weave GitOps Enterprise Entitlement is your obtained license to use our product. The Entitlements file is a Kubernetes secret that contains your licence. In order for Info Disclaimer: The bootstrap CLI will ONLY use the private key to push WGE resources to your repo, and won't use it in any other way that can comprimise your repo or clusters security. The bootstrap CLI will prompt you to choose from the latest 3 versions of Weave GitOps Enterprise. You will be prompt to provide admin username and password, which will be used to access the dashboard. This will create admin secret with the credentials. 
If you already have previous admin credentials on your cluster, the installation will prompt you if you want to continue with the old credentials or exit and revoke them and re-run the installation. To access Weave GitOps Enterprise dashboard, you have the two following options available: After installation is successful. The CLI will print out the URL where you can access the dashboard. OIDC configuration will enable you to login with OIDC provider beside, or instead of the admin credentials. Afte the installation is complete, you will be prompt if you want to configure OIDC access. If you don't want to set it up right away, you can do it later by running To configure OIDC access, you will be asked to provide the following values: Note Please don't forget to add a new static-client on your OIDC provider settings with the redirectURI Info To purchase an entitlement to Weave GitOps Enterprise, please contact sales@weave.works. There is no need to install the open source version of Weave GitOps before installing Weave GitOps Enterprise. To get up and running with Weave GitOps Enterprise: - create a Kubernetes cluster - add your cluster to kubeconfig—which you'll get from Kubernetes—so that the kubeconfig correctly points to the management cluster - create a Git repository; in the instructions below, we refer to a To do this, you can use either brew or curl. The Your private Git repo should have a clusters/management folder that includes the manifests Flux needs to operate, and that also generates a key value pair for Flux to access the repo. At this point your Flux management cluster should be running. Take a look at the repository you created earlier. As noted above, you receive your entitlements secret by contacting sales@weave.works. 
Use this command to apply it to the cluster: There are two supported methods for logging in to the dashboard, that work with standard Kubernetes RBAC: - Login via an OIDC provider: recommended, as this will allow you to control permissions for existing users and groups that have already been configured to use OIDC. OIDC decouples the need to manage user lists from the application, allowing it to be managed via a central system designed for that purpose (i.e. the OIDC provider). OIDC also enables the creation of groups—either via your provider's own systems or by using a connector like Dex. - Login via a cluster user account: which is insecure, and which we only recommend for local and development environments or if you need to activate emergency access to a damaged cluster. However, it is an option if an OIDC provider is not available. You may decide to give your engineering teams access to the WGE dashboard so they can view and manage their workloads. In this case, you will want to secure dashboard access and restrict who can interact with it. Weave GitOps Enterprise integrates with your OIDC provider and uses standard Kubernetes RBAC to give you fine-grained control of the dashboard users' permissions. OIDC extends the OAuth2 authorization protocol by including an additional field (ID Token) that contains information (claims) about a user's identity. After a user successfully authenticates with the OIDC provider, Weave GitOps Enterprise uses this information to impersonate the user in any calls to the Kubernetes API. This allows cluster administrators to use RBAC rules to control access to the cluster and the dashboard. To login via your OIDC provider, create a Kubernetes secret to store the OIDC configuration. This configuration consists of the following parameters: Ensure that your OIDC provider has been set up with a client ID/secret and the dashboard's redirect URL. 
Create a secret named Once the HTTP server starts, unauthenticated users will have to click 'Login With OIDC Provider' to log in or use the cluster account (if configured). Upon successful authentication, the users' identities will be impersonated in any calls made to the Kubernetes API, as part of any action they take in the dashboard. By default the Helm chart will configure RBAC correctly, but we recommend reading the service account and user permissions pages to understand which actions are needed for Weave GitOps to function correctly. For some OIDC configurations, you may need to customise the requested scopes or claims. The By default, the following scopes are requested: "openid","offline_access","email","groups". The "openid" scope is mandatory for OpenID auth and will be added if not provided. The "email" and "groups" scopes are commonly used as unique identifiers in organisations. "offline_access" allows us to refresh OIDC tokens to keep login sessions alive for as long as a refresh token is valid. You can, however, change the defaults. By default, the following claims are parsed from the OpenID ID Token "email" and "groups". These are presented as the This is equivalent to configuring your Again, you can configure these from the This example uses Dex and its GitHub connector to show you how to log in to the Weave GitOps dashboard by authenticating with your GitHub account. It assumes you have already installed Weave GitOps on a Kubernetes cluster, per the instructions above, and have also enabled TLS. Dex is an identity service that uses OpenID Connect to drive authentication for other apps. There are other solutions for identity and access management, such as Keycloak. Create a namespace where you will install Dex: Get a GitHub ClientID and Client secret by creating a new OAuth application. 
Use An important part of the configuration is the In this example, the GitHub organisation is Based on these groups, we can bind roles to groups: In the same way, we can bind cluster roles to a group: For a static user, add Generate a static user password via the Using the "Login with OIDC Provider" button: We have to authorize the GitHub OAuth application: After that, grant access to Dex: Now we are logged in with our GitHub user and can see all of the resources we have access to: Important This is an insecure method of securing your dashboard which we only recommend for local and development environments, or if you need to activate emergency access to a damaged cluster. Note also that this mechanism only exists for a single user. You will not be able to create multiple users. Weave GitOps does not provide its own authentication mechanism. For secure and fully-featured authentication we strongly recommend using an OIDC provider, as described in the other tab. Before you log in via the emergency user account, you need to generate a bcrypt hash for your chosen password and store it as a secret in Kubernetes. There are several different ways to generate a bcrypt hash. This guide uses Generate the password by running: Now create a Kubernetes secret to store your chosen username and the password hash: You should now be able to login via the cluster user account using your chosen username and password. To change either the username or the password, recreate the Only one emergency user can be created this way. To add more users, enable an OIDC provider. By default, both a ClusterRole and Role are generated for the emergency user. Both have the same permissions, with the former being optional and the latter being bound to the These permissions give the emergency user Administrator-level powers. We do not advise leaving it active on production systems. 
If required, the permissions can be expanded with the To remove the emergency user as a login method, set the following values in the Helm Chart: If you are disabling an already existing emergency user, you will need to manually delete the Kubernetes Secret and any User Roles that were created on the cluster. This section covers the service account permissions for the Weave GitOps application, which the WGE UI requires to work. The default permissions will generate a cluster role that includes the permissions: These allow the pod to do three things: - Impersonate the user and operate in the cluster as them - Read the available namespaces; this is required to understand users' permissions - Read the The primary way Weave GitOps queries the Kube API is via The application, not the cluster, authenticates the user, either via the emergency cluster user credentials or OIDC. Then it makes Kube API calls on the user's behalf. This is equivalent to making a kubectl call like: Assuming the user The application itself uses get namespace permissions to pre-cache the list of available namespaces. As the user accesses resources their permissions within various namespaces is also cached to speed up future operations. The The application needs to be able to access these secrets in order to authenticate users. This section discusses the Kubernetes permissions needed by Weave GitOps application users and groups. At a minimum, a User should be bound to a Role in the For a wider scope, the User can be bound to a ClusterRole with the same set. On top of this you can add other permissions to view WGE resources like The following table lists resources that Flux works with directly. Weave GitOps needs to be able to query the CRDs that Flux uses before it can accurately display Flux state. The The Weave GitOps reads basic resources so that it can monitor the effect that Flux has on what's running. Reading Flux communicates the status of itself primarily via events. 
These events will show when reconciliations start and stop, whether they're successful, and information as to why they're not. The label of the OIDC button on the login screen is configurable via a feature flag environment variable. This can give your users a more familiar experience when logging in. Adjust the configuration in the Helm This section is purposefully vague as we intend to give a broad idea of how to implement such a system. The specifics will dependent on your circumstances and goals. Our general recommendation is to use OIDC and a small number of groups that Weave GitOps can impersonate. Configuring Weave GitOps to impersonate Kubernetes groups rather than users has the following benefits: - A user's permissions for impersonation by Weave GitOps can be separate from any other permissions that they may or may not have within the cluster. - Users do not have to be individually managed within the cluster and can have their permissions managed together. Assume that your company has the following people in OIDC: - Aisha, a cluster admin, who should have full admin access to Weave GitOps - Brian, lead of Team-A, who should have admin permissions to their team's namespace in Weave GitOps and read-only otherwise - June and Jo, developers in Team-A who should have read-only access to Weave GitOps You can then create three groups: Using OIDC for cluster and Weave GitOps Authentication If the same OIDC provider is used to authenticate a user with the cluster itself (e.g. for use with This can lead to unintended consequences, like viewing The yaml to configure these permissions would look roughly like: Here we provide guidance for GitHub, GitLab, BitBucket Server, and Azure DevOps. GitHub requires no additional configuration for OAuth git access Create a GitLab OAuth application that will request Follow the GitLab docs. 
The application should have at least these scopes: Add callback URLs to the application for each address the UI will be exposed on, e.g.: Save your application, taking note of the Client ID and Client Secret. Save them into the Replace values in this snippet and run: Create a new incoming application link from the BitBucket administration dashboard. You will be asked to enter a unique name and the redirect URL for the external application. The redirect URL should be set to Save your application and take note of the Client ID and Client Secret. Save them into the Replace values in this snippet and run: If the secret is already present, use the following command to update it using your default editor: Info If BitBucket Server is running on the default port (7990), make sure you include the port number in the values of the secret. For example: Navigate to VisualStudio and register a new application, as explained in the docs. Set the authorization callback URL and select which scopes to grant. Set the callback URL to Select the After creating your application, you will be presented with the application settings. Take note of the In your cluster, create a secret named Replace values in this snippet and run: WGE is now configured to ask users for authorization the next time a pull request must be created as part of using a template. Note that each user can view and manage which applications they have authorized by navigating to https://app.vsaex.visualstudio.com/me. By default, the WGE UI pod will listen on port It can then be accessed via port-forwarding: If you're using an ingress controller to terminate TLS you can disable it in the Helm release: Other ingress conguration changes can be made via the ingress configuration We deploy WGE via a Helm chart. We'll save and adapt the below template before committing it in Git to a Flux-reconciled path. Clone the newly created repo locally. We're gonna add some things! 
Download the helm-release to import ExampleWGE from "../assets/example-enterprise-helm.yaml"; import ExampleWGEContent from "!!raw-loader!../assets/example-enterprise-helm.yaml"; Once you have copied the above file, open and adjust the following configuration options: Ensure this has been set to your repository URL. By default, WGE will create new clusters in the The other important path to configure is where you'll store applications and workloads run on the new cluster. By default this is To login to the WGE UI, generate a bcrypt hash for your chosen password and store it as a secret in the Kubernetes cluster. There are several different ways to generate a bcrypt hash. Here, we'll use A validation to know it’s working: Policy agent comes packaged with the WGE chart. To install it, set the following values: Commit and push all the files Flux will reconcile the helm-release and WGE will be deployed into the cluster. You can check the Here are a couple of options for you to take your next steps with WGE. Explore one option or all of them, in no particular order. See also our guide to installing Weave GitOps Enterprise on Azure: - An Azure cluster deployed with either the Azure Portal or Azure CLI tools. - Azure Flux add-on deployed by adding a GitOps configuration, either via the Azure Portal or the CLI tool. Note that this documentation applies to both Azure AKS and Azure ARC clusters. The Azure cluster already has the Azure Flux add-on installed. This differs from CNCF Flux in that there are two additional controllers: - fluxconfig-agent - fluxconfig-controller These controllers have CRDs that define the version of Flux and any Flux Kustomizations that are managed via the Azure CLI. The CRDs are all apiVersion: clusterconfig.azure.com/v1beta1. The Kinds are: - FluxConfig - FluxConfigSyncStatus The FluxConfig Kind configures Flux itself and creates any Kustomizations that refer to a single-source GitRepository. 
This guide assumes that this process is already completed and that a top-level Kustomization has been configured for the fleet repo cluster directory already set up at The CRDs that this FluxConfig generates are Flux CRDs, as follows: - GitRepositories - Kustomizations These generated resources are viewable through Weave GitOps Enterprise. Weave GitOps itself is deployed by Flux using a HelmRelease that pulls the Helm Chart. It doesn’t need to install Flux, as it is assumed that Flux is already deployed. Therefore it can use the Azure Flux add-on, which poses no conflicts with WGE itself. Incompatibilities exist between the Azure Flux add-on and CNCF Flux. They should not be run at the same time, on the same cluster, due to conflicts in the CRD management. If the Flux bootstrapping process IS run on a cluster with Azure Flux add-on, it will override the Azure Flux add-on with the Flux version used in the bootstrap. Also, it would add Flux manifests to the source Git repository. This would be undesirable. Azure Flux add-on-enabled clusters keep the Azure Flux add-on in place. To join a cluster, you'll set up a service account with permissions and create a kubeconfig for the service account. This service account does not need cluster admin permissions unless you are bootstrapping Flux into the cluster. The bootstrapping process will either be A) carried out before joining the cluster to WGE; or B) configured specifically for Flux to be bootstrapped into the cluster from WGE. If you already have Flux running, you can create the service account in your fleet repo: Commit to your fleet repo to sync. Create a secret to store the kubeconfig, and a GitopsCluster object in the WGE management cluster that points to the kubeconfig secret. This allows you to connect to the target cluster and read various Kubernetes objects—including the Flux objects, such as: Kubernetes 1.24+ will not create secrets for Service Accounts for you, so you have to add it yourself. 
Add a new secret for the service account by adding to the service account yaml file in step 1. Create a kubeconfig secret. We'll use a helper script to generate the kubeconfig, and then save it into Create a secret for the generated kubeconfig in the WGE management cluster: You can also take care of this step in WGE's Secrets UI, setting up a a secret in SOPS or ESO. Flux CRDs are compatible with the Azure Flux Configuration CRDs. This means that there are no compatibility issues between WGE and Azure Flux. Create a GitopsCluster object. It must NOT be bootstrapped. Remove the annotation for bootstrap so it will not deploy Flux. Commit to your fleet repo and sync. Log in to your WGE management cluster to see if the cluster has appeared. MSFT maintains CAPZ, the Azure CAPI provider. Currently there is no support for Azure Flux. A CAPI-based cluster will continue to run the Flux bootstrap process on cluster creation when managed by WGE, because there is no Azure Flux option. WGE uses TF-controller to deploy Terraform resources. For WGE to use the cluster as a target requires A) a resource created in the management cluster and B) a kubeconfig that maps to a service account in the target cluster. The Terraform cluster build typically creates this service account and then outputs to a secret store or local secret so that WGE can use it as a cluster. The Flux bootstrap process can be initiated directly with the Flux Terraform module, which deploys CNCF Flux to the target cluster. Alternatively, you can apply an Azure Policy to provide the Azure Flux add-on. This is an example of how you can use the policy controls. This means you could come across clusters that are deployed with Terraform with the Azure Flux add-on already installed and would not run the Flux bootstrap process. Either way, it is typical that Terraform-deployed clusters do not run the Flux bootstrap process at all, because it is usually already installed. 
The Azure Flux add-on is supported under Crossplane-deployed Azure clusters. Any clusters deployed with Crossplane that have the Azure Flux add-on enabled would also be added to WGE without running the bootstrap process. Info This page details the changes for Weave GitOps Enterprise and its associated components. For Weave GitOps OSS, please see the release notes on GitHub. 2023-08-31 2023-08-17 2023-08-04 Warning This release builds upon Weave GitOps v0.29.0 that has breaking changes from Flux v2.0.0. Please make sure that you read these release notes. 2023-08-03 Danger We introduced a breaking change in this release by upgrading to Flux v2 APIs, notably Follow Flux or Weave GitOps to upgrade to Flux v2 GA before upgrading Weave GitOps Enterprise. 2023-07-20 2023-07-07 2023-06-22 2023-06-08 Bug fixes 2023-05-25 (none) 2023-05-12 2023-04-27 2023-04-13 2023-03-30 2023-03-16 2023-03-02 2023-02-16 This release contains dependency upgrades and bug fixes. For a larger list of updates, check out the Weave GitOps v0.17.0 release. 2023-02-02 No breaking changes 2023-01-19 No breaking changes 2023-01-05 [UI] "Tenant" is renamed to "Workspace" on details page. [UI] Use time.RFC3339 format for all timestamps of the workspaces tabs. [UI] Error notification boundary does not allow user to navigate away from the page. [Gitops run] GitOps Run doesn't ask to install dashboard twice No breaking changes 2022-12-22 [UI] Notifications Fixed provider page showing a 404. No breaking changes 2022-12-09 We highly recommend users of v0.11.0 upgrade to this version as it includes fixes for a number of UI issues. Supporting custom OIDC groups claims for azure/okta integration Support for OIDC custom username and group claims: Terraform CRD Error Users of the Terraform Controller will be pleased to know we’ve addressed the issue where an error would be displayed if it had not been installed on all connected clusters. 
Management cluster renaming If the name of the cluster where Weave GitOps Enterprise is installed, was changed from the default of management through the config.cluster.name parameter, certain workflows could fail such as fetching profiles, this has now been resolved. weave-gitops v0.12.0 cluster-controller v1.4.1 cluster-bootstrap-controller v0.3.0 (optional) pipeline-controller v0.0.11 (optional) policy-agent 2.1.1 2022-11-25 This release incorporates anonymous aggregate user behavior analytics to help us continuously improve the product. As an Enterprise customer, this is enabled by default. You can learn more about this here. We are making these changes to provide a unified and intuitive self-service experience within Weave GitOps Enterprise, removing misleading and potentially confusing terminology born from when only Clusters were backed by Templates. New API Group for the GitOpsTemplate CRD - old: After upgrading Weave GitOps Enterprise which includes the updated CRD: 1. Update all your GitOpsTemplates in Git changing all occurrences of Template Profiles / Applications / Credentials sections are hidden by default For both The default values for a profile are not fetched and included in a pull-request Prior to this release WGE would fetch the default values.yaml for every profile installed and include them in the This was an expensive operation and occasionally led to timeouts. The new behaviour is to omit the values and fall back to the defaults included in the helm-chart. This sacrifices some UX (being able to see all the defaults in the PR and tweak them) to improve performance. There should not be any final behaviour changes to the installed charts. 
You can still view and tweak the 2022-11-15 2022-11-10 Adds support for showing policy modes and policy configs in the UI Show suspended status on pipelines detail Align and link logo Actually remove the watcher from the helm-watcher-cache UI 1817 disable create target name space if name space is flux system Adding edit capi cluster resource acceptance test 2022-10-17 2022-09-22 If using the policy-agent included in the weave-gitops-enterprise helm chart, the configuration should now be placed under the old new Warning This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments. This page helps you to understand the options available to configure Explorer Before using Explorer, please ensure that: - You have Weave Gitops Enterprise v0.23.0 The following configuration options are available for you to setup Explorer. You should specify them in your HelmRelease values: Explorer watches the GitopsClusters that you have connected to Weave Gitops Enterprise, as well as your Management cluster. Explorer watches for the following kind resources out of the box: Weave Gitops - GitopsSets - Templates - Policy Audit Violations Explorer take a simple approach to manage resource views. It leverages a Data Store for caching the views and query them. The storage lifecycle is bounded to Weave Gitops Enterprise app and does not provide persistence guarantees. Instead, it requests data as required to the leaf clusters. In its simplest form, the data store used is SQLite. There are two main paths to consider within Explorer in the context of authentication and authorization (authN/authZ): We look into them separately. Explorer leverages existing authentication and authorization built-in the application. 
For a user logged in to the application, it identifies their identity and their access permissions via Kubernetes RBAC.
Explorer is that capability that allows any platform user to discover platform resources from a single place across all your kubernetes clusters. Explorer is better suited for journeys matching the discovery of resources across the platform resources inventory. If you have a particular resources you want to manage, weave gitops offers single resource experience for almost every resource. Explorer support all Flux Applications and Sources CRDs See Supported Kinds for more details. Now that you know what Explorer is, follow getting started to quickly have a feeling of what Explorer can do for you. Warning This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments. As platform engineer you could need to have a finer understanding on the underlying logic for Explorer. The following options are available to you to operate and troubleshoot it. It is a debugging tool to make visible explorer authorization logic. You could find it as tab You could discover by Explorer provides the following telemetry to use for operations. Explorer exports Prometheus metrics. See setup to get started. Explorer querying path is composed of three components exporting metrics: Based on go-http-metrics, the following metrics are generated. Request Duration: histogram with the latency of the HTTP requests. Response Size: histogram with the size of the HTTP responses in bytes Requests In Flight: gauge with the number of inflight requests being handled at the same time. Request Latency: histogram with the latency of the datastore read requests. Requests In Flight: gauge with the number of inflight requests being handled at the same time. Request Latency: histogram with the latency of the indexer read requests. Requests In Flight: gauge with the number of inflight requests being handled at the same time. 
Explorer collecting path is composed of three components exporting metrics: The following metrics are available to monitor its health. The metric Where A sum on Request Latency: histogram with the latency of the datastore write requests. Requests In Flight: gauge with the number of inflight write requests being handled at the same time. Request Latency: histogram with the latency of the indexer write requests. Requests In Flight: gauge with the number of inflight requests being handled at the same time. Use Explorer dashboard to monitor its golden signals Explorer dashboard is part of Weave GitOps Dashboards Warning This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments. Explorer recommended way to discover resources is via its search dialog. This guide provides the background to understand it and set how to use it. Every resource is normalised to the following common schema: For a The schema looks like You can open the query filter settings by clicking on the filter button: The To filter the results by cluster, kind, namespace, enable the checkbox filters: Note that the free-form terms only apply to the filtered results from the kind filter. In this case, we only match the "podinfo" string on results that are We can also "OR" filters together. Note that filters within a category are OR'd together, but terms are AND'd across categories. For example, selecting the We ❤️ your comments and suggestions as we look to make successfully adopting a cloud-native approach, to application deployment on Kubernetes with GitOps, easier and easier. There are a number of ways you can reach out: Weaveworks is utilizing Pendo, a product-analytics app, to gather anonymous user behavior analytics for both Weave GitOps and Weave GitOps Enterprise. 
We use this data so we can understand what you love about Weave GitOps, and areas we can improve. Weave GitOps OSS users will be notified when you create the dashboard for the first time via gitops create dashboard or when you use gitops run for the first time and decide to install the dashboard via that functionality. Analytics will not be enabled until after this notification so that you can opt out before sending analytics data. For Weave GitOps Enterprise users, this functionality is turned on by default. Further below we go into more detail about how you can control this functionality. We want to ensure that we are designing the best features, addressing the most pressing bugs, and prioritizing our roadmap appropriately for our users. Collecting analytics on our users’ behaviors gives us valuable insights and allows us to conduct analyses on user behavior within the product. This is important for us so we can make informed decisions- based on how, where and when our users use Weave GitOps - and prioritize what is most important to users like you. We’d like to understand the usage of the graph and dependency tabs within the dashboard. If users are utilizing this feature, we would like to understand the value and how we can improve that feature. However, if users aren’t using it, we can conduct research to understand why and either fix it, or come to the conclusion that it really doesn’t serve any utility and focus our efforts on more valuable features. Weave GitOps’s anonymous user and event data has a 24 month retention policy. The default value for data retention in Pendo is 7 years. For more information on Pendo’s data storage policies, click here. Weave GitOps gathers data on how the CLI and Web UI are used. There is no way for us or Pendo to connect our IDs to individual users or sites. For the CLI, we gather usage data on: For the Web UI, we gather usage data on: Weave GitOps CLI analytics are sent at startup. 
The dashboard analytics are sent through its execution. Both CLI and Dashboard analytics are sent to Pendo over HTTPS. The CLI code is viewable in pkg/analytics. It will ignore any errors, e.g. if you don’t have any network connection. The dashboard setup code is viewable in ui/components/Pendo.tsx - this will fetch third-party JavaScript from Pendo’s servers. All the data collected, analytics, and feedback are for the sole purpose of creating a better product experience for you and your teams. We would really appreciate it if you left the analytics on as it helps us prioritize which features to build next and what features to improve. However, if you do want to opt out of Weave GitOps’s analytics you can opt out of CLI and/or Dashboard analytics. We have created a command to make it easy to turn analytics on or off for the CLI. To disable analytics: gitops set config analytics false To enable analytics: gitops set config analytics true You need to update your helm release to remove The An example use case would be to ensure that certain RBAC or policies are applied to all clusters using this template. The When enabled, GitOps automatically injects a The intention here is to stop Flux from explicitly deleting subresources of the This is the pattern recommended in the capi-quickstart guide https://cluster-api.sigs.k8s.io/user/quick-start.html#clean-up. The Enterprise Here we're going to talk about the The Info GitOpsTemplate or CAPITemplate? The only difference between See the Weave GitOps Enterprise installation instructions for details on how to install the EE Using a local As in the UI you can add profiles to your template. However, instead of reading the latest version of a profile and its layers from a This particular helm repo provides a version of the You can supply a Instead of specifying the parameters on the command line you can supply a config file. 
For example, the above invocation can be replaced like so: ```yaml title=config.yaml template-file: capd-capi-template.yaml output-dir: ./out values: - CLUSTER_NAME=foo profiles: - name=cert-manager,namespace=foo,version=>0.1,values=cert-manager-values.yaml GitOps template objects need to be wrapped with the Tip For complete examples of widely-used templates, see the Quickstart guide. GitOps Templates were originally introduced to enable self-service operations for the cluster creation workflow. We have since extended this capability to cover Terraform, Crossplane and general Kubernetes resources. An example template could, upon merging to a GitOps repository and reconciling in a cluster, provide a running developer environment consisting of an EKS cluster, an RDS database, and a branch and revision of the current application through a single template. Templates can be loaded into the cluster by a Platform Operator by adding them to the Flux-managed GitOps repository for the target cluster. Alternatively, they can be applied directly to the cluster with Info Weave GitOps will search for templates in the Template types are used by Weave GitOps to group the templates nicely in the Dashboard UI. There are 4 recommended template types: Declare this in the object manifest by using the The rendering of certain component sections in a template can be enabled or disabled with annotations. The annotation keys are of the form Supported components: Example: When rendering a template, a It can be added to any other resource by simply adding the annotation in empty form. This annotation holds information about which template generated the resource and the parameter values used as a JSON string. If the resource type is one of the following and has this annotation, an Example: A Use a Application developers can use a template through our GUI. The rendered template is added to their GitOps repository via a pull request. 
When merged and reconciled, the resources in the template are created. A resource can be a Tip A Info GitOpsTemplate or CAPITemplate? The only difference between When users have chosen a template, they will be presented with a form to complete. This form will collect the specific resource configuration which they would like applied to their instance. Resource variables, or parameters, are set by the template author in the template object manifest under Some params are required for all resources as they will be used to generate paths for the eventually rendered resources. These are: - The following metadata fields can be added for each parameter under Example: Profiles are enhanced Helm Charts which allow operators to make additional components either optional or required to developers using self-service templates. Default and required profiles can be added via the template A template with the above profiles would offer Application Developers the option to add Keys available in the Keys available in the Tip Deprecated feature Where possible, please use the Profiles can also be included within templates by the Where: The templates exist as a Helm Chart in the weave-gitops-quickstart GitHub repo. To get started, add the following Commit and merge the above file. Once the If you click on the The following pipeline templates have been made available on your Weave GitOps Enterprise instance: The above Quickstart templates are designed to provide a practical getting-started experience. We encourage Platform Operators to start off with these templates within their team to ramp up on using Weave GitOps. If the need arises later, operators can always expand on these templates to develop their own set of self-service capabilities. As a developer using Weave GitOps Enterprise, use the templates to explore GitOps's capabilities. For example, to create a pipeline for your application: use the above template provided by your Operations team to create required resources. 
Once they have been added to your GitOps repository, you can adapt the rendered resources to meet your needs. Want to contribute? The Quickstart templates are maintained by the Weave GitOps team. If you would like to make alterations, suggest fixes, or even contribute a new template which you find cool, just head to the repo and open a new issue or PR! Template authors can configure the eventual location of the rendered template in the user's GitOps repository. This allows for more control over where different resources in the template are rendered. The path for rendered resources is configured via the Important to note The Example If the In this case, some of the submitted params are used. Users must provide one of the following parameters: - To ensure users supply these values, set the parameters to Important The kustomization feature and the The default path for a template has a few components: - From the params: These are composed to create the path: Using the default values and supplying Resource templates are used to create Kubernetes resources. They are defined in the The The This can be useful to preserve comments or formatting in the rendered resource. Info The following templating languages are supported: - envsubst (default) - templating Declare the templating language to be used to render the template by setting Variables can be set for rendering into the template in the Templating uses text/templating for rendering, using go-templating style syntax As taken (from the Sprig library) The default delimiters for There are now multiple published versions of the template CRD. When manually migrating a template from If you experience issues with the path not being recognised when Flux reconciles the new template versions, try manually applying the new template to the cluster directly with: 1. Run As of Weave GitOps Enterprise 0.28.0 the conversion webhook has been removed. 
This removed the need for cert-manager to be installed, but you will now have to convert any This version changes the type of Example: The original version of the template. This version no longer works with Weave Gitops Enterprise 0.28.0 and above. It uses Example: Packages: Package v1alpha1 contains API Schema definitions for the gitopssets v1alpha1 API group GitOpsSet is the Schema for the gitopssets API Suspend tells the controller to suspend the reconciliation of this
+GitOpsSet. Generators generate the data to be inserted into the provided templates. Templates are a set of YAML templates that are rendered into resources
+from the data supplied by the generators. The name of the Kubernetes service account to impersonate
+when reconciling this Kustomization.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ APIClientGenerator defines a generator that queries an API endpoint and uses
+that to generate data. The interval at which to poll the API endpoint. This is the API endpoint to use. Method defines the HTTP method to use to talk to the endpoint. JSONPath is string that is used to modify the result of the API
+call. This can be used to extract a repeating element from a response.
+https://kubernetes.io/docs/reference/kubectl/jsonpath/ HeadersRef allows optional configuration of a Secret or ConfigMap to add
+additional headers to an outgoing request. For example, a Secret with a key Authorization: Bearer abc123 could be
+used to configure an authorization header. Body is set as the body in a POST request. If set, this will configure the Method to be POST automatically. SingleElement means generate a single element with the result of the API
+call. When true, the response must be a JSON object and will be returned as a
+single element, i.e. only one element will be generated containing the
+entire object. Reference to Secret in same namespace with a field “caFile” which
+provides the Certificate Authority to trust when making API calls.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ ClusterGenerator defines a generator that queries the cluster API for
+relevant clusters. Selector is used to filter the clusters that you want to target. If no selector is provided, no clusters will be matched.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ ConfigGenerator loads a referenced ConfigMap or
+Secret from the Cluster and makes it available as a resource. Kind of the referent. Name of the referent.
+(Appears on:
+GitOpsSetSpec)
+ GitOpsSetGenerator is the top-level set of generators for this GitOpsSet.
+(Appears on:
+MatrixGenerator)
+ GitOpsSetNestedGenerator describes the generators usable by the MatrixGenerator.
+This is a subset of the generators allowed by the GitOpsSetGenerator because the CRD format doesn’t support recursive declarations. Name is an optional field that will be used to prefix the values generated
+by the nested generators, this allows multiple generators of the same
+type in a single Matrix generator.
+(Appears on:
+GitOpsSet)
+ GitOpsSetSpec defines the desired state of GitOpsSet Suspend tells the controller to suspend the reconciliation of this
+GitOpsSet. Generators generate the data to be inserted into the provided templates. Templates are a set of YAML templates that are rendered into resources
+from the data supplied by the generators. The name of the Kubernetes service account to impersonate
+when reconciling this Kustomization.
+(Appears on:
+GitOpsSet)
+ GitOpsSetStatus defines the observed state of GitOpsSet
+(Members of ObservedGeneration is the last observed generation of the HelmRepository
+object. Conditions holds the conditions for the GitOpsSet Inventory contains the list of Kubernetes resource object references that
+have been successfully applied
+(Appears on:
+GitOpsSetSpec)
+ GitOpsSetTemplate describes a resource to create Repeat is a JSONPath string defining that the template content should be
+repeated for each of the matching elements in the JSONPath expression.
+https://kubernetes.io/docs/reference/kubectl/jsonpath/ Content is the YAML to be templated and generated.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ GitRepositoryGenerator generates from files in a Flux GitRepository resource. RepositoryRef is the name of a GitRepository resource to be generated from. Files is a set of rules for identifying files to be parsed. Directories is a set of rules for identifying directories to be
+generated.
+(Appears on:
+APIClientGenerator)
+ HeadersReference references either a Secret or ConfigMap to be used for
+additional request headers. The resource kind to get headers from. Name of the resource in the same namespace to apply headers from.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ ImagePolicyGenerator generates from the ImagePolicy. PolicyRef is the name of a ImagePolicy resource to be generated from.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ ListGenerator generates from a hard-coded list.
+(Appears on:
+GitOpsSetGenerator)
+ MatrixGenerator defines a matrix that combines generators.
+The matrix is a cartesian product of the generators. Generators is a list of generators to be combined. SingleElement means generate a single element with the result of the
+merged generator elements. When true, the matrix elements will be merged to a single element, with
+whatever prefixes they have.
+It’s recommended that you use the Name field to separate out elements.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ OCIRepositoryGenerator generates from files in a Flux OCIRepository resource. RepositoryRef is the name of a OCIRepository resource to be generated from. Files is a set of rules for identifying files to be parsed. Directories is a set of rules for identifying directories to be
+generated.
+(Appears on:
+GitOpsSetGenerator,
+GitOpsSetNestedGenerator)
+ PullRequestGenerator defines a generator that queries a Git hosting service
+for relevant PRs. The interval at which to check for repository updates. Determines which git-api protocol to use. This is the API endpoint to use. This should be the Repo you want to query.
+e.g. my-org/my-repo Reference to Secret in same namespace with a field “password” which is an
+auth token that can query the Git Provider API. Labels is used to filter the PRs that you want to target.
+This may be applied on the server. Fork is used to filter out forks from the target PRs if false,
+or to include forks if true
+(Appears on:
+GitRepositoryGenerator,
+OCIRepositoryGenerator)
+ RepositoryGeneratorDirectoryItem stores the information about a specific
+directory to be generated from.
+(Appears on:
+GitRepositoryGenerator,
+OCIRepositoryGenerator)
+ RepositoryGeneratorFileItem defines a path to a file to be parsed when generating. Path is the name of a file to read and generate from can be JSON or YAML.
+(Appears on:
+GitOpsSetStatus)
+ ResourceInventory contains a list of Kubernetes resource object references that have been applied by a Kustomization. Entries of Kubernetes resource object references.
+(Appears on:
+ResourceInventory)
+ ResourceRef contains the information necessary to locate a resource within a cluster. ID is the string representation of the Kubernetes resource object’s metadata,
+in the format ‘namespace_name_group_kind’. Version is the API version of the Kubernetes resource object’s kind. This page was automatically generated with The gitopssets-controller can be installed in two ways: The standalone installation can be useful for leaf clusters that don't have Weave GitOps Enterprise installed. Before installing the gitopssets-controller, ensure that you've installed Flux. To install the gitopssets-controller using a Helm chart, use the following HelmRelease: After adding the Namespace, HelmRepository and HelmRelease to a Git repository synced by Flux, commit the changes to complete the installation process. Not all generators are enabled by default, this is because not all CRDs are required by the generators. You might want to enable or disable individual generators via the Helm Chart: 2023-09-06 2023-09-05 2023-08-17 2023-08-17 2023-08-17 2023-08-10 2023-07-26 2023-07-14 2023-06-26 2023-06-21 2023-06-20 2023-05-24 2023-05-10 2023-04-28 2023-04-27 2023-04-13 2023-03-30 2023-03-20 Warning This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that some changes will be made to the API and behavior, particularly to enhance security by implementing impersonation for more fine-grained control over how the generated resources are applied. GitOpsSets enable Platform Operators to have a single definition for an application for multiple environments and a fleet of clusters. A single definition can be used to generate the environment and cluster-specific configuration. As an example, we can take an application that needs to be deployed to various environments (Dev, Test, Prod) built by a fleet of clusters. Each of those environments + clusters requires a specialized configuration powering the same Application. 
With GitOpsSets and the generators you just declare the template you want to use, the selector that will match the cluster of the inventory, and where to get the special configuration. GitOpsSets will create out of the single resource all the objects and Flux primitives that are required to successfully deploy this application. An operation that required the editing of hundreds of files can now be done with a single command. The initial generators that are coming with the preview release are: Currently rendering templates operates in two phases: Please read the security information below before using this. GitOpsSets can be suspended, by setting the When this is the case, updates will not be applied, no resources created or deleted. In addition, a manual reconciliation can be requested by annotating a GitOpsSet with the The simplest generator is the The elements in there are a set JSON of objects[^yaml], there are three in this example, and each of them has two keys, Other generators provide different sets of keys and values. The generators documentation below provides more information on what the other generators output. Templates are Kubernetes resources in YAML format. Each template is rendered for each element generated by the generators. The generated elements are provided to the template in the The output from all generators is exposed in the In addition to the NOTE: It's not recommended that you use this to name resources where the ordering of the queries for generating the elements is not guaranteed to be ordered, otherwise you could generate churn in resources as we look for resources by name when updating them, so, The output from a generator is an array of JSON objects[^yaml], the keys of which can contain repeating elements, either further JSON objects, or scalar values. It can be desirable to repeat a template for a repeated element in a generated value. 
The template Templates that use In this case, six different As with the The default delimiters for the template engine are These can be changed by adding an annotation to the Changing the delimiters can be useful for: In YAML Unquoted values allow you to include objects in your templates too. With the default Again, if we quote them we would get a string value, not an object. We currently provide these generators: - list - pullRequests - gitRepository - ociRepository - matrix - apiClient - cluster - imagepolicy - config This is the simplest generator, which is a hard-coded array of JSON objects, described as YAML mappings. The When a The generator operates in two different ways: you can parse files (YAML or JSON) into Elements, or you can scan directories for subdirectories. In this example, a Flux These files can be JSON or YAML. In this example we expect to find the following structure in the files: Changes pushed to the For security reasons, you need to explicitly list out the files that the generator should parse. In this example, a Flux Each generated element has two keys, It is also possible to exclude paths from the generated list, for example, if you do not want to generate for a directory you can exclude it with: In this case, all directories that are subdirectories of Note: The directory tree detection is restricted to the same directory as the path, no recursion is done. In fact the path is treated as a Glob. The When an The This will require making authenticated requests to your Git hosting provider e.g. GitHub, GitLab, Bitbucket etc. It only requires read-only access, but all API tokens should be guarded as carefully as possible; what is a "read-only" token today might become a token with higher privilege in the future. There have been many security compromises using API access tokens, do not let this happen to you! 
This example will poll "github.com/bigkevmcd/go-demo" for open pull requests and trigger the deployment of these by creating a Flux As the generator only queries open pull requests, when a PR is closed, the generated resources will be removed. For non-public installations, you can configure the The The Additionally labels can be provided for querying pull requests with matching labels e.g. The fields emitted by the pull-request are as follows: Create a read-only token that can list Pull Requests, and store it in a secret: The matrix generator doesn't generate resources by itself. It combines the results of generation from other generators e.g.: Given the files mentioned all have the following structure: This will result in three sets of generated parameters, which are a combination of the maps in the files in the gitRepository, and the elements in the list generator, this can result in a combinatorial explosion of resources being created in your cluster. These can be referenced in the templates, note that all keys in the merged generators from the Matrix are contained in the If you want to use two generators in a Matrix that output the same fields, they will collide, for example, the You can provide a name for the generator in the Matrix: The example above will yield: A matrix generator will normally generate a cartesian result, but you can also generate a single result. This would query for clusters matching the respective labels. The resulting output would look like this (in YAML): Compare this with the alternative without the In the If the Matrix generators are unnamed, they will be grouped under a top-level This generator is configured to poll an HTTP endpoint and parse the result as the generated values. This will poll an endpoint on the interval, instead of using the simpler to use PullRequest generator, you can access GitHub's API with the APIClient generator. The PullRequest generator is simpler to use, and works across multiple different git-providers. 
The GitHub documentation for the API endpoint shows: This can be translated into... As with the Pull Request generator, this also requires a secret token to be able to access the API We need to pass this as an HTTP header. The keys in the secret match the command-line example using curl. Unlike the Pull Request generator, you need to figure out the paths to the elements yourself. Not all APIs return an array of JSON objects, sometimes it's nested within a result type structure e.g. You can use JSONPath to extract the fields from this data... This will generate three maps for templates, with just the env and team keys. Another piece of functionality in the APIClient generator is the ability to POST JSON to the API. This will send a request body as JSON (Content-Type "application/json") to the server and interpret the result. The JSON body sent will look like this: Instead of using the JSONPath to extract from a complex structure, you can configure the result to be a single element. Whatever result is parsed from the API endpoint will be returned as a map in a single element. For generation, you might need to use the If the API endpoint you are accessing requires a custom CA you can provide this via the secret field. This secret should look like this: The request will be made with the custom CA. The cluster generator generates from in-cluster GitOpsCluster resources. For example, this The following fields are generated for each GitOpsCluster. If the selector is not provided, all clusters from all namespaces will be returned: Otherwise if the selector is empty, no clusters will be generated: The When an The generated elements have the following fields: This can be used simply, to create a deployment with an image...or, combined with a Matrix generator, to manage multiple workloads with the same image. 
In this example, a Combined in a Matrix, like this, it will generate two The resulting ConfigMaps look like this: With the templated fields like this: The When an This can be used simply, to create a resource with a config variable...or, combined with a Matrix generator, to manage multiple workloads with the same values. With the existing As with the other generators, the This will generate two The resulting ConfigMaps look like this: With the templated fields like this: Currently, the Sprig functions are available in the templating, with some functions removed[^sprig] for security reasons. In addition, we also provide two additional functions: The examples below assume an element that looks like this: And a template that looks like this: This would output: For a template that looks like this: This would output: If the key to get does exist in the Warning Generating resources and applying them directly into your cluster can be dangerous to the health of your cluster. This is especially true for the The default It is not recommended that you create a role with blanket permissions; under the right circumstances, someone could accidentally or maliciously overwrite the cluster control-plane, which could be very dangerous. You can configure the service-account that is used to create resources. The enabled generators can be configured via the The default is to enable all generators. For example, to enable only the When a GitOpsSet that uses disabled generators is created, the disabled generators will be silently ignored. GitOpsSets can be memory-hungry, for example, the Matrix generator will generate a cartesian result with multiple copies of data. The OCI and GitRepository generators will extract tarballs, the API Generator queries upstream APIs and parses the JSON, and the Config generators will load Extracting tarballs can also prove to be CPU intensive, especially where there are lots of files, and you have a very frequent regeneration period. 
To this end, you will need to monitor the controller metrics, and maybe increase the limits available to the controller. For example, to increase the amount of memory available to the controller: Events are enabled which will trigger Kubernetes events when successful reconciliation occurs with a To configure receiving the recorded events on a specific host, this can be provided via the See fluxcd event for the struct of the event created. [^yaml]: These are written as YAML mappings [^sprig]: The following functions are removed "env", "expandenv", "getHostByName", "genPrivateKey", "derivePassword", "sha256sum", "base", "dir", "ext", "clean", "isAbs", "osBase", "osDir", "osExt", "osClean", "osIsAbs" Important Alone, this is an insecure method of securing your dashboard. It is designed to be used with other external authentication systems like auth proxies. Set the following values in the Helm Chart: The value of the When this flag is set all other authentication methods (e.g. those specified via No login screen will be displayed when accessing the dashboard. You can bind the user provided to a ClusterRole with a ClusterRoleBinding. This would allow access to any resource. Weave GitOps lets you add annotations with custom metadata to your Flux automations and sources, and they will be displayed in the main UI. For example, you might use this to add links to dashboards, issue systems, or documentation and comments that you wish to be directly visible in the GitOps UI. We will use the Close the file and commit and push your changes. Back in your GitOps dashboard, navigate to the 'Applications' tab and select the Restrictions We are very excited for the release of the Flux v2.0 GA! This guide aims to answer some common questions before starting the upgrade, and provides step-by-step instructions. Useful terms used in this guide: Here you can find the most common questions around upgrading. 
Although Flux Beta APIs have been stable and used in production for quite some time, Flux GA is the main supported API version for new features and development. Features like horizontal scaling are only available in Flux GA. Also, beta APIs will be removed after six months. Yes. This has been possible since Weave GitOps v0.22.0. Use the latest available release for the best experience. Yes. This has been possible since Weave GitOps Enterprise v0.22.0. Use the latest available release for the best experience. The following limitations are known by version: No limitations If you are using GitOpsSets, upgrade that component to v0.10.0 for Flux GA compatibility. Update the Weave GitOps Enterprise HelmRelease values to use the new version. As of Weave GitOps v0.29, only Flux v2.0 GA is supported. Please follow the Upgrade section to help you with the process. Earlier versions of Weave GitOps work with both Flux v2 GA and Flux v2 0.x (the pre-GA ones), but it is encouraged that you upgrade to the latest version for the best experience. Hosted Flux? If you are using a hosted Flux version, please check with your provider if they support Flux GA before upgrading following this guide. Known hosted Flux providers: As of writing, they do not yet support the new version, so please wait before upgrading to Flux GA. Below, we'll take you through the multiple steps required to migrate your system to Flux GA. After each step the cluster will be in a working state, so you can take your time to complete the migration. Follow the upgrade instructions from the Flux v2.0.0 release notes. At minimum, you'll need to rerun the You'll also need to bump API versions in your manifests to Bumping the APIs version in manifests can be done gradually. It is advised to not delay this procedure as the beta versions will be removed after 6 months. At this stage all clusters are running Flux GA. First, we ensure any new clusters are bootstrapped with Flux GA. 
Then we'll upgrade the existing clusters. At this stage, your new bootstrapped clusters will run Flux GA. Use your regular WGE upgrade procedure to bring it to the latest version At this stage you have Weave GitOps running Flux GA. Bumping the APIs version in manifests can be done gradually. We advise against delaying this procedure as the Beta versions will be removed after six months. Update Update If you haven't done it yet, plan to update your If you find any issues, please let us know via support. 👋 Come talk to us and other users in the #weave-gitops channel on Weaveworks Community Slack. Invite yourself if you haven't joined yet. The Flux project has a fantastic community to help support your GitOps journey, find more details on how to reach out via their community page Weaveworks provides Weave GitOps Enterprise, a continuous operations product that makes it easy to deploy and manage Kubernetes clusters and applications at scale in any environment. The single management console automates trusted application delivery and secure infrastructure operations on premise, in the cloud and at the edge. To discuss your support needs, please contact us at sales@weave.works. Got a suggestion for this list? Please open a pull request using the "Edit this page" link at the bottom. For full documentation visit mkdocs.org and Material theme for mkdocs. Follow the installation instructions here https://squidfunk.github.io/mkdocs-material/getting-started/ Under the folder "GitOps is the best thing since configuration as code. Git changed how we collaborate, but declarative configuration is the key to dealing with infrastructure at scale, and sets the stage for the next generation of management tools" - Kelsey Hightower, Staff Developer Advocate, Google. Weave GitOps improves developer experience—simplifying the complexities and cognitive load of deploying and managing cloud native apps on Kubernetes so that teams can go faster. 
It’s a powerful extension of Flux, a leading GitOps engine and Cloud Native Computing Foundation project. Weaveworks are the creators of Flux. Weave GitOps’ intuitive user interface surfaces key information to help application operators easily discover and resolve issues—simplifying and scaling adoption of GitOps and continuous delivery. The UI provides a guided experience that helps users to easily discover the relationships between Flux objects and build understanding while providing insights into application deployments. Today Weave GitOps defaults are Flux, Kustomize, Helm, SOPS, and Kubernetes Cluster API. If you use Flux already, then you can easily add Weave GitOps to create a platform management overlay. Tip Adopting GitOps can bring a number of key benefits—including faster and more frequent deployments, easy recovery from failures, and improved security and auditabiity. Check out our GitOps for Absolute Beginners eBook and Guide to GitOps for more information. This user guide provides content that will help you to install and get started with our free and paid offerings: - Weave GitOps Open Source: a simple, open source developer platform for people who don't have Kubernetes expertise but who want cloud native applications. It includes the UI and many other features that take your team beyond a simple CI/CD system. Experience how easy it is to enable GitOps and run your apps in a cluster. Go here to install. - Weave GitOps Enterprise: an enterprise version that adds automation and 100% verifiable trust to existing developer platforms, enabling faster and more frequent deployments with guardrails and golden paths for every app team. Note that Enterprise offers a more robust UI than what you'll find in our open source version. Go here to install. Tip Want to learn more about how Weave GitOps Enterprise can help your team? Get in touch with sales@weave.works to discuss your needs. 
Weave GitOps works on any Chromium-based browser (Chrome, Opera, Microsoft Edge), Safari, and Firefox. We only support the latest and prior two versions of these browsers. To give Weave GitOps a test drive, we recommend checking out the Open Source version and its UI, then deploying an application. Let's take a closer look at the features it offers you, all for free. Like our Enterprise version, Weave GitOps Open Source fully integrates with Flux as the GitOps engine to provide: Some of the things you can do with it: OK, time to install! Weave GitOps Enterprise provides monitoring telemetry and tooling for metrics and profiling. WGE generates Prometheus metrics for monitoring both performance and business operations. The following configuration options are available for you to configure Warning The monitoring server holds private services, so you probably won't need to expose anything beyond your cluster. If you must, ensure that it is properly secured. This setup follows the same monitoring approach as Flux and is based on Prometheus Operator. Adapt it to your context as needed. Weave GitOps Overview Monitor Weave GitOps golden signals for API server and controllers: Weave GitOps Runtime Monitor Weave GitOps Go runtime metrics like memory usage, memory heap, and Goroutines, among others. Explorer You can also monitor Explorer golden signals. During operations, profiling is useful for gaining a deeper understanding of how Weave GitOps runtime behaves. Given that Weave GitOps is written in Go, profiling happens through pprof. It is exposed as a web endpoint by pprof http. Go here for more info on using Weave GitOps is also available via the AWS Marketplace. The following steps will allow you to deploy the Weave GitOps product to an EKS cluster via a Helm Chart. These instructions presume you already have installed To deploy the managed Weave GitOps solution, first subscribe to the product on AWS Marketplace. 
Note: it may take ~20 minutes for your Subscription to become live and deployable. If you do not have a cluster on EKS, you can use Copy the contents of the sample file below into Create the cluster: In order to use the Weave GitOps container product, your cluster must be configured to run containers with the correct IAM Policies. The recommended way to do this is via IRSA. Use this Save the example below as First retrieve the ARN of the IAM role which you created for the This value will also be discoverable in your IAM console, and in the Outputs of the Cloud Formation template which created it. Copy the Chart URL from the Usage Instructions in AWS Marketplace, or download the file from the Deployment template to your workstation. To be able to log in to your new installation, you need to set up authentication. Create a new file Then install it: Run the following from your workstation: Your Weave GitOps installation is now ready! Now that you have a feel for how to navigate the dashboard, let's deploy a new application. In this section we will use podinfo as our sample web application. More information about If you get stuck here, try the Flux will detect the updated Click on podinfo to find details about the deployment. There should be two pods available. Info Podinfo comes with a HorizontalPodAutoscaler, which uses the To customize a deployment from a repository you don’t control, you can use Flux in-line patches. The following example shows how to use in-line patches to change the podinfo deployment. Suspending updates to a kustomization allows you to directly edit objects applied from a kustomization, without your changes being reverted by the state in Git. 
To suspend updates for a kustomization, from the details page, click on the suspend button at the top, and you should see it be suspended: This shows in the applications view with a yellow warning status indicating it is now suspended To resume updates, go back to the details page, click the resume button, and after a few seconds reconsolidation will continue. To delete Podinfo in the GitOps way, run this command from the root of your working directory: Congratulations 🎉🎉🎉 You've now completed the getting started guide. We welcome any and all feedback, so please let us know how we could have made your experience better. Tip These instructions only apply to Weave GitOps Open Source. To install Weave GitOps Enterprise, go here. This page covers Weave GitOps Open Source installation and is adapted from the Flux - Getting Started guide. If you haven't already, please check out our Introduction to Weave GitOps page for additional information about Weave GitOps Open Source as well as our Enterprise version. Before you can install Weave GitOps Open Source, you will need: We also recommend taking a look at the Flux Core Concepts page if you need to brush up on terminology. No matter which version of Weave GitOps you install, you need to have a Kubernetes cluster up and running. We test Weave GitOps against the latest supported Kubernetes releases. Note that the version of Flux that you use might impose further minimum version requirements. Weave GitOps is an extension to Flux. Therefore, it requires that Flux 0.32 or a later version has already been installed on your Kubernetes cluster. Full documentation is available here. In this section we are going to do the following: Let's get into it... 
To upgrade to the latest version, run this command: We recommend upgrading the CLI before running bootstrap to upgrade the controllers with Find which version is installed with With Bash, you can run Tip If you want to install an older version of Flux CLI, you can download the binary for your OS from the releases page. For other installation methods, see the relevant Flux documentation. Ensure your PAT has The output is similar to: The command below assumes the Git provider is Full installation documentation, including how to work with other Git providers, is available here. Weave GitOps includes a command-line interface to help users create and manage resources. The There are multiple ways to install the In this section we will: If you have difficulty saving the YAML to the correct path, run the command Run the following command, which will create a Warning This command stores a hash of a password. This is relatively safe for demo and testing purposes, but we strongly recommend using a more secure method of storing secrets (such as Flux's SOPS integration) for production systems. Our docs on securing access to the dashboard provide additional guidance and alternative login methods. You will use the password you've just created when you've finished Weave GitOps Open Source installation and are ready to login to the dashboard UI. Tip If you need to customize the Weave GitOps Helm release, you can use the Note: this wont be instantaneous. Give the Flux controllers a couple of minutes to pull the latest commit. You should see something similar to: If you wait for a while and still nothing happens, it might be that your manifests haven’t been exported to the repository. This means that Weave GitOps won't install. Tip You can use the Weave GitOps Helm Chart to customize your installation. Find the full Chart reference here. Now let's explore the Weave GitOps Open Source UI. Then, we'll deploy an application. 
By default, the UI is served on the root path To run the UI on a subpath, you need to set the To set the flag we use the The Weave GitOps Helm chart can generate an See the Helm chart reference for a list of all supported ingress options. The Weave GitOps user interface enables you to manage and view all of your applications in one place. This documentation gives you an overview of the Weave GitOps Open Source UI. Tip To check out Weave GitOps Enterprise's UI, which provides an even richer user experience, please contact sales@weave.works. A quick preview of what the Weave GitOps Open Source UI provides: * an Applications view that shows summary information from It also enables you to: * sync your latest Git commits directly from the UI * leverage Kubernetes RBAC to control permissions in the dashboard Let's dive in. First, expose the service running on the cluster with this command: Next, open the dashboard and login using either the emergency cluster user or OIDC, based on your configuration. (Note: The same directions for WGE apply to OSS for this step.) If you followed the example above, the emergency user will be configured with the username set to The label of the OIDC button on the login screen is configurable via a feature flag environment variable. This can give your users a more familiar experience when logging in. Adjust the configuration in the Helm Upon login you're taken to the Applications view, which allows you to quickly understand the state of your deployments and shows summary information from In the above screenshot you can see: - two The table view shows you the reported status so you can understand whether a reconciliation has been successful, and when it was last updated. You can also see where the Flux objects are deployed, which Tip For more information about Sources, please take a look at the Flux documentation. 
For information on Source verification, you can check: - Flux documentation - GitRepository verification - Flux documentation - OCIRepository verification If verification is not set up for the repository, this will appear blank in the UI. More actions you can take: * Click the magnifying glass icon to search for and filter objects by Let's explore the It might take a few moments for the data to load. Once it does, you should get a result that resembles the above screenshot. Here you can find key information about how the resource is defined: * which Underneath the summary information you'll find: In the left-hand menu of the UI, click on the Sources view. This will show you where Flux pulls its application definitions from—for example, Git repositories—and the current state of that synchronization. Sources shows summary information from In the above screenshot you can see: - a These have both had verification set up on them which has been completed successfully. The Sources table view displays information about status so that you can see whether Flux has been able to successfully pull from a given source, and which specific commit was last detected. It shows you key information like the Actions you can take: * Apply filtering as you did the Applications view. * Click a Go back to the Details tab, and click As with an Application detail view, you can see key information about how the resource is defined. Maybe you're an app developer who wants to deploy the latest image in a dev/staging environment with as minimal fuss as possible and reduce GitOps friction. Or you might be a platform engineer who wants to keep your platform up-to-date with the latest approved versions—for example, patch releases to reduce exposure to CVEs—or auto-deploy when approval is gated before adding an image to an internal registry. The Image Automation view can help. 
WeGO's Image Automation view allows users to configure automatic updates to their workloads based on the detection of a new image tag in a repository. For application developers, this means faster deployments and shorter feedback cycles to easily verify changes to an application in a Kubernetes environment. The view still supports GitOps workflows as the changes are committed back to Git—either to the branch already reconciled by Flux, or to an alternative branch so that a Pull Request can be generated and peer review can occur. Image Automation refers to Flux's ability to update the image tag specified in a manifest based on detection of a newer image and automatically deploy to a cluster. It involves three required objects—ImageRepositories, ImagePolicies, and ImageUpdateAutomations—which WeGO OSS users can discover on their clusters. Users can also view object details either through a YAML-like view, as we do for most non-Flux objects, or a details view. The UI makes it possible to suspend or resume ImageRepositories and ImageUpdateAutomations so that Flux stops looking for new updates or committing these to Git. Also, the UI shows whether all required resources are configured and assists with Image Policy to show the latest image. ImageRepositories, ImagePolicies, and ImageUpdateAutomations are used by Flux's Image Automation Controllers. The Image Reflector controller and the Image Automation controller work together to update a Git repository when new container images are available. In WeGO OSS, if the image-reflector-controller and or image-automation-controller are not installed on a cluster, a warning message will display. If you make a mistake configuring one of the resources, you can use WeGO to easily trace from the Image Repository scan, see whether it is able to select the image based on the Image Policy, and detect whether an Image Update has successfully run. 
This provides greater visibility into the machinery provided by Flux and enables quicker troubleshooting than what's possible by hunting via the Flux CLI. App devs can triage issues without depending on their platform teams. Let's go back to the left-hand menu of the UI and click on The Controllers tab shows your installed GitOps Toolkit Controllers and their version. By default, From this view you can see whether the controllers are healthy and which version of a given component is currently deployed. The CRD tab lists the custom resources that the GitOps Toolkit Controllers use. This allows you to see which resources you will be able to create. Now that we are familiar with the dashboard, let's deploy a new application . Warning This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments. To view pipelines, users need read access to the Warning This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments. Weave GitOps Enterprise Pipelines enables teams to increase the velocity, stability, and security of software systems via automated deployment pipelines. It provides insights into new application versions that are being rolled out across clusters and environments, which allows you to implement security guardrails and track metrics to assess if the application is working as desired. In instances of failures, the change is abandoned with an automatic rollout of the older version. With Pipelines, you define a release pipeline for a given application as a custom resource. The pipeline can comprise any number of environments through which an application is expected to be deployed. 
Push a change to your application in your dev environment, for example, and watch the update roll out across staging and production environments all from a single PR (or an external process like Jenkins)—with Weave GitOps Enterprise orchestrating everything. Designed with flexibility in mind, Pipelines can be easily integrated within your existing CI setup—for example, CircleCI, Jenkins, Tekton, or GitHub Actions. The Pipelines feature: - reduces toil and errors when setting up a new pipeline or reproducing previous pipelines through YAML constructs - saves time and overhead with automated code rollout from one environment to another, with minimal intervention from the Ops team - enables users to observe code progression and track application versions through different environments from the Weave GitOps UI - streamlines code deployment from one environment to another, and minimizes friction between application development and Ops teams - enables you to easily define which Helm charts are part of the environments you create—saving lots of time through automated package management Now that you know what delivery pipelines can do for you, follow the guide to get started. Warning This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments. Before using Pipelines, please ensure that: - You have Weave GitOps Enterprise installed on a cluster. - You have configured Weave GitOps Enterprise RBAC for Pipelines. - The Pipelines feature flag A pipeline allows you to define the route your application is taking, so that you can get it to production. Three main concepts are at play: - the You can define a delivery pipeline using a In the example above, the For more details about the spec of a pipeline, go here. 
Once Flux has reconciled your pipeline, you can navigate to the Pipelines view in the WGE UI to see the list of pipelines to which you have access. For each pipeline, the WGE UI shows a simplified view with the application Once you have selected a pipeline from the list, navigate to its details view where you can see the current status of your application by environment and deployment target. To create new Pipelines and their required resources from within Weave GitOps Enterprise, you can leverage GitOpsTemplates, which help platform teams scale for developer self-service. This document provides example configuration that you can adapt and use within your own organization, based on your tenancy model. We will cover the creation of: Secrets, required for authentication and authorization between leaf and management clusters as well as to Git, are out of scope for this document and must be handled by your chosen secret management solution. For advice on Secrets Management, refer to the Flux guide. Templates can include a single resource or multiple resources, depending on your use case. For example, you may want to only create the Pipeline custom resource to associate existing HelmReleases. Or, you can create the HelmReleases, notification controller resources, and Pipeline all in a single template. They are highly customizable to meet the needs of your teams. GitOpsTemplates are custom resources installed onto a management cluster where Weave GitOps Enterprise resides. To add a new Pipeline, click The This section provides examples to help you build your own templates for Pipelines. Included Sample This default template is shipped with Weave GitOps Enterprise to help you get started with Pipelines. For flexibility, this allows the template user to specify the names of the clusters where the application is deployed, and to vary the namespace per cluster. This works even in a tenancy model where environments coexist on the same cluster and use namespaces for isolation. 
This example extends the above to add a promotion strategy. In this case, it will raise a pull request to update the application version in subsequent environments. For guidance on configuring credentials, find instructions in the Promoting Applications documentation. You must add a comment to the HelmRelease or Kustomization patch where the Then the marker would be: Find more guidance on adding markers here. This example shows you how you can configure multiple resources in a single template and simplify creation through common naming strategies. The notification controller communicates update events from the leaf clusters where applications are deployed to the management cluster, where the Pipeline Controller resides and orchestrates. For the For the s GitOpsTemplates provide a highly flexible way for platform and application teams to work together with Pipelines. You can hard-code values, offer a range of accepted values, or allow the template consumer to provide input based on your organization's requirements. Templates are subject to RBAC as with any Kubernetes resource, enabling you to easily control which tenants have access to which templates. For full details on GitOpsTemplates, read our documentation. Using Flux's Notification Controller, a Jenkins Webhook can be invoked on Pipeline promotion events. To enable external callers to trigger a build on a job, an additional "Generic Webhook Trigger" plugin is required as Jenkins does not have this functionality built-in. After the plugin is installed a new "Generic Webhook Trigger" job configuration option is available. The only mandatory field is the "Token". Without this token, Jenkins will not know which build should be triggered. To access fields from the pipeline event payload, each field has to be defined as a "Post content parameters". In order to be able to invoke a generic webhook, a notification provider has to be defined. 
Jenkins expects the secret token which you configured above as a GET parameter or in the request header. The secret token can be stored in a Secret: Now we can define a Notification Provider using this secret: We can configure an Alert to use the Using Flux's Notification Controller, a Tekton EventListener can be triggered on Pipeline promotion events. In this tutorial, we have two tasks to demonstrate how to use parameter values from the Pipeline event payload. Both tasks print out messages with information about the pipeline promotion. Each task has three parameters: The In order to be able to trigger a Pipeline from an external source, we need three Tekton resources. A JSON payload from the Notification Service about a Pipeline promotion looks like this: In our tasks, we are using only the The template has the same parameters as the To access all required resources, we need an extra service account: With this At this point, we should have a In this case, we are using Tekton in the same cluster, so we can use an internal address to access the We can configure an Alert to use the Warning This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments. Pipelines allow you to configure automatic promotions of applications through a consecutive set of environments, e.g. from dev to staging to production. The environments are defined in the Info At the moment only applications defined as Flux The Getting Started Guide describes how to create a basic pipeline for an application so you can visualize its deployments across a series of environments. You may also configure a pipeline in order to promote applications across a series of environments. 
There are currently two supported strategies for application promotions: Before configuring any of the above promotion strategies, you need to setup notifications from all your environments so that whenever a new version gets deployed, the promotion webhook component of the pipeline controller is notified and takes an action based on the pipeline definition. The rest of this guide describes the configuration needed to setup application promotion via pipelines. Applications deployed in leaf clusters use the Flux notification controller running on each leaf cluster, to notify the management cluster of a successful promotion. This requires network connectivity to be established between the leaf cluster and the management cluster. The component responsible for listening to incoming notifications from leaf clusters is the pipeline controller. It hosts a webhook service that needs to be exposed via an ingress resource to make it available for external calls. Exposing the webhook service is done via the Weave GitOps Enterprise Helm chart values and the configuration used depends on your environment. The example below shows the configuration for NGINX ingress controller and needs to be adjusted if another ingress controller is used: You will need the externally reachable URL of this service later on in this guide. Once the webhook service is exposed over HTTP/S, you need to create alert/provider resources to send notifications to it from leaf clusters. These notifications represent successful promotions for applications running on the leaf clusters. Successful promotion events are triggered by Flux's notification controller. 
You create a Provider pointing to the promotion webhook exposed earlier and an Alert targeting the app's HelmRelease: In the example above, the Note that by default, the promotion webhook endpoint is exposed at Tip You may also use the generic webhook provider type that supports HMAC verification to ensure incoming notifications originate from authenticated sources. The Weave GitOps Enterprise can then parse the incoming URL path to identify the pipeline resource and look up the next environment for the defined promotion action. An example Alert might look like this: Tip Be sure to create the Provider/Alert tuple on each of the leaf clusters targeted by a pipeline. Now as soon as the Danger Creating pull requests requires a personal access token with write access to your git repo. If the secret containing the token is compromised (and you could assume it as a likely scenario), it could in principle allow someone to delete your production applications. Please make sure you understand the Security section below before taking the steps to enable automated pull requests. This section covers adding a promotion by pull request (PR) strategy, so that whenever the application defined in a pipeline is upgraded in one of the pipeline's environments, a PR is created that updates the manifest file setting the application version in the next environment. The dynamic nature of GitOps deployments requires you to assist Weave GitOps a little with information on which repository hosts the manifest files, how to authenticate with the repository and the Git provider API, and which file hosts the version definition for each environment. Only allow creation of RBAC resources from paths where compliance controls are in place. For example, do not allow regular users to create or update RBAC resources; or, if users must create RBAC resources, restrict them by namespace. 
Follow the principle of "Least Privilege" RBAC as explained in Kubernetes RBAC Good Practices, with emphasis on the following: Assign permissions at the namespace level where possible. Use RoleBindings as opposed to ClusterRoleBindings to give users rights only within a specific namespace. Avoid providing wildcard permissions when possible, especially to all resources. As Kubernetes is an extensible system, providing wildcard access gives rights not just to all object types that currently exist in the cluster, but also to all object types which are created in the future. It is also important to note that list and watch access also effectively allow users to read Secret contents. By following the guidelines above, you can have a safe initial configuration. However, given there are no deny semantics in RBAC, you need to guard future changes. An RBAC Role or ClusterRole contains rules that represent a set of permissions. Permissions are purely additive (there are no "deny" rules). You should ensure that attempts to break this contract are blocked and detected. You could achieve it by using Weave GitOps' Policy capabilities. The Policy Agent acts in two complementary modes: - Admission Controller protects from any attempt to create non-compliant RBAC resources that would end granting access to the secret. - Audit helps you identify already existing resources that are out of compliance. For example, roles created before policy agent was introduced as admission controller. Once you have enabled Policy, the Policy Library gives you a set of good practices policies that will help you keep pipeline secrets secure according to the previous RBAC recommendations. Deploy them as Kustomization based on the following example: Tip In case you don't have access to the Policy Library, work with your Weaveworks Technical Account Manager (TAM) or Weaveworks Customer Reliability Engineer (CRE) to help with this step. 
Policies typically allow exclusions, to accommodate privileged workloads like Flux. You can manage them via PolicyConfig. For example, in order to allow Flux you could use the following Remind not allowing users to create RBAC resources without compliance checks. Otherwise, they could create RBAC resources that could escape this runtime control. In addition to guarding against privilege escalation via RBAC, you should guard against privilege escalation through workloads: Permission to create workloads (either Pods, or workload resources that manage Pods) in a namespace implicitly grants access to many other resources in that namespace, such as Secrets, ConfigMaps, and PersistentVolumes that can be mounted in Pods You could do that by creating pipeline namespaces to hold the Pipeline and its Secret, without permission to run workloads. You could enforce the latter one by using the Policy Containers Should Not Run In Namespace from the Policy Library and PolicyConfig as follows: Tip Update updates when onboarding a new pipeline. Consider using Weave Gitops self-service capabilities GitOps Templates or GitOpsSets to help you with the task. To enable the Pipeline Controller to read the secret, we need to grant access via RBAC. The promotion credentials secret needs to be in the same namespace as the Use pipeline promotions security to verify that your environments meets the security context described earlier. Once deployed you could see how the different resources are being rejected. See those rejections in the Violations UI: In addition, verify that the Pipeline controller can only get the secret by the following tests: List access is denied: Get access is allowed: For example, if the case of GitHub, use fine-grained tokens to only allow access to the single repo that your configuration manifests exist. For example, using github and fine-grained tokens you could do so. The discovery of the version field is done using deterministic markers in a YAML manifest file. 
An example The value of the Weave GitOps Enterprise will look for this marker whenever it receives an event from the respective HelmRelease of one of the leaf clusters and patch the file with the version denoted in the event (see the section above for instructions on setting up notification events from leaf clusters). Finally, it will create a Git provider PR to update the version of the application for the next environment in the pipeline. The following Git providers are currently support by this promotion strategy: Select your Git provider via More info in the spec. In the journey of creating a pull request, there are different secrets involved: Create a Kubernetes secret with the previous data. Tip The field More info in the spec This section explains how to configure pipelines to work with external CI systems that are responsible for application promotions. This strategy uses the notification controller running on the management cluster, to forward any notifications received by the promotion webhook, from leaf clusters to external CI systems. This requires to patch the Flux manifests of the management cluster, in order to allow objects of type You can now create Provider/Alert resources on the management cluster to forward notifications to external systems. For example, the Provider resource shown below is used to invoke a GitHub Actions workflow on a repository: To use this Provider, add an Alert that uses the pipeline resource defined on the management cluster as an event source. An example of such an Alert is shown below: The notification controller running on the management cluster is now configured to forward any promotion notifications received from leaf clusters. 
To actually use this strategy from a pipeline, set the promotion field as shown below: Promotion notifications from leaf clusters should now be forwarded via the notification controller running on the management cluster and should include information about the version of the application being promoted. The supported strategies mentioned above, do not require any user interaction when handling promotions. However, there is often a need for a human operator to manually approve a promotion to the next environment. To achieve that, set the When this key is set and a promotion is detected, Weave GitOps will prompt the user to manually promote the application to the next environment, via the use of a button shown under the next environment. By default if a promotion fails, an exponential back-off retry happens and returns with an error only after three retries. Through Helm values, the retry logic is configurable. The promotion happens through an HTTP endpoint call, that endpoint may has connection timeout limits, that's why the The promotion endpoint can be exposed to the internet (for example github actions), to mitigate DoS attacks, the endpoint has rate limits. By default it's 20 requests per 30 seconds. Rate limiting can be configured through Helm values: import TierLabel from "../../../_components/TierLabel"; The Pipeline API defines a resource for continuous delivery pipelines. An example of a fully defined pipeline that creates pull requests for application promotions is shown below. The documentation for version This section provides a recommended way to configure RBAC in the context of policies. It is oriented to the journey that you expect your users to have. The policy journey in the UI involves several resources. We have the Policies that are used by the agent, the resulting Violations when the agent enforces those policies, and the PolicyConfigs that the user can configure to override policy parameters. 
The violations are essentially kubernetes events that contain the Validation object. In order to view those resources, users would need to have read access to the An example of a configuration to achieve this purpose could be seen below with Weave GitOps Enterprise enables developers and operators to check policy violations early in their software development life cycle, specifically at commit and build time. Developers and operators can have Weave Policy Validator integrated in their CI tools to validate whether their code changes are violating any policies or not. Weave GitOps Enterprise offer a policy engine image that can be used to perform commit/build time checks.The image can be found on Docker Hub under the name: Policies can be a helm chart, kustomize directory or just plain kubernetes yaml files. Example of policies kustomize directory Weave validator supports auto-remediation functionality which creates a pull request with suggested fixes to remediate the reported violations. Supported in: To enable it you need to provide The token must have the permission to create a pull request. See how to setup the Github Action Enabling the Weave Policy Engine features in Weave GitOps is done by running the policy agent on the cluster. This section gives an overview of the policy ecosystem and the steps required for installing and running the policy agent on leaf clusters. The policy ecosystem consists of several moving parts. The two primary components are the Policy Agent and the Policy CRs. The agent runs in several modes, and uses the Policy CRs to perform validations on different resources. The results of those validations can be written to different sinks. There are two other optional components: the PolicySet, and the PolicyConfig. The PolicySet can be used to filter policies for a specific mode, while the PolicyConfig can be used to override policy parameters during the validation of a certain resource. 
You need to have a running instance of Weave GitOps with at least one CAPI provider installed to provision Kubernetes clusters. See Weave GitOps Installation page for more details about installing Weave GitOps. For the policy agent to work, it will need a source for the policies that it will enforce in the cluster. Enterprise customers should request access to fork our policy library into their local repositories. Our policy library includes an extensive list of policy CRs that cover a multitude of security and compliance benchmarks. To install the policy agent on a leaf cluster, you should select the You should then configure the You can find more about other policy profile configurations here. After the leaf cluster is provisioned and the profile is installed, you should now see the policies listed in the Policies tab in Weave GitOps UI. Now you have a provisioned cluster with these policies enforced by the policy agent. By default, the policy profile is set up to enforce policies at deployment time using admission controller, which results in blocking any deployment that violates the enforced policies. Now let's try to deploy a Kubernetes deployment that violates the Once you apply it, the policy agent will deny this request and show a violation message, and accordingly the deployment will not be created. You can go to the This view shows only the violations resulting from the admission mode by configuring the events sink. Violations Log Violations Log Details Weave Policy Engine helps users have continuous security and compliance checks across their software delivery pipeline. The engine utilizes policy-as-code to guarantee security, resilience, and coding standards across applications and infrastructure. The engine comes with 100+ policies covering numerous security and compliance benchmarks like SOC2, GDPR, PCI-DSS, HIPAA, Mitre Attack and more. 
The policy engine provides the following functionality: An out-of-the-box admission controller that monitors any changes happening to the clusters' deployments and resources, and prevents violating changes at deployment time from being deployed to clusters. Daily scans of your clusters' deployments and resources, then report back any policy violations. The audit results can be published to different data analytics tools to provide compliance posture analysis of your clusters runtime. Early feedback on policy violations at the commit or build time, by reporting policy violations right inside git or other CI tools. This helps developers and operators detect policy violations and fix them before they deploy their changes to the clusters. Users sometimes need to enforce the same policy(s) with different configurations (parameters) for different targets (workspaces, namespaces, applications, or resources). The The PolicyConfig CRD consists of two sections 1) Each PolicyConfig CR can target either workspaces, namespaces, applications or resources. Targeting the same target explicitly in multiple PolicyConfigs is not allowed, ie: you can't use the same namespace in several PolicyConfigs which target namespaces. To target workspaces: To target namespaces: To target applications: To target resources: Each PolicyConfig can override the parameters of one or more policies: While it's not possible to create PolicyConfigs that explicitly target the same targets, it can happen implicitly ex: by targeting a namespace in a PolicyConfig and targeting an application that exists in this namespace in another. Whenever targets overlap, the narrower the scope of the PolicyConfig, the more precedence it has. Accordingly in the previous example, the configuration of the PolicyConfig targeting the application will have precedence over the PolicyConfig targeting the namespace. 
Those are the possible targets from lowest to highest precedence: Note: We have a Kustomization application In the above example when you apply the 5 configurations... Note Deploying Final config values will be as follows: In the above example when you apply Final config values will be as follows: In the previous example when you apply Note Deploying Final config values will be the follows: In the above example when you apply Final config values will be as follows: In the above example when you apply Final config values will be as follows: This is an optional custom resource that is used to select a group of policies to work in specific modes. In each mode, the agent will list all the PolicySets of this mode and check which policies match any of those policysets, then validate the resources against them. If there are no PolicySets found for a certain mode, all policies will be applied during this mode. Note: Tenant Policies is always active in the Admission mode, event if it is not selected in the Example PolicySets can be created for any of the three modes supported by the agent: Policies can be grouped by their ids, categories, severities, standards and tags The policy will be applied if any of the filters are matched. Previously the agent was configured with which policysets to use in each mode. Now we removed this argument from the agent's configuration and add the mode to the Policyset itself. The Policy CRD is used to define policies which are then consumed and used by the agent to validate entities. It uses OPA Rego Language to evaluate the entities. You should have a policy library repo set up which includes your policies resources as CRDs. Info Enterprise customers should have access to fork policy library repo into their local repositories. 
Tenant policies are special policies that are used by the Multi Tenancy feature in Weave GitOps Enterprise Tenant policies have a special tag Starting from version To enable mutating resources, policies must have field Example The policy validation object is the result of validating an entity against a policy. It contains all the necessary information to give the user a clear idea on what caused this violation or compliance. Compatible with Policy Library versions: Needs this migration steps to be compatible with the following versions: Compatible with Policy Library versions: Needs this migration steps to be compatible with the following versions: While both v.0.4.0 and v1.0.0 are compatible with the agent. Only v1.1.0 includes the modification needed to make Controller Minimum Replica Count policy with with Weave policy profile provides policies to automate the enforcement of best practices and conventions. It ensures the compliance of workloads through the use of a policy agent that provides an admission controller webhook that stops violating resources from deploying to a cluster and runs a daily audit that reports violating resources already deployed. The profile configuration contains two main sections Policies are provided in the profile as Custom Resources. The agent reads from the policies deployed on the cluster and runs them during each admission request or when auditing a resource. Policies are hosted in a policy library which is usually a git repository. They are fetched in the profile through the use of By default all policies in the specified path would be deployed in order to specify which policies should be deployed in a library, a The profile then needs to be configured with the necessary config to be able to reach the repository that is acting as a policy library. There is the option of referencing an existing policy library source instead of creating a new one. 
The The agent needs the following parameters to be provided in the configuration yaml file: The following optional parameters can also be provided: This contains the admission module that enforces policies. It uses the Works with policies of provider To enable admission control: Enabling admission controller requires certificates for secure communication with the webhook client and the admission server. The best way to achieve this is by installing cert manager and then configuring the profile as follows: The cert manager can also be installed by installing the cert manager profile while creating the cluster. There is the option of providing previously generated certificates although it is not recommended and it is up to the user to manage it: If the agent webhook could not be reached or the request failed to complete, the corresponding request would be refused. To change that behavior and accepts the request in cases of failure, this needs to be set: The audit functionality provides a full scan of the cluster(s) and reports back policy violations. This usually is used for policy violations reporting, and compliance posture analysis against known benchmarks like PCI DSS, CIS, .etc. Works with policies of provider To enable the audit functionality: The audit will be performed when the agent starts and then again periodically at an interval of your choice in hours (default is 24 hours). The results of the audit will be published to the configured sink(s). This is a webhook used to validate terraform plans. It is mainly used by the TF-Controller to enforce policies on terraform plans Works with policies of provider To enable the terraform admission control: When validating a resource, a validation object is generated that contains information about the status of that validation and metadata about the resource and policy involved. 
These objects can be exported to be visible for users as a critical part of the audit flow, but can also be useful as logs for the admission scenario. By default, the agent only writes policy validations that are violating a certain policy when performing an audit. To write compliance results as well, the following needs to be specified in the profile: The agent profile supports storing the validations in different sinks. Multiple sinks can be used at the same time: The results will be dumped into a text file in the To enable writing to a text file in audit scenario: To enable writing to a text file in admission scenario: It is possible to make the file persistent using the following configuration. This assumes that there is a PersistentVolume already configured on the cluster. The results will be written as Kubernetes events. This means that they are accessible through the kubernetes API and can be consumed by custom exporters. To enable writing Kubernetes events in audit scenario: To enable writing Kubernetes events in admission scenario: This requires the cluster to be managed using flux. It makes use of the flux notification controller to send events to multiple sources, depending on the controller configuration. The agent writes the events to the controller and it proceeds to publish it to the configured listeners. To enable writing to flux notification controller in audit scenario: To enable writing to flux notification controller in admission scenario: The results of validating entities against policies will be written to an Elasticsearch index. To enable writing to elasticsearch in audit scenario: To enable writing to elasticsearch in admission scenario: We support the following insertion modes: To help you understand the state of progressive delivery updates to your applications, Weave GitOps Enterprise uses Flagger—part of the Flux family of open source projects. 
WGE's Delivery view shows all of your deployed By default, Flagger automatically promotes a new version of an application whenever it passes the defined checks of an analysis phase. However, you can also configure webhooks to enable manual approvals of rollout stages. This guide shows you how to manually gate a progressive delivery promotion with Flagger by using the in-built load tester. You can configure Flagger to work with several types of hooks that will be called at given stages during a progressive delivery rollout. Some of these hooks allow you to manually gate whether a rollout proceeds at certain points: - Before scaling up a new deployment and canary analysis begins with Any URL can serve as a webhook target. It will approve if a The webhook will receive a JSON payload that can be unmarshaled as The Flagger documentation provides more information about webhooks. To enable manual approval of a promotion, configure the Tip We strongly recommend that you DO NOT USE the load tester for manual gating in a production environment. It lacks auth, so anyone with cluster access could open and close it. It also lacks storage, so all gates would close upon a restart. Instead, configure these webhooks for appropriate integration with a tool of your choice, such Jira, Slack, Jenkins, etc. In your canary object, add the following in the This gate is closed by default. Trigger a Canary rollout by updating your target deployment/daemonset—for example, by bumping the container image tag. A full list of ways to trigger a rollout is available here. Weave GitOps Enterprise (WGE)'s Applications > Delivery view enables you to watch the progression of a canary: Once the canary analysis has successfully completed, Flagger will call the To open the gate and confirm that you approve promotion of the new version of your application, exec into the load tester container: Flagger will now promote the canary version to the primary and complete the progressive delivery rollout. 
To manually close the gate again, issue this command: References: Built upon the core tenets of continuous integration and continuous delivery (CI/CD), progressive delivery involves gradually rolling out features to small groups of select users to balance performance with speed. Developers and DevOps teams use fine-grained controls to minimize the risks of pushing new features to the production environment. If the newly released feature proves to be stable and performant, it can then be released to all users. Flagger is a progressive delivery operator for Kubernetes and part of the Flux family of open source projects. It reduces the risk of introducing new software versions and automates production releases to improve your time to delivery. Flagger implements deployment strategies—canary releases, A/B testing, Blue/Green mirroring—using a service mesh (App Mesh, Istio, Linkerd, Kuma, Open Service Mesh) or an ingress controller (Contour, Gloo, NGINX, Skipper, Traefik, APISIX) for traffic routing. For release analysis, Flagger can query Prometheus, InfluxDB, Datadog, New Relic, CloudWatch, Stackdriver, or Graphite. For alerting it uses Slack, MS Teams, Discord, and Rocket. Using Flux allows us to manage our cluster applications in a declarative way through changes in a Git repository. Weave GitOps Enterprise integrates with Flagger in order to provide a view on progressive delivery deployments. This includes the ability to view all the resources that Flagger manages during its operation. The default ClusterRole The WGE UI's Applications > Delivery view provides an "at a glance" view so that you can understand the status of your progressive delivery rollouts across a fleet of connected clusters. This removes the cognitive overhead of having to know which objects to query and where they are located. You can also drill down into each rollout to understand its status and configuration, and view near-to-realtime data on any summary or details page. 
How to use WGE's progressive delivery offering: - if you don’t have Flagger installed on any clusters, you'll receive an onboarding message about installing it - click on the delivery tab on the menu bar to retrieve a table view of canaries with key summary information regarding their location and state - click on a canary to see more detailed information about status, gates, and other elements - click on the events tab on the detail page to see the most recent Kubernetes events for that canary and learn more about deployment history - click on the yaml tab on the detail page to see the raw yaml of the canary - view objects from any cluster/namespace that you have the appropriate permissions for, and nothing else Supported deployment strategies include: Canary Release: the system gradually shifts traffic to a new version of an application and assesses performance—either promoting the release or abandoning it, based on performance. A/B Testing: uses HTTP headers or cookies to ensure users remain on the same version of an application during a canary analysis. Blue/Green: Traffic is switched from the current application to a new version based on the success of testing. Blue/Green with Traffic Mirroring: sends copies of incoming requests to the new version of an application. The user receives the response from the current application and the other is discarded. The new version is promoted only if metrics are healthy. This guide uses Flux manifests to install Flagger and Linkerd, a CNCF project and service mesh for Kubernetes and beyond. We will walk you through a full end-to-end scenario where you will: - Install the Linkerd service mesh - Install Flagger - Deploy a sample application using a canary release strategy based on metrics provided through Linkerd's in-built Prometheus instance To install Linkerd we'll use a Kustomization file. 
It will allow us to specify the order and default namespace for the installed resources, and to generate Secrets from certificate files via the use of a To support mTLS connections between meshed pods, Linkerd requires a trust anchor certificate and an issuer certificate with its corresponding key. These certificates are automatically created via the To generate the trust anchor certificate, run: To generate the issuer certificate, run: Add the Let's add the three manifests for Linkerd components under the Note: The value for the Next, add the following manifests. The first file instructs Kustomize to patch any Note: The At this point the Once Flux reconciles this directory to the cluster, Linkerd should be installed. Before proceeding to the next step, check that all the Linkerd pods have started successfully: Note Any new directories that you add to the cluster repository while following this guide must be included in a path that Flux reconciles. To install Flagger, you'll use a Kustomization file that will define the installation order and provide a default namespace for the installed resources. Create a new Now add under this directory the three resource manifests for Flagger: - A Now add the following Kustomization file. It references all of the previous files that you've added: The Once Flux reconciles this directory to the cluster, Flagger and the load tester app should be installed. Before proceeding to the next step, check that all of your Flagger pods have started successfully: When Flagger is configured to integrate with a service mesh such as Linkerd or Istio for the rollout, this ClusterRole needs to be extended so that it can read the additional service mesh resources that Flagger generates. 
To display service mesh- or ingress-related resources, we require The following table provides a list of all the custom resources that Flagger generates grouped by provider: For example, the following manifest shows how In order to view canaries in a remote cluster from the management cluster, you need to consider the following: - The service account used to access the remote cluster needs to be able to list namespaces and custom resource definitions in the given cluster. It additionally needs to be able to impersonate users and groups. - The user or group that logs in to the management cluster, needs appropriate permissions to certain resources of the remote cluster. For example, applying the following manifest on remote clusters, ensures that the You may need to add more users/groups to the To demonstrate the progressive rollout of an application, we'll use a tiny sample web app called podinfo and configure a canary release strategy. In our example, Flagger will scale up a new version of podinfo (the canary) alongside the existing version (the primary). It will gradually increase traffic to the new version in increments of 5%, up to a maximum of 50%. Flagger will continuously monitor the new version for an acceptable request response rate and average request duration. Based on this analysis, Flagger will either update the primary to the new version or abandon the promotion, then scale the canary back down to zero. Create a new We don't need to define a service resource. This is specified within the canary definition and created by Flagger. 
Add a Kustomization file to apply all resources to the At this point, the After a short time, the status of the canary object should be set to Trigger a new rollout by bumping the version of During the progressive rollout, the canary object reports on its current status: After a short time the rollout is completed and the status of the canary object is set to Congratulations, you have now completed a progressive delivery rollout with Flagger and Linkerd! Next steps: - Explore more of what Flagger offers - Configure manual approvals for progressive delivery deployments Weave GitOps Command line utility for managing Kubernetes applications via GitOps. Validates flux compatibility Generate the autocompletion script for the specified shell Generate the autocompletion script for gitops for the specified shell. See each sub-command's help for details on how to use the generated script. Generate the autocompletion script for bash Generate the autocompletion script for the bash shell. This script depends on the 'bash-completion' package. If it is not installed already, you can install it via your OS's package manager. To load completions in your current shell session: To load completions for every new session, execute once: You will need to start a new shell for this setup to take effect. Generate the autocompletion script for fish Generate the autocompletion script for the fish shell. To load completions in your current shell session: To load completions for every new session, execute once: You will need to start a new shell for this setup to take effect. Generate the autocompletion script for powershell Generate the autocompletion script for powershell. To load completions in your current shell session: To load completions for every new session, add the output of the above command to your powershell profile. Generate the autocompletion script for zsh Generate the autocompletion script for the zsh shell. 
If shell completion is not already enabled in your environment you will need to enable it. You can execute the following once: To load completions in your current shell session: To load completions for every new session, execute once: You will need to start a new shell for this setup to take effect. Creates a resource Create a HelmRepository and HelmRelease to deploy Weave GitOps Create a HelmRepository and HelmRelease to deploy Weave GitOps Create a Terraform object Create a Terraform object Delete a resource Delete a Terraform object Display one or many Weave GitOps resources Generates a hashed secret Prints out the CLI configuration for Weave GitOps Get logs for a resource Get the runner logs of a Terraform object Replan a resource Trigger replan for a Terraform object Resume a resource Resume a Terraform object Sets one or many Weave GitOps CLI configs or resources Set the CLI configuration for Weave GitOps Suspend a resource Suspend a Terraform object Display gitops version This is a reference of all the configurable values in Weave GitOps's Helm chart. This is intended for customizing your installation after you've gone through the getting started guide. This reference was generated for the chart version 4.0.34 which installs weave gitops v0.36.0. For full documentation visit mkdocs.org and Material theme for mkdocs. Follow the installation instructions here https://squidfunk.github.io/mkdocs-material/getting-started/ Under the folder Are you running Backstage and Flux? Do you want to expose the state of your Flux resources in your Backstage portal? The We provide the full installation instructions in the plugin repository. But first you will need to install the Kubernetes plugin and configure it to access the clusters you want to query Flux resources from. You will need to install the plugin to your frontend app: Then add the components you want to your EntityPage. 
Currently, the Backstage plugin provides the following components: For example, to add the When you view components with the correct annotation: This will query across your configured clusters for Instead of displaying the state on the overview page, it's possible to compose a page displaying the state of resources. For example, to add a page You can connect the plugin to your Weave GitOps installation through your config: NOTE: The plugin will generate URLs relative to this URL and link to them from the displayed resources. We \u2764\ufe0f your comments and suggestions as we look to make successfully adopting a cloud-native approach, to application deployment on Kubernetes with GitOps, easier and easier. There are a number of ways you can reach out: Weaveworks is utilizing Pendo, a product-analytics app, to gather anonymous user behavior analytics for both Weave GitOps and Weave GitOps Enterprise. We use this data so we can understand what you love about Weave GitOps, and areas we can improve. Weave GitOps OSS users will be notified when you create the dashboard for the first time via gitops create dashboard or when you use gitops run for the first time and decide to install the dashboard via that functionality. Analytics will not be enabled until after this notification so that you can opt out before sending analytics data. For Weave GitOps Enterprise users, this functionality is turned on by default. Further below we go into more detail about how you can control this functionality. We want to ensure that we are designing the best features, addressing the most pressing bugs, and prioritizing our roadmap appropriately for our users. Collecting analytics on our users\u2019 behaviors gives us valuable insights and allows us to conduct analyses on user behavior within the product. This is important for us so we can make informed decisions- based on how, where and when our users use Weave GitOps - and prioritize what is most important to users like you. 
We\u2019d like to understand the usage of the graph and dependency tabs within the dashboard. If users are utilizing this feature, we would like to understand the value and how we can improve that feature. However, if users aren\u2019t using it, we can conduct research to understand why and either fix it, or come to the conclusion that it really doesn\u2019t serve any utility and focus our efforts on more valuable features. Weave GitOps\u2019s anonymous user and event data has a 24 month retention policy. The default value for data retention in Pendo is 7 years. For more information on Pendo\u2019s data storage policies, click here. Weave GitOps gathers data on how the CLI and Web UI are used. There is no way for us or Pendo to connect our IDs to individual users or sites. For the CLI, we gather usage data on: For the Web UI, we gather usage data on: Weave GitOps CLI analytics are sent at startup. The dashboard analytics are sent through its execution. Both CLI and Dashboard analytics are sent to Pendo over HTTPS. The CLI code is viewable in pkg/analytics. It will ignore any errors, e.g. if you don\u2019t have any network connection. The dashboard setup code is viewable in ui/components/Pendo.tsx - this will fetch a 3rd party javascript from Pendo\u2019s servers. All the data collected, analytics, and feedback are for the sole purpose of creating better product experience for you and your teams. We would really appreciate it if you left the analytics on as it helps us prioritize which features to build next and what features to improve. However, if you do want to opt out of Weave GitOps\u2019s analytics you can opt out of CLI and/or Dashboard analytics. We have created a command to make it easy to turn analytics on or off for the CLI. 
To disable analytics: gitops set config analytics false To enable analytics: gitops set config analytics true You need to update your helm release to remove \ud83d\udc4b Come talk to us and other users in the #weave-gitops channel on Weaveworks Community Slack. Invite yourself if you haven't joined yet. The Flux project has a fantastic community to help support your GitOps journey, find more details on how to reach out via their community page Weaveworks provides Weave GitOps Enterprise, a continuous operations product that makes it easy to deploy and manage Kubernetes clusters and applications at scale in any environment. The single management console automates trusted application delivery and secure infrastructure operations on premise, in the cloud and at the edge. To discuss your support needs, please contact us at sales@weave.works. Got a suggestion for this list? Please open a pull request using the \"Edit this page\" link at the bottom. \"GitOps is the best thing since configuration as code. Git changed how we collaborate, but declarative configuration is the key to dealing with infrastructure at scale, and sets the stage for the next generation of management tools\" - Kelsey Hightower, Staff Developer Advocate, Google. Weave GitOps improves developer experience\u2014simplifying the complexities and cognitive load of deploying and managing cloud native apps on Kubernetes so that teams can go faster. It\u2019s a powerful extension of Flux, a leading GitOps engine and Cloud Native Computing Foundation project. Weaveworks are the creators of Flux. Weave GitOps\u2019 intuitive user interface surfaces key information to help application operators easily discover and resolve issues\u2014simplifying and scaling adoption of GitOps and continuous delivery. The UI provides a guided experience that helps users to easily discover the relationships between Flux objects and build understanding while providing insights into application deployments. 
Today Weave GitOps defaults are Flux, Kustomize, Helm, SOPS, and Kubernetes Cluster API. If you use Flux already, then you can easily add Weave GitOps to create a platform management overlay. Tip Adopting GitOps can bring a number of key benefits\u2014including faster and more frequent deployments, easy recovery from failures, and improved security and auditability. Check out our GitOps for Absolute Beginners eBook and Guide to GitOps for more information. This user guide provides content that will help you to install and get started with our free and paid offerings: - Weave GitOps Open Source: a simple, open source developer platform for people who don't have Kubernetes expertise but who want cloud native applications. It includes the UI and many other features that take your team beyond a simple CI/CD system. Experience how easy it is to enable GitOps and run your apps in a cluster. Go here to install. - Weave GitOps Enterprise: an enterprise version that adds automation and 100% verifiable trust to existing developer platforms, enabling faster and more frequent deployments with guardrails and golden paths for every app team. Note that Enterprise offers a more robust UI than what you'll find in our open source version. Go here to install. Tip Want to learn more about how Weave GitOps Enterprise can help your team? Get in touch with sales@weave.works to discuss your needs. Weave GitOps works on any Chromium-based browser (Chrome, Opera, Microsoft Edge), Safari, and Firefox. We only support the latest and prior two versions of these browsers. To give Weave GitOps a test drive, we recommend checking out the Open Source version and its UI, then deploying an application. Let's take a closer look at the features it offers you, all for free. Like our Enterprise version, Weave GitOps Open Source fully integrates with Flux as the GitOps engine to provide: Some of the things you can do with it: OK, time to install! 
Weave GitOps Enterprise provides monitoring telemetry and tooling for metrics and profiling. WGE generates Prometheus metrics for monitoring both performance and business operations. The following configuration options are available for you to configure Warning The monitoring server holds private services, so you probably won't need to expose anything beyond your cluster. If you must, ensure that it is properly secured. This setup follows the same monitoring approach as Flux and is based on Prometheus Operator. Adapt it to your context as needed. Weave GitOps Overview Monitor Weave GitOps golden signals for API server and controllers: Weave GitOps Runtime Monitor Weave GitOps Go runtime metrics like memory usage, memory heap, and Goroutines, among others. Explorer You can also monitor Explorer golden signals. During operations, profiling is useful for gaining a deeper understanding of how Weave GitOps runtime behaves. Given that Weave GitOps is written in Go, profiling happens through pprof. It is exposed as a web endpoint by pprof http. Go here for more info on using This document defines security reporting, handling, disclosure, and audit information for Weave GitOps. Vulnerability disclosures are announced publicly. Disclosures will contain an overview, details about the vulnerability, a fix that will typically be an update, and optionally a workaround if one is available. We will coordinate publishing disclosures and security releases in a way that is realistic and necessary for end users. We prefer to fully disclose the vulnerability as soon as possible once a user mitigation is available. Disclosures will always be published in a timely manner after a release is published that fixes the vulnerability. Here is an overview of all our published security advisories. In line with the mantra \u201ccattle, not pets,\u201d Weave GitOps Enterprise (WGE) simplifies managing cluster lifecycle at scale\u2014even massive scale. 
Through pull requests, which make every action recorded and auditable, WGE makes it possible for teams to create, update, and delete clusters across entire fleets. Breaking things is harder, and recovery is easier. WGE further simplifies the cluster lifecycle management process by providing both a user interface (UI) and a command line interface (CLI) to interact with and manage clusters on-prem, across clouds, and in hybrid environments. You can even use our UI to delete clusters\u2014all it takes is the press of a button that spins up a pull request. WGE fully supports a range of options, including: - Crossplane integration - Terraform integration, with a Terraform Controller that follows the patterns established by Flux - Cluster API The Weave GitOps Enterprise UI enables you to install software packages to your bootstrapped cluster via the Applications view of our user interface, using a Helm chart (via a HelmRelease) or Kustomization. First, find the \"Add an Application\" button: A form will appear, asking you to select the target cluster where you want to add your Application. Select the source type of either your Git repository or your Helm repository from the selected cluster: If you select Git repository as the source type, you will be able to add the Application from Kustomization: If you select Helm repository as the source type, you will be able to add Application from HelmRelease. And if you choose the profiles Helm chart repository URL, you can select a profile from our Profiles list. Finally, you can create a pull request to your target cluster and see it on your GitOps repository. Our user guide provides two pathways to deployment: Just click the option you want to get started with, and let's go. We'll use this page to help you move past common troublesome situations. To authenticate using Git during the pull request creation, you will need to select the Git repository where you'll create the pull request. 
Depending on the action performed on the resource (creation/deletion/editing), the default Git repository selected in the UI is determined in the following order: the repository used to initially create the resource found in the templates.weave.works/v1alpha1
+
+GitOpsSet
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+apiVersion
+string
+
+templates.weave.works/v1alpha1
+
+
+
+
+kind
+string
+
+
+GitOpsSet
+
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+
+Refer to the Kubernetes API documentation for the fields of the
+
+metadata
field.
+
+
+
+
+spec
+
+
+GitOpsSetSpec
+
+
+
+
+
+
+
+
+
+
+
+
+
+suspend
+
+bool
+
+
+(Optional)
+
+
+
+
+
+generators
+
+
+[]GitOpsSetGenerator
+
+
+
+
+
+
+
+
+templates
+
+
+[]GitOpsSetTemplate
+
+
+
+
+
+
+
+
+
+serviceAccountName
+
+string
+
+
+(Optional)
+
+
+
+
+
+
+status
+
+
+GitOpsSetStatus
+
+
+
+
+APIClientGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+interval
+
+
+Kubernetes meta/v1.Duration
+
+
+
+
+
+
+
+
+endpoint
+
+string
+
+
+(Optional)
+
+
+
+
+
+method
+
+string
+
+
+
+
+
+
+
+jsonPath
+
+string
+
+
+
+
+
+
+
+headersRef
+
+
+HeadersReference
+
+
+
+(Optional)
+
+
+
+
+
+body
+
+
+Kubernetes pkg/apis/apiextensions/v1.JSON
+
+
+
+(Optional)
+
+
+
+
+
+singleElement
+
+bool
+
+
+(Optional)
+
+
+
+
+
+
+secretRef
+
+
+Kubernetes core/v1.LocalObjectReference
+
+
+
+
+ClusterGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+selector
+
+
+Kubernetes meta/v1.LabelSelector
+
+
+
+(Optional)
+
+ConfigGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+kind
+
+string
+
+
+
+
+
+
+
+
+name
+
+string
+
+
+
+GitOpsSetGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+list
+
+
+ListGenerator
+
+
+
+
+
+
+
+
+pullRequests
+
+
+PullRequestGenerator
+
+
+
+
+
+
+
+
+gitRepository
+
+
+GitRepositoryGenerator
+
+
+
+
+
+
+
+
+ociRepository
+
+
+OCIRepositoryGenerator
+
+
+
+
+
+
+
+
+matrix
+
+
+MatrixGenerator
+
+
+
+
+
+
+
+
+cluster
+
+
+ClusterGenerator
+
+
+
+
+
+
+
+
+apiClient
+
+
+APIClientGenerator
+
+
+
+
+
+
+
+
+imagePolicy
+
+
+ImagePolicyGenerator
+
+
+
+
+
+
+
+
+
+config
+
+
+ConfigGenerator
+
+
+
+
+GitOpsSetNestedGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+name
+
+string
+
+
+(Optional)
+
+
+
+
+
+list
+
+
+ListGenerator
+
+
+
+
+
+
+
+
+gitRepository
+
+
+GitRepositoryGenerator
+
+
+
+
+
+
+
+
+ociRepository
+
+
+OCIRepositoryGenerator
+
+
+
+
+
+
+
+
+pullRequests
+
+
+PullRequestGenerator
+
+
+
+
+
+
+
+
+cluster
+
+
+ClusterGenerator
+
+
+
+
+
+
+
+
+apiClient
+
+
+APIClientGenerator
+
+
+
+
+
+
+
+
+imagePolicy
+
+
+ImagePolicyGenerator
+
+
+
+
+
+
+
+
+
+config
+
+
+ConfigGenerator
+
+
+
+
+GitOpsSetSpec
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+suspend
+
+bool
+
+
+(Optional)
+
+
+
+
+
+generators
+
+
+[]GitOpsSetGenerator
+
+
+
+
+
+
+
+
+templates
+
+
+[]GitOpsSetTemplate
+
+
+
+
+
+
+
+
+
+serviceAccountName
+
+string
+
+
+(Optional)
+
+GitOpsSetStatus
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+ReconcileRequestStatus
+
+
+github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
+
+
+
+
+ReconcileRequestStatus
are embedded into this type.)
+
+
+
+
+observedGeneration
+
+int64
+
+
+(Optional)
+
+
+
+
+
+conditions
+
+
+[]Kubernetes meta/v1.Condition
+
+
+
+(Optional)
+
+
+
+
+
+
+inventory
+
+
+ResourceInventory
+
+
+
+(Optional)
+
+GitOpsSetTemplate
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+repeat
+
+string
+
+
+
+
+
+
+
+
+content
+
+
+k8s.io/apimachinery/pkg/runtime.RawExtension
+
+
+
+
+GitRepositoryGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+repositoryRef
+
+string
+
+
+
+
+
+
+
+files
+
+
+[]RepositoryGeneratorFileItem
+
+
+
+
+
+
+
+
+
+directories
+
+
+[]RepositoryGeneratorDirectoryItem
+
+
+
+
+HeadersReference
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+kind
+
+string
+
+
+
+
+
+
+
+
+name
+
+string
+
+
+
+ImagePolicyGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+policyRef
+
+string
+
+
+
+ListGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+elements
+
+
+[]Kubernetes pkg/apis/apiextensions/v1.JSON
+
+
+
+
+MatrixGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+generators
+
+
+[]GitOpsSetNestedGenerator
+
+
+
+
+
+
+
+
+
+singleElement
+
+bool
+
+
+(Optional)
+
+OCIRepositoryGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+repositoryRef
+
+string
+
+
+
+
+
+
+
+files
+
+
+[]RepositoryGeneratorFileItem
+
+
+
+
+
+
+
+
+
+directories
+
+
+[]RepositoryGeneratorDirectoryItem
+
+
+
+
+PullRequestGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+interval
+
+
+Kubernetes meta/v1.Duration
+
+
+
+
+
+
+
+
+driver
+
+string
+
+
+
+
+
+
+
+serverURL
+
+string
+
+
+(Optional)
+
+
+
+
+
+repo
+
+string
+
+
+
+
+
+
+
+secretRef
+
+
+Kubernetes core/v1.LocalObjectReference
+
+
+
+
+
+
+
+
+labels
+
+[]string
+
+
+(Optional)
+
+
+
+
+
+
+forks
+
+bool
+
+
+(Optional)
+
+RepositoryGeneratorDirectoryItem
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+path
+
+string
+
+
+
+
+
+
+
+
+exclude
+
+bool
+
+
+
+RepositoryGeneratorFileItem
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+path
+
+string
+
+
+
+ResourceInventory
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+entries
+
+
+[]ResourceRef
+
+
+
+
+ResourceRef
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+id
+
+string
+
+
+
+
+
+
+
+
+v
+
+string
+
+
+
+gen-crd-api-reference-docs
+
+pool:
+ vmImage: ubuntu-latest
+
+container:
+ image: weaveworks/weave-iac-validator:v1.1-azure
+
+steps:
+- script: weave-validator --path
404 - Not found
Backstage
@weaveworksoss/backstage-plugin-flux
Backstage plugin provides a set of components that you can add to your existing Backstage app to display the state of Flux resources.Installation¶
# From your Backstage root directory
+yarn add --cwd packages/app @weaveworksoss/backstage-plugin-flux
+
EntityFluxHelmReleasesCard
to your Entity home page for components with the backstage.io/kubernetes-id
entity annotation.import {
+ EntityFluxHelmReleasesCard,
+} from '@weaveworksoss/backstage-plugin-flux';
+import { isKubernetesAvailable } from '@backstage/plugin-kubernetes';
+
+const overviewContent = (
+ <Grid item md={6}>
+ <EntityAboutCard variant="gridItem" />
+ </Grid>
+
+ <EntitySwitch>
+ <EntitySwitch.Case if={isKubernetesAvailable}>
+ <EntityFluxHelmReleasesCard />
+ </EntitySwitch.Case>
+ </EntitySwitch>
+);
+
apiVersion: backstage.io/v1alpha1
+kind: Component
+metadata:
+ name: catalogue-service
+ description: A microservices-demo service that provides catalogue/product information
+ annotations:
+ backstage.io/kubernetes-id: podinfo
+
HelmReleases
that have the correct label:apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+ name: podinfo
+ namespace: podinfo
+ # The label here is matched to the Backstage Entity annotation
+ labels:
+ backstage.io/kubernetes-id: podinfo
+spec:
+ interval: 5m
+ chart:
+ spec:
+ chart: podinfo
+ version: '6.3.6'
+ sourceRef:
+ kind: HelmRepository
+ name: podinfo
+ namespace: podinfo
+
Building a Custom Page with Resources¶
/kustomizations
to your Entity for components with the backstage.io/kubernetes-id
entity annotation:import {
+ EntityFluxGitRepositoriesCard,
+ EntityFluxKustomizationsCard,
+} from '@weaveworksoss/backstage-plugin-flux';
+import { isKubernetesAvailable } from '@backstage/plugin-kubernetes';
+
+const serviceEntityPage = (
+ // insert in the page where you need it
+
+ <EntityLayout.Route path="/kustomizations" title="Kustomizations" if={isKubernetesAvailable}>
+ <Grid container spacing={3} alignItems="stretch">
+ <Grid item md={12}>
+ <EntityFluxKustomizationsCard />
+ </Grid>
+ <Grid item md={12}>
+ <EntityFluxGitRepositoriesCard />
+ </Grid>
+ </Grid>
+ </EntityLayout.Route>
+);
+
Connecting to Weave GitOps¶
app:
+ title: Backstage Example App
+ baseUrl: http://localhost:3000
+...
+gitops:
+ # Set this to be the root of your Weave GitOps application
+ baseUrl: https://example.com
+
How to Inject Credentials Into Your Template ENTERPRISE¶
apiVersion: templates.weave.works/v1alpha2
+kind: GitOpsTemplate
+metadata:
+ name: capa-cluster-template
+spec:
+ resourcetemplates:
+ - contents:
+ - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
+ kind: AWSCluster
+ metadata:
+ name: "${CLUSTER_NAME}"
+ spec:
+ region: "${AWS_REGION}"
+
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
+kind: AWSClusterStaticIdentity
+metadata:
+ name: "test-account"
+spec:
+ secretRef:
+ name: test-account-creds
+ namespace: capa-system
+ allowedNamespaces:
+ selector:
+ matchLabels:
+ cluster.x-k8s.io/ns: "testlabel"
+
test-account
when creating the cluster by using the Infrastructure provider credentials dropdown on the Create new cluster with template page:apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
+kind: AWSCluster
+metadata:
+ name: example-cluster
+spec:
+ region: eu-north-1
+ identityRef:
+ kind: AWSClusterStaticIdentity
+ name: test-account
+
identityRef
s¶identityRef
on the the provider cluster object, e.g. AWSCluster
, AzureCluster
or VSphereCluster
. The following kinds are currently supported, along with their corresponding Cluster kinds:
AWSClusterStaticIdentity
: AWSCluster
AWSClusterRoleIdentity
: AWSCluster
AzureClusterIdentity
: AzureCluster
VSphereClusterIdentity
: VSphereCluster
Cluster Management Troubleshooting ENTERPRISE¶
Git Repositories and Resources¶
templates.weave.works/create-request
annotation (in the case of editing or deleting of resources) metadata:
+ annotations:
+ templates.weave.works/create-request: "{...\"parameter_values\":{...\"url\":\"https://github.com/weave-example-org/weave-demo\"}"
+
weave.works/repo-role: default
annotation metadata:
+ annotations:
+ weave.works/repo-role: default
+
metadata:
+ name: flux-system
+ namespace: flux-system
+
weave.works/repo-role: default
to an appropriate Git repository.Overriding the Calculated Git Repository HTTPS URL¶
ssh://git@github.com/org/repo.git
, the system will try and convert it to https://github.com/org/repo.git
.ssh://git@interal-ssh-server:2222/org/repo.git
and the correct HTTPS URL may be https://gitlab.example.com/org/repo.git
. weave.works/repo-https-url
annotation on the GitRepository
object:apiVersion: source.toolkit.fluxcd.io/v1beta1
+kind: GitRepository
+metadata:
+ name: repo
+ namespace: flux-system
+ annotations:
+ // highlight-start
+ weave.works/repo-https-url: https://gitlab.example.com/org/repo.git
+ // highlight-end
+spec:
+ interval: 1m
+ url: ssh://git@interal-ssh-server:2222/org/repo.git
+
Deploying CAPA with EKS ENTERPRISE¶
Prerequisites¶
github cli
>= 2.3.0 (source)kubectl
(source)eksctl
(source)aws cli
(source)clusterctl
>= v1.1.3 (source); follow these steps to initialise the cluster and enable feature gatesclusterawsadm
>= v1.1.0, following Cluster API's instructionsAWS_ACCESS_KEY_ID
and AWS_SECRET_ACCESS_KEY
with either aws configure
or by exporting it in the current shell.GITHUB_TOKEN
as an environment variable in the current shell. It should have permissions to create Pull Requests against the cluster config repo.Multitenancy¶
1. Add Common RBAC to Your Repository¶
./clusters/<cluster-namespace>/<cluster-name>
and ./clusters/bases
../clusters/bases/rbac
is an easy way to configure this!curl -o clusters/bases/rbac/wego-admin.yaml https://docs.gitops.weave.works/assets/files/wego-admin-c80945c1acf9908fe6e61139ef65c62e.yaml
+
Expand to see full template yaml
+
2. Build a Kubernetes Platform with Built-in Components Preconfigured for Your Organization¶
curl -o clusters/management/capi/templates/capa-template.yaml https://docs.gitops.weave.works/assets/files/capa-template-49001fbae51e2a9f365b80caebd6f341.yaml
+
{% include '/assets/templates/capa-template.yaml' %}
+
3. Add a Cluster Bootstrap Config¶
kubectl create secret generic my-pat --from-literal GITHUB_TOKEN=$GITHUB_TOKEN
+
curl -o clusters/management/capi/bootstrap/capi-gitops-cluster-bootstrap-config.yaml https://docs.gitops.weave.works/assets/files/capi-gitops-cluster-bootstrap-config-d9934a1e6872a5b7ee5559d2d97a3d83.yaml
+
GITOPS_REPO
variable to point to your clusterExpand to see full yaml
+
4. Delete a Cluster with the Weave GitOps Enterprise UI¶
Create a PR to delete clusters
buttonRemove clusters
button5. Disable CAPI Support¶
HelmRelease
object with the global.capiEnabled
value set to false
:---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+ name: weave-gitops-enterprise-charts
+ namespace: flux-system
+spec:
+ interval: 60m
+ secretRef:
+ name: weave-gitops-enterprise-credentials
+ url: https://charts.dev.wkp.weave.works/releases/charts-v3
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+ name: weave-gitops-enterprise
+ namespace: flux-system
+spec:
+ chart:
+ spec:
+ interval: 65m
+ chart: mccp
+ sourceRef:
+ kind: HelmRepository
+ name: weave-gitops-enterprise-charts
+ namespace: flux-system
+ version: 0.12.0
+ install:
+ crds: CreateReplace
+ upgrade:
+ crds: CreateReplace
+ interval: 50m
+ values:
+ global:
+ capiEnabled: false
+
Cluster Management Introduction ENTERPRISE¶
Helm Charts and Kustomizations Made Easy with Our UI¶
Follow Our User Guide¶
Managing Clusters Without Cluster API ENTERPRISE¶
kubeconfig
.Adding kubeconfig to Your Management Cluster¶
kubeconfig
stored in a secret in your management cluster, continue with the "Create a GitopsCluster
" step below.kubectl create secret generic demo-01-kubeconfig \
+--from-file=value=./demo-01-kubeconfig
+
apiVersion: v1
+kind: ServiceAccount
+metadata:
+name: demo-01
+namespace: default
+
Expand to see role manifests
---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: impersonate-user-groups
+ subjects:
+ - kind: ServiceAccount
+ name: wgesa
+ namespace: default
+ roleRef:
+ kind: ClusterRole
+ name: user-groups-impersonator
+ apiGroup: rbac.authorization.k8s.io
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: user-groups-impersonator
+ rules:
+ - apiGroups: [""]
+ resources: ["users", "groups"]
+ verbs: ["impersonate"]
+ - apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list"]
+
kubectl get secrets --field-selector type=kubernetes.io/service-account-token
+ NAME TYPE DATA AGE
+ default-token-lsjz4 kubernetes.io/service-account-token 3 13d
+ demo-01-token-gqz7p kubernetes.io/service-account-token 3 99m
+
demo-01-token-gqz7p
is the secret that holds the token for demo-01
service account.)TOKEN=$(kubectl get secret demo-01-token-gqz7p -o jsonpath={.data.token} | base64 -d)
+
static-kubeconfig.sh
:Expand to see script
#!/bin/bash
+ if [[ -z "$CLUSTER_NAME" ]]; then
+ echo "Ensure CLUSTER_NAME has been set"
+ exit 1
+ fi
+ if [[ -z "$CA_CERTIFICATE" ]]; then
+ echo "Ensure CA_CERTIFICATE has been set to the path of the CA certificate"
+ exit 1
+ fi
+ if [[ -z "$ENDPOINT" ]]; then
+ echo "Ensure ENDPOINT has been set"
+ exit 1
+ fi
+ if [[ -z "$TOKEN" ]]; then
+ echo "Ensure TOKEN has been set"
+ exit 1
+ fi
+ export CLUSTER_CA_CERTIFICATE=$(cat "$CA_CERTIFICATE" | base64)
+ envsubst <<EOF
+ apiVersion: v1
+ kind: Config
+ clusters:
+ - name: $CLUSTER_NAME
+ cluster:
+ server: https://$ENDPOINT
+ certificate-authority-data: $CLUSTER_CA_CERTIFICATE
+ users:
+ - name: $CLUSTER_NAME
+ user:
+ token: $TOKEN
+ contexts:
+ - name: $CLUSTER_NAME
+ context:
+ cluster: $CLUSTER_NAME
+ user: $CLUSTER_NAME
+ current-context: $CLUSTER_NAME
+ EOF
+
ca.crt
file used below.CLUSTER_NAME=demo-01 \
+CA_CERTIFICATE=ca.crt \
+ENDPOINT=<control-plane-ip-address> \
+TOKEN=<token> ./static-kubeconfig.sh > demo-01-kubeconfig
+
demo-01
34.218.72.31
kubectl create secret generic demo-01-kubeconfig \
+--from-file=value=./demo-01-kubeconfig
+
Add a Cluster Bootstrap Config¶
kubectl create secret generic my-pat --from-literal GITHUB_TOKEN=$GITHUB_TOKEN
+
GITHUB_USER
variable to point to your repositoryExpand to see full yaml
Connect a Cluster¶
clusters/bases
folder. When a cluster is provisioned, by default it will reconcile all the manifests in ./clusters/<cluster-namespace>/<cluster-name>
and ./clusters/bases
../clusters/bases/rbac
is an easy way to configure this.Expand to see full template yaml
Create a
GitopsCluster
¶GitopsCluster
appears in the cluster, the Cluster Bootstrap Controller will install Flux on it and by default start reconciling the ./clusters/demo-01
path in your management cluster's Git repository:apiVersion: gitops.weave.works/v1alpha1
+kind: GitopsCluster
+metadata:
+ name: demo-01
+ namespace: default
+ # Signals that this cluster should be bootstrapped.
+ labels:
+ weave.works/capi: bootstrap
+spec:
+ secretRef:
+ name: demo-01-kubeconfig
+
./clusters/bases
. In the following step, we'll create a kustomization to add these common resources onto our new cluster:apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+kind: Kustomization
+metadata:
+ creationTimestamp: null
+ name: clusters-bases-kustomization
+ namespace: flux-system
+spec:
+ interval: 10m0s
+ path: clusters/bases
+ prune: true
+ sourceRef:
+ kind: GitRepository
+ name: flux-system
+
Debugging Tip: Checking that Your kubeconfig Secret Is in Your Cluster¶
Expand to see manifest
---
+ apiVersion: batch/v1
+ kind: Job
+ metadata:
+ name: kubectl
+ spec:
+ ttlSecondsAfterFinished: 30
+ template:
+ spec:
+ containers:
+ - name: kubectl
+ image: bitnami/kubectl
+ args:
+ [
+ "get",
+ "pods",
+ "-n",
+ "kube-system",
+ "--kubeconfig",
+ "/etc/kubeconfig/value",
+ ]
+ volumeMounts:
+ - name: kubeconfig
+ mountPath: "/etc/kubeconfig"
+ readOnly: true
+ restartPolicy: Never
+ volumes:
+ - name: kubeconfig
+ secret:
+ secretName: demo-01-kubeconfig
+ optional: false
+
demo-01-kubeconfig
is the name of the secret that contains the kubeconfig for the remote cluster.
Additional Resources¶
Profiles ENTERPRISE¶
name: demo-profile
+version: 0.0.1
+annotations:
+ weave.works/profile: "A Demo Profile"
+
Mark a HelmRepository as Containing Profiles¶
HelmRepository
apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+ name: podinfo
+ namespace: default
+ annotations:
+ weave.works/profiles: "true" # this identifies all charts as profiles
+spec:
+ interval: 5m0s
+ url: https://stefanprodan.github.io/podinfo
+
HelmRepository
are identified as Profiles.Add Layers to Define Dependencies Between Your Profiles¶
name: demo-profile
+version: 0.0.1
+annotations:
+ weave.works/profile: "A Demo Profile"
+ weave.works/layer: "demo"
+
┌─────────┐ ┌─────────┐ ┌─────────┐
+│ │ │ │ │ │
+│ layer-3 ├──────► layer-2 ├──────► layer-1 │
+│ │ │ │ │ │
+└─────────┘ └─────────┘ └─────────┘
+ dependsOn dependsOn
+
dependsOn
calculation is limited to the set of Profiles in the API call.dependsOn
is configured.┌──────────┐ ┌─────────┐ ┌─────────┐
+│ │ │ │ │ │
+│ layer-3 ├─────► layer-2 ├──────► layer-1 │
+│ │ │ │ │ │
+└──────────┤ └─────────┘ └─▲───────┘
+ dependsOn │ dependsOn │
+ │ │
+ │ ┌─────────┐ │
+ │ │ │ │
+ └─────► layer-2 ├────────┘
+ │ │
+ └─────────┘
+ dependsOn
+
dependsOn
for the chart without a layer to depend on the chart with layer. (Optional) Use a Helm Chart from a Remote Public/Private Repository¶
apiVersion: source.toolkit.fluxcd.io/v1beta1
+kind: HelmRepository
+metadata:
+ name: weaveworks-charts
+ namespace: flux-system
+spec:
+ interval: 1m
+ url: https://weaveworks.github.io/weave-gitops-profile-examples/
+
spec.secretRef
. The labels of your target leaf cluster are added for the syncer to match clusters against those labels using spec.clusterSelector.matchLabels
.apiVersion: capi.weave.works/v1alpha1
+kind: SecretSync
+metadata:
+ name: my-dev-secret-syncer
+ namespace: flux-system
+spec:
+ clusterSelector:
+ matchLabels:
+ weave.works/capi: bootstrap
+ secretRef:
+ name: weave-gitops-enterprise-credentials
+ targetNamespace: flux-system
+
apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+ name: weaveworks-charts
+ namespace: flux-system
+spec:
+ interval: 60m
+ secretRef:
+ name: weave-gitops-enterprise-credentials
+ url: https://charts.dev.wkp.weave.works/releases/charts-v3
+
HelmRepoSecret
, SecretSync
, and the GitopsCluster
should all be in the same namespace.Select the Profiles You Want Installed at Cluster Creation¶
HelmRepository
object named weaveworks-charts
. This Kubernetes object should point to a Helm chart repository that includes the Profiles available for installation.Profiles
section of the template. For example:editable
flag in the annotation and describe the required Profile in the template. For example: apiVersion: templates.weave.works/v1alpha2
+kind: GitOpsTemplate
+metadata:
+ name: connect-a-cluster-with-policies
+ namespace: default
+ annotations:
+ capi.weave.works/profile-0: '{"name": "weave-policy-agent", "editable": true, "version": "0.2.8", "values": "accountId: weaveworks\nclusterId: ${CLUSTER_NAME}" }'
+
Weave GitOps Enterprise ENTERPRISE¶
Feature Breakdown¶
Cluster Fleet Management¶
Trusted Application Delivery¶
Progressive Delivery¶
CD Pipelines¶
Team Workspaces¶
Self-Service Templates and Profiles¶
Health Status and Compliance Dashboards¶
Kubernetes Anywhere¶
Critical 24/7 Support¶
Install Enterprise in Air-gapped Environments ENTERPRISE¶
Before You Start¶
Install WGE¶
Set up a WGE install environment¶
Set up a proxy host¶
Create a Kind Cluster¶
Install Flux¶
flux install
to install Flux into your kind clusterSet up a Helm repo¶
Expand to see installation yaml
---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+name: chartmuseum
+namespace: flux-system
+spec:
+interval: 10m
+url: https://chartmuseum.github.io/charts
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+name: chartmuseum
+namespace: flux-system
+spec:
+chart:
+ spec:
+ chart: chartmuseum
+ sourceRef:
+ kind: HelmRepository
+ name: chartmuseum
+ namespace: flux-system
+interval: 10m0s
+timeout: 10m0s
+releaseName: helm-repo
+install:
+ crds: CreateReplace
+ remediation:
+ retries: 3
+values:
+ env:
+ open:
+ DISABLE_API: "false"
+ AUTH_ANONYMOUS_GET: "true"
+
#expose kubernetes svc
+kubectl -n flux-system port-forward svc/helm-repo-chartmuseum 8080:8080 &
+
+#add hostname
+sudo -- sh -c "echo 127.0.0.1 helm-repo-chartmuseum >> /etc/hosts"
+
#add repo to helm
+helm repo add private http://helm-repo-chartmuseum:8080
+
+#test that works
+helm repo update private
+
Install WGE¶
Cluster API¶
export CAPI_BASE_PATH=/tmp/capi
+export CERT_MANAGER_VERSION=v1.9.1
+export CAPI_VERSION=v1.3.0
+export CAPMVM_VERSION=v0.7.0
+export EXP_CLUSTER_RESOURCE_SET=true
+export CONTROL_PLANE_MACHINE_COUNT=1
+export WORKER_MACHINE_COUNT=1
+export CONTROL_PLANE_VIP="192.168.100.9"
+export HOST_ENDPOINT="192.168.1.130:9090"
+
clusterctl
config file.cat << EOF > clusterctl.yaml
+cert-manager:
+ url: "$CAPI_BASE_PATH/cert-manager/$CERT_MANAGER_VERSION/cert-manager.yaml"
+
+providers:
+ - name: "microvm"
+ url: "$CAPI_BASE_PATH/infrastructure-microvm/$CAPMVM_VERSION/infrastructure-components.yaml"
+ type: "InfrastructureProvider"
+ - name: "cluster-api"
+ url: "$CAPI_BASE_PATH/cluster-api/$CAPI_VERSION/core-components.yaml"
+ type: "CoreProvider"
+ - name: "kubeadm"
+ url: "$CAPI_BASE_PATH/bootstrap-kubeadm/$CAPI_VERSION/bootstrap-components.yaml"
+ type: "BootstrapProvider"
+ - name: "kubeadm"
+ url: "$CAPI_BASE_PATH/control-plane-kubeadm/$CAPI_VERSION/control-plane-components.yaml"
+ type: "ControlPlaneProvider"
+EOF
+
make
using the following makefile to initialise CAPI in your cluster: Expand to see Makefile contents
.PHONY := capi
+
+capi: capi-init capi-cluster
+
+capi-init: cert-manager cluster-api bootstrap-kubeadm control-plane-kubeadm microvm clusterctl-init
+
+cert-manager:
+ mkdir -p $(CAPI_BASE_PATH)/cert-manager/$(CERT_MANAGER_VERSION)
+ curl -L https://github.com/cert-manager/cert-manager/releases/download/$(CERT_MANAGER_VERSION)/cert-manager.yaml --output $(CAPI_BASE_PATH)/cert-manager/$(CERT_MANAGER_VERSION)/cert-manager.yaml
+
+cluster-api:
+ mkdir -p $(CAPI_BASE_PATH)/cluster-api/$(CAPI_VERSION)
+ curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/core-components.yaml --output $(CAPI_BASE_PATH)/cluster-api/$(CAPI_VERSION)/core-components.yaml
+ curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/metadata.yaml --output $(CAPI_BASE_PATH)/cluster-api/$(CAPI_VERSION)/metadata.yaml
+
+bootstrap-kubeadm:
+ mkdir -p $(CAPI_BASE_PATH)/bootstrap-kubeadm/$(CAPI_VERSION)
+ curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/bootstrap-components.yaml --output $(CAPI_BASE_PATH)/bootstrap-kubeadm/$(CAPI_VERSION)/bootstrap-components.yaml
+ curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/metadata.yaml --output $(CAPI_BASE_PATH)/bootstrap-kubeadm/$(CAPI_VERSION)/metadata.yaml
+
+control-plane-kubeadm:
+ mkdir -p $(CAPI_BASE_PATH)/control-plane-kubeadm/$(CAPI_VERSION)
+ curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/control-plane-components.yaml --output $(CAPI_BASE_PATH)/control-plane-kubeadm/$(CAPI_VERSION)/control-plane-components.yaml
+ curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/metadata.yaml --output $(CAPI_BASE_PATH)/control-plane-kubeadm/$(CAPI_VERSION)/metadata.yaml
+
+microvm:
+ mkdir -p $(CAPI_BASE_PATH)/infrastructure-microvm/$(CAPMVM_VERSION)
+ curl -L https://github.com/weaveworks-liquidmetal/cluster-api-provider-microvm/releases/download/$(CAPMVM_VERSION)/infrastructure-components.yaml --output $(CAPI_BASE_PATH)/infrastructure-microvm/$(CAPMVM_VERSION)/infrastructure-components.yaml
+ curl -L https://github.com/weaveworks-liquidmetal/cluster-api-provider-microvm/releases/download/$(CAPMVM_VERSION)/cluster-template-cilium.yaml --output $(CAPI_BASE_PATH)/infrastructure-microvm/$(CAPMVM_VERSION)/cluster-template-cilium.yaml
+ curl -L https://github.com/weaveworks-liquidmetal/cluster-api-provider-microvm/releases/download/$(CAPMVM_VERSION)/metadata.yaml --output $(CAPI_BASE_PATH)/infrastructure-microvm/$(CAPMVM_VERSION)/metadata.yaml
+
+clusterctl-init:
+ clusterctl init --wait-providers -v 4 --config clusterctl.yaml --infrastructure microvm
+
+capi-cluster:
+ clusterctl generate cluster --config clusterctl.yaml -i microvm:$(CAPMVM_VERSION) -f cilium lm-demo | kubectl apply -f -
+
Deploying the Terraform Controller¶
Expand to see file contents
apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+name: tf-controller
+namespace: flux-system
+spec:
+interval: 10m
+url: https://weaveworks.github.io/tf-controller/
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+name: tf-controller
+namespace: flux-system
+spec:
+chart:
+ spec:
+ chart: tf-controller
+ version: "0.9.2"
+ sourceRef:
+ kind: HelmRepository
+ name: tf-controller
+ namespace: flux-system
+interval: 10m0s
+install:
+ crds: CreateReplace
+ remediation:
+ retries: 3
+upgrade:
+ crds: CreateReplace
+
WGE¶
Expand to see file contents
---
+apiVersion: v1
+data:
+deploy-key: <changeme>
+entitlement: <changeme>
+password: <changeme>
+username: <changeme>
+kind: Secret
+metadata:
+labels:
+ kustomize.toolkit.fluxcd.io/name: shared-secrets
+ kustomize.toolkit.fluxcd.io/namespace: flux-system
+name: weave-gitops-enterprise-credentials
+namespace: flux-system
+type: Opaque
+---
+apiVersion: v1
+data:
+password: <changeme>
+username: <changeme>
+kind: Secret
+metadata:
+labels:
+ kustomize.toolkit.fluxcd.io/name: enterprise
+ kustomize.toolkit.fluxcd.io/namespace: flux-system
+name: cluster-user-auth
+namespace: flux-system
+type: Opaque
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+name: weave-gitops-enterprise-charts
+namespace: flux-system
+spec:
+interval: 10m
+secretRef:
+ name: weave-gitops-enterprise-credentials
+url: https://charts.dev.wkp.weave.works/releases/charts-v3
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+name: weave-gitops-enterprise
+namespace: flux-system
+spec:
+chart:
+ spec:
+ chart: mccp
+ version: "0.10.2"
+ sourceRef:
+ kind: HelmRepository
+ name: weave-gitops-enterprise-charts
+ namespace: flux-system
+interval: 10m0s
+install:
+ crds: CreateReplace
+ remediation:
+ retries: 3
+upgrade:
+ crds: CreateReplace
+values:
+ global:
+ capiEnabled: true
+ enablePipelines: true
+ enableTerraformUI: true
+ clusterBootstrapController:
+ enabled: true
+ cluster-controller:
+ controllerManager:
+ kubeRbacProxy:
+ image:
+ repository: gcr.io/kubebuilder/kube-rbac-proxy
+ tag: v0.8.0
+ manager:
+ image:
+ repository: docker.io/weaveworks/cluster-controller
+ tag: v1.4.1
+ policy-agent:
+ enabled: true
+ image: weaveworks/policy-agent
+ pipeline-controller:
+ controller:
+ manager:
+ image:
+ repository: ghcr.io/weaveworks/pipeline-controller
+ images:
+ clustersService: docker.io/weaveworks/weave-gitops-enterprise-clusters-service:v0.10.2
+ uiServer: docker.io/weaveworks/weave-gitops-enterprise-ui-server:v0.10.2
+ clusterBootstrapController: weaveworks/cluster-bootstrap-controller:v0.4.0
+
➜ kubectl get pods -A
+NAMESPACE NAME READY STATUS RESTARTS AGE
+...
+flux-system weave-gitops-enterprise-cluster-controller-6f8c69dc8-tq994 2/2 Running 5 (12h ago) 13h
+flux-system weave-gitops-enterprise-mccp-cluster-bootstrap-controller-cxd9c 2/2 Running 0 13h
+flux-system weave-gitops-enterprise-mccp-cluster-service-8485f5f956-pdtxw 1/1 Running 0 12h
+flux-system weave-gitops-enterprise-pipeline-controller-85b76d95bd-2sw7v 1/1 Running 0 13h
+...
+
kubectl
kubectl get helmcharts.source.toolkit.fluxcd.io
+NAME CHART VERSION SOURCE KIND SOURCE NAME AGE READY STATUS
+flux-system-cert-manager cert-manager 0.0.7 HelmRepository weaveworks-charts 13h True pulled 'cert-manager' chart with version '0.0.7'
+flux-system-tf-controller tf-controller 0.9.2 HelmRepository tf-controller 13h True pulled 'tf-controller' chart with version '0.9.2'
+flux-system-weave-gitops-enterprise mccp v0.10.2 HelmRepository weave-gitops-enterprise-charts 13h True pulled 'mccp' chart with version '0.10.2'
+
kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec['containers','initContainers'][*].image}" |tr -s '[[:space:]]' '\n' \
+| sort | uniq | grep -vE 'kindest|etcd|coredns'
+
+docker.io/prom/prometheus:v2.34.0
+docker.io/weaveworks/cluster-controller:v1.4.1
+docker.io/weaveworks/weave-gitops-enterprise-clusters-service:v0.10.2
+docker.io/weaveworks/weave-gitops-enterprise-ui-server:v0.10.2
+ghcr.io/fluxcd/flagger-loadtester:0.22.0
+ghcr.io/fluxcd/flagger:1.21.0
+ghcr.io/fluxcd/helm-controller:v0.23.1
+ghcr.io/fluxcd/kustomize-controller:v0.27.1
+ghcr.io/fluxcd/notification-controller:v0.25.2
+...
+
Collect and Publish Artifacts¶
Expand to see Makefile contents
.PHONY := all
+
+ #set these variable with your custom configuration
+ PRIVATE_HELM_REPO_NAME=private
+ REGISTRY=localhost:5001
+ WGE_VERSION=0.10.2
+
+ WGE=mccp-$(WGE_VERSION)
+ WGE_CHART=$(WGE).tgz
+
+ all: images charts
+
+ charts: pull-charts push-charts
+
+ images:
+ kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec['containers','initContainers'][*].image}" \
+ |tr -s '[[:space:]]' '\n' | sort | uniq | grep -vE 'kindest|kube-(.*)|etcd|coredns' | xargs -L 1 -I {} ./image-sync.sh {} $(REGISTRY)
+ kubectl get microvmmachinetemplates --all-namespaces -o jsonpath="{.items[*].spec.template.spec.kernel.image}"|tr -s '[[:space:]]' '\n' \
+ | sort | uniq | xargs -L 1 -I {} ./image-sync.sh {} $(REGISTRY)
+
+ pull-charts:
+ curl -L https://s3.us-east-1.amazonaws.com/weaveworks-wkp/releases/charts-v3/$(WGE_CHART) --output $(WGE_CHART)
+
+ push-charts:
+ helm cm-push -f $(WGE_CHART) $(PRIVATE_HELM_REPO_NAME)
+
image-sync.sh
referenced in the images
target of the above Makefile is similar to: skopeo copy docker://$1 docker://$2/$1 --preserve-digests --multi-arch=all
+
make
to automatically sync Helm charts and container images.➜ resources git:(docs-airgap-install) ✗ make
+kubectl get microvmmachinetemplates --all-namespaces -o jsonpath="{.items[*].spec.template.spec.kernel.image}"|tr -s '[[:space:]]' '\n' \
+ | sort | uniq | xargs -L 1 -I {} ./image-pull-push.sh {} docker-registry:5000
+
+5.10.77: Pulling from weaveworks-liquidmetal/flintlock-kernel
+Digest: sha256:5ef5f3f5b42a75fdb69cdd8d65f5929430f086621e61f00694f53fe351b5d466
+Status: Image is up to date for ghcr.io/weaveworks-liquidmetal/flintlock-kernel:5.10.77
+ghcr.io/weaveworks-liquidmetal/flintlock-kernel:5.10.77
+...5.10.77: digest: sha256:5ef5f3f5b42a75fdb69cdd8d65f5929430f086621e61f00694f53fe351b5d466 size: 739
+
Airgap Install¶
Weave GitOps Enterprise¶
spec.chart.spec.sourceRef
to tell Flux to pull Helm charts from your Helm repo.spec.values
to use the container images from your private registry.Expand to view example WGE manifest
---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+name: weave-gitops-enterprise-charts
+namespace: flux-system
+spec:
+interval: 1m
+url: http://helm-repo-chartmuseum:8080
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+name: weave-gitops-enterprise
+namespace: flux-system
+spec:
+chart:
+ spec:
+ chart: mccp
+ version: "0.10.2"
+ sourceRef:
+ kind: HelmRepository
+ name: weave-gitops-enterprise-charts
+ namespace: flux-system
+interval: 1m0s
+install:
+ crds: CreateReplace
+ remediation:
+ retries: 3
+upgrade:
+ crds: CreateReplace
+values:
+ global:
+ capiEnabled: true
+ enablePipelines: true
+ enableTerraformUI: true
+ clusterBootstrapController:
+ enabled: true
+ #images changed
+ cluster-controller:
+ controllerManager:
+ kubeRbacProxy:
+ image:
+ repository: localhost:5001/gcr.io/kubebuilder/kube-rbac-proxy
+ tag: v0.8.0
+ manager:
+ image:
+ repository: localhost:5001/docker.io/weaveworks/cluster-controller
+ tag: v1.4.1
+ policy-agent:
+ enabled: true
+ image: localhost:5001/weaveworks/policy-agent
+ pipeline-controller:
+ controller:
+ manager:
+ image:
+ repository: localhost:5001/ghcr.io/weaveworks/pipeline-controller
+ images:
+ clustersService: localhost:5001/docker.io/weaveworks/weave-gitops-enterprise-clusters-service:v0.10.2
+ uiServer: localhost:5001/docker.io/weaveworks/weave-gitops-enterprise-ui-server:v0.10.2
+ clusterBootstrapController: localhost:5001/weaveworks/cluster-bootstrap-controller:v0.4.0
+
Cluster API¶
clusterctl.yaml
that you want to use images from the private repo by leveraging image overrides.images:
+ all:
+ repository: localhost:5001/registry.k8s.io/cluster-api
+ infrastructure-microvm:
+ repository: localhost:5001/ghcr.io/weaveworks-liquidmetal
+
make clusterctl-init
to init capi using your private registry.
Azure and Weave GitOps Enterprise Installation ENTERPRISE¶
1. Choose the “GitOps” Option in the Marketplace¶
Type: Flux v2
.
Optional: Install CAPZ, the CAPI Provider¶
2. Apply the Entitlements Secret¶
kubectl apply -f entitlements.yaml
+
3. Configure Access for Writing to Git from the UI¶
api
permissions to create pull requests on your behalf.
api
openid
email
profile
https://localhost:8000/oauth/gitlab
for port-forwarding and testing; https://git.example.com/oauth/gitlab
for production use. git-provider-credentials
secret, along with:
GIT_HOST_TYPES
to tell WGE that the host is gitlabGITLAB_HOSTNAME
where the OAuth app is hostedkubectl create secret generic git-provider-credentials --namespace=flux-system \
+ --from-literal="GITLAB_CLIENT_ID=13457" \
+ --from-literal="GITLAB_CLIENT_SECRET=24680" \
+ --from-literal="GITLAB_HOSTNAME=git.example.com" \
+ --from-literal="GIT_HOST_TYPES=git.example.com=gitlab"
+
<WGE dashboard URL>/oauth/bitbucketserver
. You will also need to select permissions for the application. The minimum set of permissions needed for WGE to create pull requests on behalf of users is Repositories - Write
. An example of configuring these settings is shown below.git-provider-credentials
secret, along with:
GIT_HOST_TYPES
to tell WGE that the host is bitbucket-serverBITBUCKET_SERVER_HOSTNAME
where the OAuth app is hostedkubectl create secret generic git-provider-credentials --namespace=flux-system \
+ --from-literal="BITBUCKET_SERVER_CLIENT_ID=13457" \
+ --from-literal="BITBUCKET_SERVER_CLIENT_SECRET=24680" \
+ --from-literal="BITBUCKET_SERVER_HOSTNAME=git.example.com" \
+ --from-literal="GIT_HOST_TYPES=git.example.com=bitbucket-server"
+
kubectl edit secret generic git-provider-credentials --namespace=flux-system
+
GIT_HOST_TYPES=git.example.com:7990=bitbucket-server
<WGE dashboard URL>/oauth/azuredevops
.Code (read and write)
scope from the list. This is necessary so that WGE can create pull requests on behalf of users. An example of configuring these settings is shown below.App ID
and Client Secret
values—you will use them to configure WGE.git-provider-credentials
that contains the App ID
and Client Secret
values from the newly created application.kubectl create secret generic git-provider-credentials --namespace=flux-system \
+ --from-literal="AZURE_DEVOPS_CLIENT_ID=<App ID value>" \
+ --from-literal="AZURE_DEVOPS_CLIENT_SECRET=<Client Secret value>"
+
4. Configure Your Password¶
brew install weaveworks/tap/gitops-ee
+
curl --silent --location "https://artifacts.wge.dev.weave.works/releases/bin/0.27.0/gitops-$(uname)-$(uname -m).tar.gz" | tar xz -C /tmp
+sudo mv /tmp/gitops /usr/local/bin
+gitops version
+
gitops get bcrypt-hash
from our GitOps CLI.PASSWORD="<Make up and insert a brand-new password here>"
+echo -n $PASSWORD | gitops get bcrypt-hash | kubectl create secret generic cluster-user-auth -n flux-system --from-literal=username=wego-admin --from-file=password=/dev/stdin
+
kubectl get secret -n flux-system cluster-user-auth
+
5. Install Weave GitOps Enterprise to Your Cluster¶
6. Apply Extra Configuration¶
apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: cluster-service-extra-config
+ namespace: flux-system
+data:
+ # disable TLS
+NO_TLS: "true"
+
kubectl apply -f cluster-service-extra-config.yaml
+
+# restart the clusters-service for changes to take effect
+kubectl -n flux-system rollout restart deploy/weave-gitops-enterprise-mccp-cluster-service
+
Available Configuration Options¶
value default description NO_TLS
"false"
disable TLS CLUSTER_NAME
"management"
name of the management cluster AUTH_METHODS
"token-passthrough,user-account"
Which auth methods to use, valid values are 'oidc', 'token-pass-through' and 'user-account' OIDC_ISSUER_URL
""
The URL of the OpenID Connect issuer OIDC_CLIENT_ID
""
The client ID for the OpenID Connect client OIDC_CLIENT_SECRET
""
The client secret to use with OpenID Connect issuer OIDC_REDIRECT_URL
""
The OAuth2 redirect URL OIDC_TOKEN_DURATION
"1h"
The duration of the ID token. It should be set in the format: number + time unit (s,m,h) e.g., 20m OIDC_CLAIM_USERNAME
"email"
JWT claim to use as the user name. By default email, which is expected to be a unique identifier of the end user. Admins can choose other claims, such as sub or name, depending on their provider OIDC_CLAIM_GROUPS
"groups"
JWT claim to use as the user's group. If the claim is present it must be an array of strings CUSTOM_OIDC_SCOPES
"groups, openid, email, profile"
Customise the requested scopes for the OIDC authentication flow - openid will always be requested 7. Check That It Works¶
Troubleshooting¶
Next Steps¶
Install Weave GitOps Enterprise via CLI¶
gitops-ee bootstrap
CLI command which is suitable for two main scenarios:
Getting Started¶
Prerequisites¶
Install
gitops-ee
CLI (> v0.35)¶gitops-ee
CLI using this command.brew install weaveworks/tap/gitops-ee
+
Bootstrap Weave GitOps Enterprise¶
gitops bootstrap
+
gitops bootstrap \
+ --kubeconfig=$HOME/.kube/config \
+ --private-key=$HOME/.ssh/id_rsa --private-key-password="" \
+ --version="0.35.0" \
+ --domain-type="localhost" \
+ --password="admin123"
+
Appendix¶
Understanding
gitops-ee bootstrap
¶gitops-ee bootstrap
is a workflow that will take you through the following stages:
Verify Flux¶
Verify Entitlement¶
Bootstrapping
checks that the secret exists on the management cluster, and that it is valid will check if it has valid content and the entitlement is not expired. To get the entitlement secret please contact sales@weave.works, then apply it on your management cluster with the name weave-gitops-enterprise-credentials
under flux-system
namespace.Configure Git Access¶
gitops-ee bootstrap
to push WGE resources to the management cluster's git repository, you will be prompted to provide the private key used to access your repo via ssh. If the private key is encrypted, you will also be asked to provide the private key password.Select WGE version¶
Create Cluster User¶
Configure Dashboard Access¶
localhost
in the cli and the dashboard will be available through a ClusterIP Service.externaldns
the dashboard will be available through an Ingress with the following considerations:
public-nginx
.Access the dashboard¶
(Optional) Configure OIDC¶
gitops-ee bootstrap auth --type=oidc
command.DiscoveryUrl
this will verify that OIDC is accessible and get the issuerUrl from the OIDC settings. clientID
& clientSecret
that you have configured on your OIDC static-clients.your-domain/oauth2/callback
for example http://localhost:3000/oauth2/callback
CLI configurations¶
--kube-config
: allows to choose the Kubeconfig for your cluster, default would be ~/.kube/config-d
, --domain externaldns
: indicate the domain to use in case of using externaldns-t
, --domain-type
: dashboard domain type: could be 'localhost' or 'externaldns'-h
, --help
: help for bootstrap-p
, --password
: Dashboard admin password-k
, --private-key
: Private key path. This key will be used to push the Weave GitOps Enterprise's resources to the default cluster repository-c
, --private-key-password
: Private key password. If the private key is encrypted using password-u
, --username
: Dashboard admin username-v
, --version
: Weave GitOps Enterprise version to install
Install Weave GitOps Enterprise ENTERPRISE¶
Prerequisites¶
fleet-infra
repository - configure your Git client properly (if using GitHub, for example, then review their docs on setting your username and your email address) - obtain a valid entitlement secret from Weaveworks and apply it to your cluster - install a compatible version of Flux onto your cluster; see below for how-to guidanceInstall the Weave GitOps Enterprise CLI Tool¶
brew install weaveworks/tap/gitops-ee
+
export VERSION=<VERSION>
+curl --silent --location "https://artifacts.wge.dev.weave.works/releases/bin/${VERSION}/gitops-$(uname)-$(uname -m).tar.gz" | tar xz -C /tmp
+sudo mv /tmp/gitops /usr/local/bin
+gitops version
+
Install Flux Onto Your Cluster with the
flux bootstrap
Command¶flux bootstrap
command enables you to deploy Flux on a cluster the GitOps way. Go here for more information about the command.flux bootstrap github \
+--owner=<github username> \
+--repository=fleet-infra \
+--branch=main \
+--path=./clusters/management \
+--personal \
+--components-extra image-reflector-controller,image-automation-controller
+
flux bootstrap gitlab \
+--owner=<gitlab username> \
+--repository=fleet-infra \
+--branch=main \
+--path=./clusters/management \
+--personal \
+--components-extra image-reflector-controller,image-automation-controller
+
Apply Your Entitlements Secret to Your Cluster¶
kubectl apply -f entitlements.yaml
+
Set up Authentication and RBAC¶
Securing Access to the Dashboard¶
Parameter Description Default issuerURL
The URL of the issuer; typically, the discovery URL without a path clientID
The client ID set up for Weave GitOps in the issuer clientSecret
The client secret set up for Weave GitOps in the issuer redirectURL
The redirect URL set up for Weave GitOps in the issuer—typically the dashboard URL, followed by /oauth2/callback
tokenDuration
The time duration that the ID Token will remain valid after successful authentication "1h0m0s" tokenDuration
The time duration that the ID Token will remain valid after successful authentication "1h0m0s" oidcUsernamePrefix
The prefix added to users when impersonating API calls to the Kubernetes API, equivalent to --oidc-username-prefix oidcGroupsPrefix
The prefix added to groups when impersonating API calls to the Kubernetes API, equivalent to --oidc-groups-prefix oidc-auth
in the flux-system
namespace with these parameters set:kubectl create secret generic oidc-auth \
+--namespace flux-system \
+--from-literal=issuerURL=<oidc-issuer-url> \
+--from-literal=clientID=<client-id> \
+--from-literal=clientSecret=<client-secret> \
+--from-literal=redirectURL=<redirect-url> \
+--from-literal=tokenDuration=<token-duration>
+
Customization¶
oidcUsernamePrefix
and oidcGroupsPrefix
work in the same way as the Kubernetes kube-apiserver command-line options, if you need them for Kubernetes, you will likely need them here.Scopes¶
kubectl create secret generic oidc-auth \
+--namespace flux-system \
+--from-literal=issuerURL=<oidc-issuer-url> \
+--from-literal=clientID=<client-id> \
+--from-literal=clientSecret=<client-secret> \
+--from-literal=redirectURL=<redirect-url> \
+--from-literal=tokenDuration=<token-duration> \
+--from-literal=customScopes=custom,scopes
+
customScopes
key is a comma-separated list of scopes to request. In this case, "custom", "scopes", and "openid" would be requested. Claims¶
user
and groups
when WGE communicates with your Kubernetes API server.kube-apiserver
with --oidc-username-claim=email --oidc-groups-claim=groups
.oidc-auth
Secret
.kubectl create secret generic oidc-auth \
+--namespace flux-system \
+--from-literal=issuerURL=<oidc-issuer-url> \
+--from-literal=clientID=<client-id> \
+--from-literal=clientSecret=<client-secret> \
+--from-literal=redirectURL=<redirect-url> \
+--from-literal=tokenDuration=<token-duration> \
+--from-literal=claimUsername=sub \
+--from-literal=claimGroups=groups
+
kube-apiserver
configuration. Configuring OIDC with Dex and GitHub¶
---
+apiVersion: v1
+kind: Namespace
+metadata:
+name: dex
+
kubectl create secret generic github-client \
+--namespace=dex \
+--from-literal=client-id=${GITHUB_CLIENT_ID} \
+--from-literal=client-secret=${GITHUB_CLIENT_SECRET}
+
Deploy Dex¶
HelmRepository
and HelmRelease
objects to let Flux deploy everything.Expand to see resource manifests
---
+apiVersion: source.toolkit.fluxcd.io/v1beta1
+kind: HelmRepository
+metadata:
+name: dex
+namespace: dex
+spec:
+interval: 1m
+url: https://charts.dexidp.io
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+name: dex
+namespace: dex
+spec:
+interval: 5m
+chart:
+ spec:
+ chart: dex
+ version: 0.15.3
+ sourceRef:
+ kind: HelmRepository
+ name: dex
+ namespace: dex
+ interval: 1m
+values:
+ envVars:
+ - name: GITHUB_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: github-client
+ key: client-id
+ - name: GITHUB_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: github-client
+ key: client-secret
+ config:
+ # Set it to a valid URL
+ issuer: https://dex.dev.example.tld
+
+ # See https://dexidp.io/docs/storage/ for more options
+ storage:
+ type: memory
+
+ staticClients:
+ - name: 'Weave GitOps'
+ id: weave-gitops
+ secret: AiAImuXKhoI5ApvKWF988txjZ+6rG3S7o6X5En
+ redirectURIs:
+ - 'https://localhost:9001/oauth2/callback'
+ - 'https://0.0.0.0:9001/oauth2/callback'
+ - 'http://0.0.0.0:9001/oauth2/callback'
+ - 'http://localhost:4567/oauth2/callback'
+ - 'https://localhost:4567/oauth2/callback'
+ - 'http://localhost:3000/oauth2/callback'
+
+ connectors:
+ - type: github
+ id: github
+ name: GitHub
+ config:
+ clientID: $GITHUB_CLIENT_ID
+ clientSecret: $GITHUB_CLIENT_SECRET
+ redirectURI: https://dex.dev.example.tld/callback
+ orgs:
+ - name: weaveworks
+ teams:
+ - team-a
+ - team-b
+ - QA
+ - name: ww-test-org
+ ingress:
+ enabled: true
+ className: nginx
+ annotations:
+ cert-manager.io/cluster-issuer: letsencrypt-prod
+ hosts:
+ - host: dex.dev.example.tld
+ paths:
+ - path: /
+ pathType: ImplementationSpecific
+ tls:
+ - hosts:
+ - dex.dev.example.tld
+ secretName: dex-dev-example-tld
+
orgs
field on the GitHub connector, which allows you to define groups within a GitHub organisation: orgs:
+- name: weaveworks
+teams:
+- team-a
+- team-b
+- QA
+
weaveworks
and all members of the team-a
, team-b
, and QA
teams can authenticate. Group membership is added to the user.Expand to see group role bindings
---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+name: wego-test-user-read-resources
+namespace: flux-system
+subjects:
+- kind: Group
+ name: weaveworks:QA
+ namespace: flux-system
+roleRef:
+kind: Role
+name: wego-admin-role
+apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+name: wego-admin-role
+namespace: flux-system
+rules:
+- apiGroups: [""]
+ resources: ["secrets", "pods" ]
+ verbs: [ "get", "list" ]
+- apiGroups: ["apps"]
+ resources: [ "deployments", "replicasets"]
+ verbs: [ "get", "list" ]
+- apiGroups: ["kustomize.toolkit.fluxcd.io"]
+ resources: [ "kustomizations" ]
+ verbs: [ "get", "list", "patch" ]
+- apiGroups: ["helm.toolkit.fluxcd.io"]
+ resources: [ "helmreleases" ]
+ verbs: [ "get", "list", "patch" ]
+- apiGroups: ["source.toolkit.fluxcd.io"]
+ resources: ["buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories"]
+ verbs: ["get", "list", "patch"]
+- apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "watch", "list"]
+
Expand to see group cluster role bindings
---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+name: weaveworks:team-a
+subjects:
+- kind: Group
+name: weaveworks:team-a
+apiGroup: rbac.authorization.k8s.io
+roleRef:
+kind: ClusterRole
+name: cluster-admin
+apiGroup: rbac.authorization.k8s.io
+
Set up a Static User¶
staticPasswords
to the config
:spec:
+values:
+ config:
+ staticPasswords:
+ - email: "admin@example.tld"
+ hash: "$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W"
+ username: "admin"
+ userID: "08a8684b-db88-4b73-90a9-3cd1661f5466"
+
gitops
CLI:PASSWORD="<your password>"
+echo -n $PASSWORD | gitops get bcrypt-hash
+$2a$10$OS5NJmPNEb13UgTOSKnMxOWlmS7mlxX77hv4yAiISvZ71Dc7IuN3q
+
OIDC Login¶
Configuring the Emergency User¶
gitops get bcrypt-hash
from our CLI.PASSWORD="<your password>"
+echo -n $PASSWORD | gitops get bcrypt-hash
+$2a$10$OS5NJmPNEb13UgTOSKnMxOWlmS7mlxX77hv4yAiISvZ71Dc7IuN3q
+
kubectl create secret generic cluster-user-auth \
+--namespace flux-system \
+--from-literal=username=wego-admin \
+--from-literal=password='$2a$10$OS5NJmPNEb13UTOSKngMxOWlmS7mlxX77hv4yAiISvZ71Dc7IuN3q'
+
Updating the Emergency User¶
cluster-user-auth
with the new details.User Permissions¶
flux-system
namespace (where Flux stores its resources by default). The default set of rules are configured like this:rules:
+# Flux Resources
+- apiGroups: ["source.toolkit.fluxcd.io"]
+ resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ]
+ verbs: [ "get", "list", "watch", "patch" ]
+
+- apiGroups: ["kustomize.toolkit.fluxcd.io"]
+ resources: [ "kustomizations" ]
+ verbs: [ "get", "list", "watch", "patch" ]
+
+- apiGroups: ["helm.toolkit.fluxcd.io"]
+ resources: [ "helmreleases" ]
+ verbs: [ "get", "list", "watch", "patch" ]
+
+- apiGroups: [ "notification.toolkit.fluxcd.io" ]
+ resources: [ "providers", "alerts" ]
+ verbs: [ "get", "list", "watch", "patch" ]
+
+- apiGroups: ["infra.contrib.fluxcd.io"]
+ resources: ["terraforms"]
+ verbs: [ "get", "list", "watch", "patch" ]
+
+# Read access for all other Kubernetes objects
+- apiGroups: ["*"]
+ resources: ["*"]
+ verbs: [ "get", "list", "watch" ]
+
rbac.additionalRules
field in the Helm Chart. Follow the instructions in the next section in order to configure RBAC correctly.Note
#
+adminUser:
+create: false
+#
+additionalArgs:
+- --auth-methods=oidc
+#
+
GitOps Dashboard Service Account Permissions¶
rules:
+- apiGroups: [""]
+ resources: ["users", "groups"]
+ verbs: [ "impersonate" ]
+- apiGroups: [""]
+ resources: [ "secrets" ]
+ verbs: [ "get", "list" ]
+- apiGroups: [ "" ]
+ resources: [ "namespaces" ]
+ verbs: [ "get", "list" ]
+
cluster-user-auth
and oidc-auth
secrets, the default secrets to store the emergency cluster user account and OIDC configuration (see securing access to the dashboard)Impersonation¶
impersonation
. The permissions granted to users and groups that Weave GitOps can impersonate will determine the scope of actions that WGE can take within your cluster. $ kubectl get deployments --as aisha@example.com
+
aisha@example.com
has permissions to get deployments within the cluster, this will return those deployments. The same occurs within the application, so properly configuring application permissions is very important. Without proper restrictions the application can impersonate very powerful users
or groups
. For example, the system:masters
is a group generally bound to the cluster-admin
role, which can do anything.Get Namespaces¶
Reading the
cluster-user-auth
and oidc-auth
Secrets¶cluster-user-auth
and oidc-auth
secrets provide information for authenticating to the application. The former holds the username and bcrypt-hashed password for the emergency user, and the latter holds OIDC configuration.User Permissions¶
flux-system
namespace—which is where Flux stores its resources by default—with the following permissions:rules:
+ # Flux Resources
+ - apiGroups: ["source.toolkit.fluxcd.io"]
+ resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ]
+ verbs: [ "get", "list", "watch", "patch" ]
+
+ - apiGroups: ["kustomize.toolkit.fluxcd.io"]
+ resources: [ "kustomizations" ]
+ verbs: [ "get", "list", "watch", "patch" ]
+
+ - apiGroups: ["helm.toolkit.fluxcd.io"]
+ resources: [ "helmreleases" ]
+ verbs: [ "get", "list", "watch", "patch" ]
+
+ - apiGroups: [ "notification.toolkit.fluxcd.io" ]
+ resources: [ "providers", "alerts" ]
+ verbs: [ "get", "list", "watch", "patch" ]
+
+ - apiGroups: ["infra.contrib.fluxcd.io"]
+ resources: ["terraforms"]
+ verbs: [ "get", "list", "watch", "patch" ]
+
+ # Read access for all other Kubernetes objects
+ - apiGroups: ["*"]
+ resources: ["*"]
+ verbs: [ "get", "list", "watch" ]
+
GitOpsSets
and Templates
.Flux Resources¶
API Group Resources Permissions kustomize.toolkit.fluxcd.io kustomizations get, list, patch helm.toolkit.fluxcd.io Helm Releases get, list, patch source.toolkit.fluxcd.io buckets, Helm charts, Git repositories, Helm repositories, OCI repositories get, list, patch notification.toolkit.fluxcd.io providers, alerts get, list infra.contrib.fluxcd.io Terraform get, list, patch get
and list
permissions facilitate this.patch
permissions are used for two features: to suspend and resume reconciliation of a resource by modifying the 'spec' of a resource, and to force reconciliation of a resource by modifying resource annotations. These features work in the same way that flux suspend
, flux resume
, and flux reconcile
does on the CLI.Resources Managed via Flux¶
API Group Resources Permissions "" configmaps, secrets, pods, services, persistent volumes, persistent volume claims get, list, watch apps deployments, replica sets, stateful sets get, list, watch batch jobs, cron jobs get, list, watch autoscaling horizontal pod autoscalers get, list, watch rbac.authorization.k8s.io roles, cluster roles, rolebindings, cluster role bindings get, list, watch networking.k8s.io ingresses get, list, watch secrets
enables Weave GitOps to monitor the state of Helm releases as that's where it stores the state by default. For clarity, these are the Helm release objects not the Flux HelmRelease resource (which are dealt with by the earlier section).Feedback from Flux¶
Login UI¶
values.yaml
file or the spec.values
section of the Weave GitOps HelmRelease
resource:extraEnvVars:
+ - name: WEAVE_GITOPS_FEATURE_OIDC_BUTTON_LABEL
+ value: "Login with ACME"
+
Recommended RBAC Configuration¶
Example Setup¶
wego-admin
ClusterRole
, created by Helm, wego-admin-cluster-role
wego-team-a-admin
Role
, using the same permissions as wego-admin-role
, created in Team-A's namespacewego-readonly
ClusterRole
that matches wego-admin-cluster-role
but with no patch
permissions.kubectl
) and to Weave GitOps then, depending on OIDC configuration, they may end up with the super-set of their permissions from Weave GitOps and any other permissions granted to them.secrets
. To avoid this, OIDC providers will often let you configure which groups are returned to which clients. The Weave GitOps groups should not be returned to the cluster client (and vice versa).Code¶
Expand to see example RBAC
# Admin cluster role
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: wego-admin-cluster-role
+ rules:
+ - apiGroups: [""]
+ resources: ["secrets", "pods" ]
+ verbs: [ "get", "list" ]
+ - apiGroups: ["apps"]
+ resources: [ "deployments", "replicasets"]
+ verbs: [ "get", "list" ]
+ - apiGroups: ["kustomize.toolkit.fluxcd.io"]
+ resources: [ "kustomizations" ]
+ verbs: [ "get", "list", "patch" ]
+ - apiGroups: ["helm.toolkit.fluxcd.io"]
+ resources: [ "helmreleases" ]
+ verbs: [ "get", "list", "patch" ]
+ - apiGroups: ["source.toolkit.fluxcd.io"]
+ resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ]
+ verbs: [ "get", "list", "patch" ]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "watch", "list"]
+ ---
+ # Read-only cluster role
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: wego-readonly-role
+ rules:
+ # All the 'patch' permissions have been removed
+ - apiGroups: [""]
+ resources: ["secrets", "pods" ]
+ verbs: [ "get", "list" ]
+ - apiGroups: ["apps"]
+ resources: [ "deployments", "replicasets"]
+ verbs: [ "get", "list" ]
+ - apiGroups: ["kustomize.toolkit.fluxcd.io"]
+ resources: [ "kustomizations" ]
+ verbs: [ "get", "list" ]
+ - apiGroups: ["helm.toolkit.fluxcd.io"]
+ resources: [ "helmreleases" ]
+ verbs: [ "get", "list" ]
+ - apiGroups: ["source.toolkit.fluxcd.io"]
+ resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ]
+ verbs: [ "get", "list" ]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "watch", "list"]
+ ---
+ # Bind the cluster admin role to the wego-admin group
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: wego-cluster-admin
+ subjects:
+ - kind: Group
+ name: wego-admin # only Aisha is a member
+ apiGroup: rbac.authorization.k8s.io
+ roleRef:
+ kind: ClusterRole
+ name: wego-admin-cluster-role
+ apiGroup: rbac.authorization.k8s.io
+ ---
+ # Bind the admin role in the team-a namespace for the wego-team-a-admin group
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: wego-team-a-admin-role
+ namespace: team-a
+ subjects:
+ - kind: Group
+ name: wego-team-a-admin # Aisha & Brian are members
+ apiGroup: rbac.authorization.k8s.io
+ roleRef:
+ # Use the cluster role to set rules, just bind them in the team-a namespace
+ kind: ClusterRole
+ name: wego-admin-role
+ apiGroup: rbac.authorization.k8s.io
+ ---
+ # Bind the read-only role to the read-only group
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: wego-readonly-role
+ subjects:
+ - kind: Group
+ name: wego-readonly # Everyone is a member
+ apiGroup: rbac.authorization.k8s.io
+ roleRef:
+ kind: ClusterRole
+ name: wego-readonly-role
+ apiGroup: rbac.authorization.k8s.io
+ ---
+
Configure Access for Writing to Git from the Weave GitOps Enterprise UI¶
api
permissions to create pull requests on your behalf.
api
openid
email
profile
https://localhost:8000/oauth/gitlab
for port-forwarding and testinghttps://git.example.com/oauth/gitlab
for production usegit-provider-credentials
secret, along with:
GIT_HOST_TYPES
to tell WGE that the host is gitlabGITLAB_HOSTNAME
where the OAuth app is hostedkubectl create secret generic git-provider-credentials --namespace=flux-system \
+--from-literal="GITLAB_CLIENT_ID=13457" \
+--from-literal="GITLAB_CLIENT_SECRET=24680" \
+--from-literal="GITLAB_HOSTNAME=git.example.com" \
+--from-literal="GIT_HOST_TYPES=git.example.com=gitlab"
+
<WGE dashboard URL>/oauth/bitbucketserver
. You will also need to select permissions for the application. The minimum set of permissions needed for WGE to create pull requests on behalf of users is Repositories - Write
. An example of configuring these settings is shown below.git-provider-credentials
secret, along with:
GIT_HOST_TYPES
to tell WGE that the host is bitbucket-serverBITBUCKET_SERVER_HOSTNAME
where the OAuth app is hostedkubectl create secret generic git-provider-credentials --namespace=flux-system \
+--from-literal="BITBUCKET_SERVER_CLIENT_ID=13457" \
+--from-literal="BITBUCKET_SERVER_CLIENT_SECRET=24680" \
+--from-literal="BITBUCKET_SERVER_HOSTNAME=git.example.com" \
+--from-literal="GIT_HOST_TYPES=git.example.com=bitbucket-server"
+
kubectl edit secret generic git-provider-credentials --namespace=flux-system
+
GIT_HOST_TYPES=git.example.com:7990=bitbucket-server
<WGE dashboard URL>/oauth/azuredevops
. Code (read and write)
scope from the list. This is necessary so that WGE can create pull requests on behalf of users. An example of configuring these settings is shown below.App ID
and Client Secret
values—you will use them to configure WGE.git-provider-credentials
that contains the App ID
and Client Secret
values from the newly created application.kubectl create secret generic git-provider-credentials --namespace=flux-system \
+--from-literal="AZURE_DEVOPS_CLIENT_ID=<App ID value>" \
+--from-literal="AZURE_DEVOPS_CLIENT_SECRET=<Client Secret value>"
+
TLS Configuration¶
8000
with TLS enabled. WGE will generate and use a self-signed certificate for this purpose.kubectl port-forward --namespace flux-system svc/clusters-service 8000:8000
values:
+ tls:
+ enabled: false
+
values:
+ ingress:
+ enabled: true
+ ... other parameters specific to the ingress type ...
+
Configure Helm Chart and Commit¶
git clone git@<provider>:<username>/fleet-infra
+cd fleet-infra
+
clusters/management/weave-gitops-enterprise.yaml
.Expand to see file contents
values.config.capi.repositoryURL
¶values.config.capi.repositoryPath
¶clusters/management/clusters
path. You can configure it with values.config.capi.repositoryPath
. You might want to change it to clusters/my-cluster/cluster
if you configured Flux to reconcile ./clusters/my-cluster
instead.values.config.capi.repositoryClustersPath
¶./clusters
. When a new cluster is specified, any selected profiles will be written to ./clusters/{.namespace}/{.clusterName}/profiles.yaml
. When the new cluster is bootstrapped, Flux will sync the ./clusters/{.namespace}/{.clusterName}
path.Configure Your Password¶
gitops get bcrypt-hash
from our CLI.PASSWORD="<Make up and insert a brand-new password here>"
+echo -n $PASSWORD | gitops get bcrypt-hash | kubectl create secret generic cluster-user-auth -n flux-system --from-literal=username=wego-admin --from-file=password=/dev/stdin
+
kubectl get secret -n flux-system cluster-user-auth
+
(Optional) Install Policy Agent¶
values.policy-agent.enabled
: set to true to install the agent with WGEvalues.policy-agent.config.accountId
: organization name, used as identifiervalues.policy-agent.config.clusterId
: unique identifier for the clustergit add clusters/management/weave-gitops-enterprise.yaml
+git commit -m "Deploy Weave GitOps Enterprise"
+git push
+
flux-system
namespace to verify all pods are running.Next Steps¶
Joining a Cluster with Azure Flux ENTERPRISE¶
Prerequisites¶
Initial Status¶
clusters/default/CLUSTER_NAME/manifests
.Joining a Cluster to WGE¶
Setting up a Service Account¶
Expand to see role manifests
apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: wgesa
+  namespace: default
+---
+apiVersion: v1
+kind: Secret
+type: kubernetes.io/service-account-token
+metadata:
+  name: wgesa-secret
+  namespace: default
+  annotations:
+    kubernetes.io/service-account.name: "wgesa"
+
Expand to see role manifests
apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: impersonate-user-groups
+subjects:
+ - kind: ServiceAccount
+ name: wgesa
+ namespace: default
+roleRef:
+ kind: ClusterRole
+ name: user-groups-impersonator
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: user-groups-impersonator
+rules:
+ - apiGroups: [""]
+ resources: ["users", "groups"]
+ verbs: ["impersonate"]
+ - apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list"]
+
static-kubeconfig.sh
:Expand to see script
#!/bin/bash
+
+if [[ -z "$CLUSTER_NAME" ]]; then
+ echo "Ensure CLUSTER_NAME has been set"
+ exit 1
+fi
+
+if [[ -z "$CA_CERTIFICATE" ]]; then
+ echo "Ensure CA_CERTIFICATE has been set to the path of the CA certificate"
+ exit 1
+fi
+
+if [[ -z "$ENDPOINT" ]]; then
+ echo "Ensure ENDPOINT has been set"
+ exit 1
+fi
+
+if [[ -z "$TOKEN" ]]; then
+ echo "Ensure TOKEN has been set"
+ exit 1
+fi
+
+export CLUSTER_CA_CERTIFICATE=$(cat "$CA_CERTIFICATE" | base64)
+
+envsubst <<EOF
+apiVersion: v1
+kind: Config
+clusters:
+- name: $CLUSTER_NAME
+ cluster:
+ server: https://$ENDPOINT
+ certificate-authority-data: $CLUSTER_CA_CERTIFICATE
+users:
+- name: $CLUSTER_NAME
+ user:
+ token: $TOKEN
+contexts:
+- name: $CLUSTER_NAME
+ context:
+ cluster: $CLUSTER_NAME
+ user: $CLUSTER_NAME
+current-context: $CLUSTER_NAME
+
+EOF
+
kubectl create secret generic demo-01-kubeconfig \
+--from-file=value=./demo-01-kubeconfig
+
Using WGE to Deploy Clusters¶
With Cluster API¶
With Terraform Provider¶
With Crossplane¶
Releases ENTERPRISE¶
v0.31.0¶
Highlights¶
gitops connect cluster
.Dependency versions¶
v0.30.0¶
Highlights¶
UI¶
Policy¶
GitOpsSets¶
GitRepository
or OCIRepository
has no artifact, stop generating with an error.Dependency versions¶
v0.29.1¶
Dependency versions¶
🚀 Enhancements¶
v0.29.0¶
⚠️ Breaking changes¶
GitRepository
v1, Kustomization
v1, and Receiver
v1. This means that this version of Weave GitOps Enterprise is not compatible with previous versions of Flux v2, such as v0.41.x and earlier.✍️ Action required¶
Highlights¶
Flux¶
GitRepository
v1, Kustomization
v1, and Receiver
v1 resources. See Breaking Changes.Explorer¶
Dependency versions¶
🚀 Enhancements¶
🔥 UI¶
v0.28.0¶
Highlights¶
UI¶
Explorer¶
Breaking Changes¶
v1alpha1
of the CAPITemplate
and GitopsTemplate
CRDs. Please migrate to v1alpha2
of these CRDs. See the migration guideDependency versions¶
v0.27.0¶
Highlights¶
Explorer¶
GitOpsSets¶
UI¶
Dependency versions¶
v0.26.0¶
Highlights¶
Dependency versions¶
v0.25.0¶
Dependency versions¶
v0.24.0¶
Highlights¶
GitOpsSets¶
ImagePolicy
. This allows you to include the latest version of an image in your templates, for example to keep a Deployment
up to date.Profiles and Charts¶
"weave.works/helm-version-filter": "> 0.0.0"
to filter out rc releases"weave.works/helm-version-filter": "> 1.0.0"
to filter any pre 1.0 releases"weave.works/helm-version-filter": "> 3.0.0-0"
to filter any pre 3.0 releases but include rc releasesExplorer¶
Breaking Changes¶
Known issues¶
Explorer¶
Dependency versions¶
v0.23.0¶
Highlights¶
Application Details¶
Explorer¶
Templates¶
metadata.name
field.Cluster details¶
Explorer¶
Dependency versions¶
v0.22.0¶
Highlights¶
Explorer¶
GitopsSets¶
Cluster Bootstrapping¶
Upgrade Notes (from the previous release)¶
Known issues¶
Dependency versions¶
v0.21.2¶
Highlights¶
Dependency versions¶
v0.20.0¶
Dependency versions¶
v0.19.0¶
Highlights¶
UI¶
Dependency versions¶
v0.18.0¶
Highlights¶
UI¶
GitopsSets¶
cluster
generator allows you to interact with the Weave GitOps Cluster inventory. GitOps Clusters that are added and removed to the inventory are reflected by the generator. That can be used to target for example to manage applications across a fleet of clusters.gitRepository
generator can now scan directories and paths with the new directory
option, which enables you to create for example dynamically Flux Kustomizations , based on your repository.apiClient
generator allows you to query an endpoint, and provide data for your template./metrics
endpoint ready to be collectedDependency versions¶
v0.17.0¶
Highlights¶
v0.16.0¶
Highlights¶
Create External Secrets via WGE UI¶
Plan Button in Terraform¶
Dependency versions¶
Breaking changes¶
v0.15.1¶
Highlights¶
Multi Repository support. Weave GitOps Enterprise adapts and scales to your repository structure¶
GitOps Templates¶
GitOps Templates CLI enhancements¶
gitops create template
supporting --config
allows you to read command line flags from a config file and --output-dir
allows you to write files out to a directory instead of just stdoutGitOpsSets in preview¶
Minor fixes¶
OIDC¶
Dependency versions¶
Breaking changes¶
v0.14.1¶
Highlights¶
Secrets management¶
Pipelines¶
Minor fixes¶
Workspaces¶
Other¶
Dependency versions¶
Breaking changes¶
v0.13.0¶
Highlights¶
GitOps Templates Path feature¶
spec:
+ resourcetemplates:
+ - path: ./clusters/${CLUSTER_NAME}/definition/cluster.yaml
+ content:
+ - apiVersion: cluster.x-k8s.io/v1alpha4
+ kind: Cluster
+ metadata:
+ name: ${CLUSTER_NAME}
+ ...
+ - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
+ kind: AWSCluster
+ metadata:
+ name: ${CLUSTER_NAME}
+ ...
+ - path: ./clusters/${CLUSTER_NAME}/workloads/helmreleases.yaml
+ content:
+ - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+ kind: HelmRelease
+ metadata:
+ name: ${CLUSTER_NAME}-nginx
+ ...
+ - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+ kind: HelmRelease
+ metadata:
+ name: ${CLUSTER_NAME}-cert-manager
+ ...
+
Workspace UI¶
Enhanced Terraform Table in UI¶
Keyboard shortcuts for "port forwards" on GitOps Run¶
github.com/mattn/go-tty
package (other options required pressing Enter after a keypress, this catches just a single numeric keypress) and opening URLs with the github.com/pkg/browser
package.Minor fixes¶
Dependency versions¶
Breaking changes¶
v0.12.0¶
Highlights¶
GitOps Templates¶
spec:
+ charts:
+ items:
+ - chart: cert-manager
+ version: v1.5.3
+ editable: false
+ required: true
+ values:
+ installCRDs: ${CERT_MANAGER_INSTALL_CRDS}
+ targetNamespace: cert-manager
+ layer: layer-1
+ template:
+ content:
+ metadata:
+ labels:
+ app.kubernetes.io/name: cert-manager
+ spec:
+ retries: ${CERT_MANAGER_RETRY_COUNT}
+
Authentication with OIDC support¶
config
+ oidc:
+ claimUsername: ""
+ claimGroups: ""
+
Policy commit-time agent¶
Admin User- simpler RBAC¶
Pipelines - adding Pipelines through Templates¶
Support for multiple Flux instances on a single cluster¶
Minor fixes¶
Dependency versions¶
Known issues¶
v0.11.0¶
Highlights¶
GitOpsTemplates¶
Pipelines¶
enablePipelines
flag.weave.works/template-type=pipeline
label.Telemetry¶
Dependency versions¶
Breaking changes¶
GitOpsTemplates and CAPITemplates¶
clustertemplates.weave.works
- new: templates.weave.works
apiVersion: clustertemplates.weave.works/v1alpha1
to apiVersion: templates.weave.works/v1alpha1
. 2. Commit, push and reconcile. They should now be viewable in the Templates view again. 3. Clean up the old CRD. As it stands: - kubectl get gitopstemplate -A
will be empty as it is pointing to the old clustertemplates.weave.works
CRD. - kubectl get gitopstemplate.templates.weave.works -A
will work To fix the former of the commands, remove the old CRD (helm does not do this automatically for safety reasons): - kubectl delete crd gitopstemplates.clustertemplates.weave.works
- You may have to wait up to 5 minutes for your local kubectl CRD cache to invalidate, then kubectl get gitopstemplate -A
should be working as usualCAPITemplates
and GitopsTemplates
the default visibility for all sections in a template has been set to "false"
. To re-enable profiles or applications on a template you can tweak the annotationsannotations:
+ templates.weave.works/profiles-enabled: "true" # enable profiles
+ templates.weave.works/kustomizations-enabled: "true" # enable applications
+ templates.weave.works/credentials-enabled: "true" # enable CAPI credentials
+
HelmReleases
in the Pull Request when rendering out the profiles of a template.values.yaml
when selecting profiles to include on the "Create resource (cluster)" page. If changes are made here the updated values.yaml will be included.v0.10.2¶
Highlights¶
Dependency versions¶
v0.10.1¶
Highlights¶
Dependency versions¶
v0.9.6¶
Highlights¶
Dependency versions¶
v0.9.5¶
Highlights¶
gitops create tenant
now supports --prune
to remove old resources from the cluster if you're not using --export
with GitOps.deploymentRBAC
section in tenancy.yaml
allows you to specify the permissions given to the flux Kustomizations
that will apply the resources from git to your tenants' namespaces in the cluster.OCIRepository
sources when restricting/allowing the sources that can be applied into tenants' namespaces.{{ .params.CLUSTER_NAME | upper }}
namespace
can be specified in the template profile annotation that will be provided as the HelmRelease
's targetNamespace
by default.phase="Provisioned"
, rather than ControlPlaneReady=True
status.Dependency versions¶
Known issues¶
⚠️ Breaking changes from v0.9.4¶
config
key.policy-agent:
+ enabled: true
+ accountId: "my-account"
+ clusterId: "my-cluster"
+
policy-agent:
+ enabled: true
+ config:
+ accountId: "my-account"
+ clusterId: "my-cluster"
+
Configuration ENTERPRISE¶
Prerequisites¶
Setup¶
.spec.values.enableExplorer
: feature flag to control whether Explorer is enabled..spec.values.useQueryServiceBackend
: feature flag to control whether you want to leverage Explorer backend capabilities for other UI experiences like Applications or Sources.spec.values.explorer.collector.serviceAccount
: ServiceAccount name
and namespace
that explorer collector will use to impersonate in leaf clusters. Make sure you read authz for collector before setting it. Default values are name: collector
, namespace: flux-system
.---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+ name: weave-gitops-enterprise
+ namespace: flux-system
+spec:
+ # ... other spec components
+ values:
+ enableExplorer: true # feature flag to enable explorer
+ useQueryServiceBackend: true # uses explorer query backend in collection UIs
+ explorer:
+ collector:
+ serviceAccount: # service account that collector will impersonate in leaf clusters
+ name: collector
+ namespace: flux-system
+
Configuration¶
Clusters¶
Kinds¶
Data Layer¶
Authentication and Authorization¶
Authentication and Authorization for querying¶
Authentication and Authorization for collecting¶
clusters/bases
folder, as described in Getting started.
.spec.values.explorer.collector.serviceAccount
.Expand to see example
apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: collector # should match .spec.values.explorer.collector.serviceAccount.name
+  namespace: flux-system # should match .spec.values.explorer.collector.serviceAccount.namespace
+
Expand to see example
apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: collector # could be .spec.values.explorer.collector.serviceAccount.name
+rules:
+- apiGroups: [ "rbac.authorization.k8s.io" ]
+ resources: [ "roles", "clusterroles", "rolebindings", "clusterrolebindings" ]
+ verbs: [ "list", "watch" ]
+- apiGroups: [ "kustomize.toolkit.fluxcd.io" ]
+ resources: [ "kustomizations" ]
+ verbs: [ "list", "watch" ]
+- apiGroups: [ "helm.toolkit.fluxcd.io" ]
+ resources: [ "helmreleases" ]
+ verbs: [ "list", "watch" ]
+- apiGroups: [ "source.toolkit.fluxcd.io" ]
+ resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ]
+ verbs: [ "list", "watch" ]
+
ServiceAccount
.Expand to see example
apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: collector # could be .spec.values.explorer.collector.serviceAccount.name
+subjects:
+- kind: ServiceAccount
+ name: collector # should match .spec.values.explorer.collector.serviceAccount.name
+ namespace: flux-system # should match .spec.values.explorer.collector.serviceAccount.namespace
+roleRef:
+  kind: ClusterRole
+  name: collector # name of the cluster role created earlier
+  apiGroup: rbac.authorization.k8s.io
+
collector
Expand to see example
apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: clusters-service-impersonator-role
+rules:
+- apiGroups: [""]
+ resources: ["users", "groups"]
+ verbs: ["impersonate"]
+- apiGroups: [ "" ]
+ resources: [ "serviceaccounts" ]
+ verbs: [ "impersonate" ]
+ resourceNames:
+ - "collector" # should match .spec.values.explorer.collector.serviceAccount.name
+
Next Steps¶
Getting started ENTERPRISE¶
Pre-requisites¶
Setup¶
explorer.enabled
that you could configure in your Weave Gitops Enterprise HelmRelease values:---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+ name: weave-gitops-enterprise
+ namespace: flux-system
+spec:
+ # ... other spec components
+ values:
+ explorer:
+ enabled: true # global enable/disable flag
+ collector:
+ # ServiceAccount that explorer will use to watch clusters for resources
+ serviceAccount:
+ name: "collector"
+ namespace: "flux-system"
+ cleaner:
+ disabled: false
+ enabledFor: # controls which parts of the UI utilize the Explorer UI/Server components
+ - applications
+ - sources
+ - gitopssets
+ - templates
+
enabledFor
field will control which parts of the UI utilize the Explorer backend for performant queries. Note that this does not control the collection of these objects, only the presentation of the objects in the UI.Explorer UI¶
Explorer
.
Explorer ENTERPRISE¶
FAQ¶
Which journeys would be able to use explorer for?¶
Which journeys would be better using other weave gitops capabilities for?¶
Which Kinds does explorer support?¶
Next Steps¶
Operations ENTERPRISE¶
Debug Access Rules¶
Access Rules
alongside the Query
tab.Cluster
and Subject
the Kinds
it is allowed to read. These are the rules that will be the source of truth doing authorization when a user does a query.Monitoring¶
Metrics¶
Querying¶
API Server¶
http_request_duration_seconds_bucket{handler="/v1/query",method="POST",le="0.05"} 0
+http_request_duration_seconds_sum{handler="/v1/query",method="POST"} 10.088081923
+http_request_duration_seconds_count{handler="/v1/query",method="POST"} 51
+
http_response_size_bytes_bucket{handler="/v1/query",method="POST",le="0.05"} 10
+http_response_size_bytes_sum{handler="/v1/query",method="POST"} 120
+http_response_size_bytes_count{handler="/v1/query",method="POST"} 10
+
http_requests_inflight{handler="/v1/query"} 0
+
Datastore Reads¶
action
is the datastore read operation that could be either GetObjects
, GetAccessRules
, GetObjectByID
, GetRoles
or GetRoleBindings
.status
is the result of the operation. It could be either success
or error
.datastore_latency_seconds_bucket{action="GetObjectByID", le="+Inf", status="success"} 1175
+datastore_latency_seconds_bucket{action="GetObjectByID", le="0.01", status="success"} 1174
+
datastore_latency_seconds_count{action="GetObjectByID", status="success"} 1175
+datastore_latency_seconds_count{action="GetRoleBindings", status="success"} 47
+datastore_latency_seconds_count{action="GetRoles", status="success"} 47
+
datastore_latency_seconds_sum{action="GetObjectByID", status="success"} 0.6924557999999995
+datastore_latency_seconds_sum{action="GetRoleBindings", status="success"} 1.329158916
+datastore_latency_seconds_sum{action="GetRoles", status="success"} 3.942473879999999
+
action
is the datastore read operation that could be either GetObjects
, GetAccessRules
, GetObjectByID
, GetRoles
or GetRoleBindings
datastore_inflight_requests{action="GetObjectByID"} 0
+datastore_inflight_requests{action="GetRoleBindings"} 0
+datastore_inflight_requests{action="GetRoles"} 0
+
Indexer Reads¶
action
is the index read operation that could be either ListFacets
or Search
status
is the result of the operation. It could be either success
or error
indexer_latency_seconds_bucket{action="ListFacets", le="+Inf", status="success"} 1
+indexer_latency_seconds_bucket{action="Search", le="+Inf", status="success"} 47
+
indexer_latency_seconds_sum{action="ListFacets", status="success"} 0.008928666
+indexer_latency_seconds_sum{action="Search", status="success"} 0.06231312599999999
+
indexer_latency_seconds_count{action="ListFacets", status="success"} 1
+indexer_latency_seconds_count{action="Search", status="success"} 47
+
action
is the index read operation that could be either ListFacets
or Search
indexer_inflight_requests{action="ListFacets"} 0
+indexer_inflight_requests{action="Search"} 0
+
Collecting¶
Cluster Watcher¶
collector_cluster_watcher
provides the number of the cluster watchers in the following status
: - Starting: a cluster watcher is starting at the back of detecting that a new cluster has been registered. - Started: cluster watcher has been started and collecting events from the remote cluster. This is the stable state. - Stopping: a cluster has been deregistered so its cluster watcher is no longer required. In the process of stopping it. - Failed: a cluster watcher has failed during the creation or starting process and cannot collect events from the remote clusters. This is the unstable state.collector
is the type of collector, it could be - rbac: for collecting RBAC resources (ie roles) - objects: for collecting non-rbac resources (ie kustomizations)collector_cluster_watcher{collector="objects", status="started"} 1
+collector_cluster_watcher{collector="objects", status="starting"} 0
+collector_cluster_watcher{collector="rbac", status="started"} 1
+collector_cluster_watcher{collector="rbac", status="starting"} 0
+
collector_cluster_watcher
gives the total number of cluster watchers that should be equal to the number of clustersDatastore Writes¶
action
is the datastore write operation that could be either StoreRoles
, StoreRoleBindings
, StoreObjects
, DeleteObjects
, DeleteAllObjects
, DeleteRoles
, DeleteAllRoles
, DeleteRoleBindings
, DeleteAllRoleBindings
status
is the result of the operation. It could be either success
or error
datastore_latency_seconds_bucket{action="StoreRoles", le="+Inf", status="success"} 1175
+datastore_latency_seconds_bucket{action="StoreRoles", le="0.01", status="success"} 1174
+
datastore_latency_seconds_count{action="StoreRoles", status="success"} 1175
+datastore_latency_seconds_count{action="DeleteRoles", status="success"} 47
+datastore_latency_seconds_count{action="DeleteAllRoleBindings", status="success"} 47
+
datastore_latency_seconds_sum{action="StoreRoles", status="success"} 0.6924557999999995
+datastore_latency_seconds_sum{action="DeleteRoles", status="success"} 1.329158916
+datastore_latency_seconds_sum{action="DeleteAllRoleBindings", status="success"} 3.942473879999999
+
action
is the datastore write operation that could be either StoreRoles
, StoreRoleBindings
, StoreObjects
, DeleteObjects
, DeleteAllObjects
, DeleteRoles
, DeleteAllRoles
, DeleteRoleBindings
, DeleteAllRoleBindings
datastore_inflight_requests{action="StoreRoles"} 0
+datastore_inflight_requests{action="StoreRoleBindings"} 0
+datastore_inflight_requests{action="DeleteAllRoleBindings"} 0
+
Indexer Writes¶
action
is the index write operation that could be either Add
, Remove
or RemoveByQuery
status
is the result of the operation. It could be either success
or error
indexer_latency_seconds_bucket{action="Add",status="success",le="+Inf"} 109
+indexer_latency_seconds_bucket{action="Remove",status="success",le="+Inf"} 3
+
indexer_latency_seconds_sum{action="Add",status="success"} 8.393912168
+indexer_latency_seconds_sum{action="Remove",status="success"} 0.012298476
+
indexer_latency_seconds_count{action="Add",status="success"} 109
+indexer_latency_seconds_count{action="Remove",status="success"} 3
+
action
is the index write operation that could be either Add
, Remove
or RemoveByQuery
indexer_inflight_requests{action="Add"} 0
+indexer_inflight_requests{action="Remove"} 0
+
Dashboard¶
Querying ENTERPRISE¶
Schema¶
Key Description Cluster Name of cluster where the resource exists. As gitops cluster <GitopsClusterNamespace,GitopsClusterName>
Namespace Namespace name where the resource exists. Kind Resource kubernetes type or kind Name Resource name as specified in its manifest. Status Resource health status. Indicates the status of its reconciliation. Message Resource health status message. It extends status field with information about the status. podinfo
helm release from a cluster default/progress-delivery-demo2-32
like this:apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+ name: podinfo
+ namespace: flux-system
+spec:
+ chart:
+ spec:
+ chart: podinfo
+ interval: 1m
+ reconcileStrategy: ChartVersion
+ sourceRef:
+ kind: HelmRepository
+ name: podinfo
+ version: 6.0.0
+ interval: 1m
+status:
+ conditions:
+ - message: Release reconciliation succeeded
+ reason: ReconciliationSucceeded
+ status: "True"
+ type: Ready
+
Cluster Namespace Kind Name Status Message default/progress-delivery-demo2-32
flux-system
HelmRelease
podinfo
Success
Release reconciliation succeeded
Filtering and Searching¶
Search
field allows for free-form text entry to query objects across all fields. For example, if we enter the term "podinfo", we will get matches for not only object names, but also strings from the Message
field:Kustomizations
.Kind=Kustomization
and Kind=HelmRelease
filters will show both Kustomizations
and HelmReleases
:
Feedback & Telemetry
Feedback¶
Anonymous Aggregate User Behavior Analytics¶
Why are we collecting this data?¶
For example¶
How long is the collected data stored?¶
What are we collecting?¶
gitops get bcrypt-hash
--password
, but not the value)
app=cli
, to know it’s a CLI metric
kube-system
namespace uuid
When is the data collected and where is it sent?¶
How?¶
Opting out¶
CLI¶
Dashboard¶
WEAVE_GITOPS_FEATURE_TELEMETRY
from the envVars
value.
Annotations ENTERPRISE¶
The
add-common-bases
annotation¶templates.weave.works/add-common-bases: "true"
annotation can be used to enable and disable the addition of a "common bases" Kustomization
to the list of rendered files. This kustomization will sync a path that is common to all clusters (clusters/bases
).The
inject-prune-annotation
annotation¶templates.weave.works/inject-prune-annotation: "true"
annotation can be used to enable and disable the injection of Flux's prune
annotation into certain resources.kustomize.toolkit.fluxcd.io/prune: disabled
annotation into every resource in the spec.resourcetemplates
that is not a cluster.x-k8s.io.Cluster
and not a gitops.weave.works.GitopsCluster
.Cluster
like AWSCluster
, KubeadmControlPlane
, AWSMachineTemplate
etc and let the CAPI controllers handle their removal.
Template CLI ENTERPRISE¶
gitops
CLI tool provides a set of commands to help you manage your templates.gitops create template
command that allows you to render templates locally and airgapped, without a full WGE installation in a Kubernetes cluster.Use cases¶
Restrictions¶
gitops create template
command only works with GitOpsTemplate
objects. It does not work with CAPITemplate
objects. You should be able to migrate any CAPITemplate
objects to GitOpsTemplate
with some small tweaks.CAPITemplate
and GitOpsTemplate
is the default value of these two annotations:
Annotation default value for CAPITemplate
default value for GitOpsTemplate
templates.weave.works/add-common-bases
"true"
"false"
templates.weave.works/inject-prune-annotations
"true"
"false"
Installation¶
gitops
CLI tool.Getting started¶
GitOpsTemplate
manifest with required parameters exported in the environment, the command can render the template to one of the following: 1. The current kubecontext directly (default) 1. stdout with --export
1. The local file system with --output-dir
, this will use the spec.resourcestemplates[].path
fields in the template to determine where to write the rendered files. This is the recommended approach for GitOps as you can then commit the rendered files to your repository.gitops create template \
+ --template-file capd-template.yaml \
+ --output-dir ./clusters/ \
+ --values CLUSTER_NAME=foo
+
Profiles¶
HelmRepository
object in the cluster, we instead read from your local helm cache.helm repo add weaveworks-charts https://raw.githubusercontent.com/weaveworks/weave-gitops-profile-examples/gh-pages
+helm repo update
+
cert-manager
repo and others.Supplying values to a profile¶
values.yaml
file to a profile using the values
parameter. For example we can supply cert-manager
's values.yaml
with:gitops create template \
+ --template-file capd-template.yaml \
+ --output-dir ./out \
+ --values CLUSTER_NAME=foo \
+ --profiles "name=cert-manager,namespace=foo,version=>0.1,values=cert-manager-values.yaml"
+
Using a config file¶
and executed with:
+
+```bash
+gitops create template --config config.yaml
+
CAPI Cluster Template Example ENTERPRISE¶
GitOpsTemplate
custom resource and then loaded into the management cluster.apiVersion: templates.weave.works/v1alpha2
+kind: GitOpsTemplate
+metadata:
+ name: cluster-template-development
+ labels:
+ weave.works/template-type: cluster
+spec:
+ description: This is the std. CAPD template
+ renderType: templating
+ params:
+ - name: CLUSTER_NAME
+ description: This is used for the cluster naming.
+ resourcetemplates:
+ - apiVersion: cluster.x-k8s.io/v1alpha3
+ kind: Cluster
+ metadata:
+ name: "{{ .params.CLUSTER_NAME }}"
+
Creating GitOpsTemplates ENTERPRISE¶
kubectl
.default
namespace. This can be changed by configuring the config.capi.namespace
value in the Weave GitOps Enterprise Helm Chart.Template Type¶
application
- for application templatescluster
- for cluster templatesterraform
- for Terraform templatespipeline
- for Pipeline templatesweave.works/template-type
label and setting the value as the name of the type.---
+apiVersion: templates.weave.works/v1alpha2
+kind: GitOpsTemplate
+metadata:
+ name: example-template
+ namespace: default
+ labels:
+ weave.works/template-type: pipeline
+spec:
+# ...
+
Template Components¶
templates.weave.works/COMPONENT-enabled
and have boolean
values.
profiles
kustomizations
credentials
annotations:
+ templates.weave.works/profiles-enabled: "true"
+ templates.weave.works/kustomizations-enabled: "false"
+ templates.weave.works/credentials-enabled: "true"
+
In-UI Template Editing¶
templates.weave.works/create-request
annotation is added by default to the first resource in the resourcetemplates
.Edit resource
button will appear in the GitOps UI which allows the editing of the resource by users, after which it will be re-rendered:
HelmRelease
Kustomization
HelmRepository
GitRepository
GitopsCluster
spec:
+ resourcetemplates:
+ - apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: my-configmap
+ data:
+ my-key: my-value
+ - apiVersion: source.toolkit.fluxcd.io/v1beta1
+ kind: HelmRepository
+ metadata:
+ # This annotation will add an `Edit resource` button in the UI for this resource
+ annotations:
+ templates.weave.works/create-request: ''
+ name: nginx
+ namespace: default
+
Introduction ENTERPRISE¶
GitOpsTemplate
enables application developers to self-service components and services easily through the Weave GitOps Dashboard. It's a simple YAML file that you can enrich with parameters, variables, metadata, and conditions.GitOpsTemplate
to template any resource that can be expressed in YAML (basic Kubernetes resources, Flux primitives, Terraform controller, Crossplane, Cluster API, etc.) into a standardised definition.MachinePool
for CAPI objects, a Flux Kustomization, or a Terraform Controller resource, to name a few examples.GitOpsTemplate
must be valid yaml
. Beyond this, a rendered template can create any resource you need.CAPITemplate
and GitOpsTemplate
is the default value of these two annotations:
Annotation default value for CAPITemplate
default value for GitOpsTemplate
templates.weave.works/add-common-bases
"true"
"false"
templates.weave.works/inject-prune-annotations
"true"
"false"
Parameters ENTERPRISE¶
spec.params
.Required params¶
CLUSTER_NAME
- RESOURCE_NAME
Parameters metadata¶
spec.params
. These will get rendered nicely in the UI form allowing users to understand what each field is for.
name
: The variable name within the resource templates.description
: Description of the parameter. This will be rendered in both the UI and CLI.options
: The list of possible values this parameter can be set to.required
- Whether the parameter must contain a non-empty value.default
- Default value of the parameter.spec:
+ params:
+ - name: IP_ADDRESS
+ description: 'The IP address of this service'
+ options: [1.2.3.4, 5.6.7.8]
+ default: 1.2.3.4
+
Adding Profiles to Templates ENTERPRISE¶
spec.charts
section.spec:
+ charts:
+ items:
+ - name: nginx
+ version: 1.0.0
+ targetNamespace: nginx
+ - name: cert-manager
+ targetNamespace: cert-manager
+
nginx
and cert-manager
resources to their templated resources, ready for deployment to their cluster.Profile Operator Settings¶
spec.charts
section and the template variables available to them.
Key Description Template vars helmRepositoryTemplate.path
Path the HelmRepository
will be written toparams
items
list of charts to configure, see below spec.charts.items
entries and the template variables available to them.
Key Description Template vars template.content
Full or partial HelmRelease
CR templateparams
template.path
Path the HelmRelease will be written to params
chart
Shortcut to HelmRelease.spec.chart.spec.chart
version
Shortcut to HelmRelease.spec.chart.spec.version
targetNamespace
Shortcut to HelmRelease.spec.targetNamespace
values
Shortcut to HelmRelease.spec.values
params
layer
Layer to install as required
(default=false) Allow the user to de-select this profile editable
(default=false) Allow the user to edit the values.yaml of this profile Expand for a complete yaml example
spec:
+charts:
+ helmRepositoryTemplate:
+ path: clusters/${CLUSTER_NAME}/helm-repositories.yaml
+ items:
+ - chart: cert-manager
+ version: v1.5.3
+ editable: false
+ required: true
+ values:
+ installCRDs: ${CERT_MANAGER_INSTALL_CRDS}
+ targetNamespace: cert-manager
+ layer: layer-1
+ template:
+ path: clusters/${CLUSTER_NAME}/cert-manager.yaml
+ content:
+ metadata:
+ labels:
+ app.kubernetes.io/name: cert-manager
+ spec:
+ retries: ${CERT_MANAGER_RETRY_COUNT}
+
template.content
will be merged over the top of a default HelmRelease
CR so it does not need to be complete.Declaring Profiles with Annotations¶
spec.charts
section as detailed above to declare profiles.capi.weave.works/profile-INDEX
annotation.annotations:
+ capi.weave.works/profile-0: '{"name": "NAME", "version": "VERSION", "editable": EDITABLE, "namespace": "NAMESPACE"}'
+
name
- is the name of the profile in the default profiles repositoryversion
- (optional) will choose the default versionnamespace
- (optional) is the default target namespace for the profileeditable
- (optional, default=false
), allow the user to de-select this profile, making it a default instead of a requirement.
Quickstart GitOps Templates ENTERPRISE¶
Quickstart
templates are GitOpsTemplate
s that you could use when getting started with Weave GitOps Enterprise. It aims to provide a simplified basic experience.Getting Started¶
HelmRelease
object to your Weave GitOps Enterprise configuration repo for your management cluster.Expand to view
---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: GitRepository
+metadata:
+name: weave-gitops-quickstart
+namespace: flux-system
+spec:
+interval: 10m0s
+ref:
+ branch: main
+url: https://github.com/weaveworks/weave-gitops-quickstart
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+name: quickstart-templates
+namespace: flux-system
+spec:
+chart:
+ spec:
+ chart: "quickstart-templates"
+ version: ">=0.1.0"
+ sourceRef:
+ kind: GitRepository
+ name: weave-gitops-quickstart
+ namespace: flux-system
+interval: 10m0s
+
HelmRelease
has been successfully deployed to your cluster, navigate to your Weave GitOps UI Dashboard. You will see that the templates
Chart is now deployed to your cluster.Templates
tab in the sidebar, you will see the Quickstart templates are now available for use:Available Templates¶
pipeline-view
: A template to create a sample pipeline to visualize a HelmRelease
application delivered to dev, test and prod environments.pipeline-promotion-resources
: A template to create the Flux Notification Controller resources required for promoting applications via pipelines.pipeline-view-promote-by-cluster
: A template to create pipelines for hard tenancy when applications are isolated by cluster.pipeline-view-promote-by-namespace
: A template to create pipelines for soft tenancy when applications are isolated by namespace.Using
GitOpsTemplate
s as a Platform Engineer¶Using
GitOpsTemplate
s as an Application Developer¶
Rendered Template Paths ENTERPRISE¶
Configuring Paths¶
spec.resourcetemplates[].path
field.
Expand to see example
spec:
+resourcetemplates:
+ // highlight-next-line
+ - path: clusters/${CLUSTER_NAME}/definition/cluster.yaml
+ content:
+ - apiVersion: cluster.x-k8s.io/v1alpha4
+ kind: Cluster
+ metadata:
+ name: ${CLUSTER_NAME}
+ ...
+ - apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4
+ kind: AWSCluster
+ metadata:
+ name: ${CLUSTER_NAME}
+ ...
+ // highlight-next-line
+ - path: clusters/${CLUSTER_NAME}/workloads/helmreleases.yaml
+ content:
+ - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+ kind: HelmRelease
+ metadata:
+ name: ${CLUSTER_NAME}-nginx
+ ...
+ - apiVersion: helm.toolkit.fluxcd.io/v2beta1
+ kind: HelmRelease
+ metadata:
+ name: ${CLUSTER_NAME}-cert-manager
+ ...
+
Configuring paths for
charts
¶spec.charts.helmRepositoryTemplate.path
and spec.charts.items[].template.path
fields can be used to specify the paths of these resources:spec:
+ charts:
+ helmRepositoryTemplate:
+ // highlight-next-line
+ path: clusters/${CLUSTER_NAME}/workloads/helm-repo.yaml
+ items:
+ - chart: cert-manager
+ version: 0.0.8
+ template:
+ // highlight-next-line
+ path: clusters/${CLUSTER_NAME}/workloads/cert-manager.yaml
+
Default Paths¶
spec.resourcetemplates[].path
is omitted, a default path for the rendered template is calculated.CLUSTER_NAME
- RESOURCE_NAME
required
in the template definition:spec:
+ params:
+ - name: RESOURCE_NAME
+ required: true
+ # or
+ - name: CLUSTER_NAME
+ required: true
+
add-common-bases
annotation feature always use a calculated default path. If you are using these features, one of CLUSTER_NAME
or RESOURCE_NAME
must be provided, even if you specify a path
for all the other resources in the template.CLUSTER_NAME
or RESOURCE_NAME
, required. - From the params: NAMESPACE
, default: default
- From values.yaml
for the Weave GitOps Enterprise mccp
chart: values.config.capi.repositoryPath
, default: clusters/management/clusters
${repositoryPath}/${NAMESPACE}/${CLUSTER_OR_RESOURCE_NAME}.yaml
CLUSTER_NAME
as my-cluster
will result in the path: clusters/management/clusters/default/my-cluster.yaml
Resource templates ENTERPRISE¶
spec.resourcetemplates
section of the template.The
content
key¶content
key is used to define a list of resources:spec:
+ resourcetemplates:
+ - content:
+ - apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: nginx
+ - apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: cert-manager
+
The
raw
key¶raw
key is used to define a raw string that will be written to the specified path.spec:
+ resourcetemplates:
+ - path: "helm-release.yaml"
+ raw: |
+ apiVersion: helm.toolkit.fluxcd.io/v2beta1
+ kind: HelmRelease
+ metadata:
+ name: podinfo
+ namespace: prod-github
+ spec:
+ interval: 1m
+ chart:
+ spec:
+ chart: podinfo
+ version: "6.0.0" # {"$promotion": "flux-system:podinfo-github:prod"}
+ sourceRef:
+ kind: HelmRepository
+ name: podinfo
+ interval: 1m
+
raw
key is not compatible with the content
key. Only one of the two can be used.raw
key data must still be a valid Kubernetes unstructured object.
Supported Templating Languages ENTERPRISE¶
spec.renderType
.Envsubst¶
envsubst
, which is short for 'environment substitution', uses envsubst for rendering. This templating format is used by clusterctl.${VAR_NAME}
syntax.Supported Functions¶
Expression Meaning ${var}
Value of $var
${#var}
String length of $var
${var^}
Uppercase first character of $var
${var^^}
Uppercase all characters in $var
${var,}
Lowercase first character of $var
${var,,}
Lowercase all characters in $var
${var:n}
Offset $var
n
characters from start ${var:n:len}
Offset $var
n
characters with max length of len
${var#pattern}
Strip shortest pattern
match from start ${var##pattern}
Strip longest pattern
match from start ${var%pattern}
Strip shortest pattern
match from end ${var%%pattern}
Strip longest pattern
match from end ${var-default}
If $var
is not set, evaluate expression as $default
${var:-default}
If $var
is not set or is empty, evaluate expression as $default
${var=default}
If $var
is not set, evaluate expression as $default
${var:=default}
If $var
is not set or is empty, evaluate expression as $default
${var/pattern/replacement}
Replace as few pattern
matches as possible with replacement
${var//pattern/replacement}
Replace as many pattern
matches as possible with replacement
${var/#pattern/replacement}
Replace pattern
match with replacement
from $var
start ${var/%pattern/replacement}
Replace pattern
match with replacement
from $var
endTemplating¶
{{ .params.CLUSTER_NAME }}
where params are provided by the .params
variable. Template functions can also be used with the syntax {{ .params.CLUSTER_NAME | FUNCTION }}
.Supported Functions¶
Function Type Functions String Functions trim, wrap, randAlpha, plural String List Functions splitList, sortAlpha Integer Math Functions add, max, mul Integer Slice Functions until, untilStep Float Math Functions addf, maxf, mulf Date Functions now, date Defaults Functions default, empty, coalesce, fromJson, toJson, toPrettyJson, toRawJson, ternary Encoding Functions b64enc, b64dec Lists and List Functions list, first, uniq Dictionaries and Dict Functions get, set, dict, hasKey, pluck, dig, deepCopy Type Conversion Functions atoi, int64, toString Flow Control Functions fail UUID Functions uuidv4 Version Comparison Functions semver, semverCompare Reflection typeOf, kindIs, typeIsLike Custom Delimiters¶
renderType: templating
are {{
and }}
. These can be changed by setting the templates.weave.works/delimiters
annotation on the template. For example:
templates.weave.works/delimiters: "{{,}}"
- defaulttemplates.weave.works/delimiters: "${{,}}"
${{
and }}
, for example "${{ .params.CLUSTER_NAME }}"
{{
in yaml is invalid syntax and needs to be quoted. If you need to provide an unquoted number value like replicas: 3
you should use these delimiters. - replicas: {{ .params.REPLICAS }}
Invalid yaml - replicas: "{{ .params.REPLICAS }}"
Valid yaml, incorrect type. The type is a string
not a number
and will fail validation. - replicas: ${{ .params.REPLICAS }}
Valid yaml and correct number
type.templates.weave.works/delimiters: "<<,>>"
<<
and >>
, for example << .params.CLUSTER_NAME >>
Version Information ENTERPRISE¶
Migration notes¶
v1alpha1
to v1alpha2
¶v1alpha1
to v1alpha2
(for example in git) you will need to: 1. Update the apiVersion
: 1. for GitopsTemplate
update the apiVersion to templates.weave.works/v1alpha2
1. for CAPITemplate
update the apiVersion to capi.weave.works/v1alpha2
1. Move the spec.resourcetemplates
field to spec.resourcetemplates[0].content
1. Either leave the spec.resourcetemplates[0].path
field empty or give it a sensible value.kubectl apply -f capi-template.yaml
1. Run flux reconcile kustomization --with-source flux-system
twice.Conversion Webhook¶
v1alpha1
templates to v1alpha2
manually in git.v1alpha2
(default) notes¶spec.resourcetemplates
from a list of objects to a list of files with a path
and content
:spec:
+ resourcetemplates:
+ - path: "clusters/{{ .params.CLUSTER_NAME }}.yaml"
+ content:
+ - apiVersion: cluster.x-k8s.io/v1alpha3
+ kind: Cluster
+ metadata:
+ name: "{{ .params.CLUSTER_NAME }}"
+ path: "clusters/{{ .params.CLUSTER_NAME }}.yaml"
+
v1alpha1
notes¶spec.resourcetemplates
as a list of resources to render.spec:
+ resourcetemplates:
+ - apiVersion: cluster.x-k8s.io/v1alpha3
+ kind: Cluster
+ metadata:
+ name: "{{ .params.CLUSTER_NAME }}"
+
templates.weave.works/v1alpha1
+
+GitOpsSet
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+apiVersion
+string
+
+templates.weave.works/v1alpha1
+
+
+
+
+kind
+string
+
+
+GitOpsSet
+
+
+
+
+metadata
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+
+Refer to the Kubernetes API documentation for the fields of the
+
+metadata
field.
+
+
+
+
+spec
+
+
+GitOpsSetSpec
+
+
+
+
+
+
+
+
+
+
+
+
+
+suspend
+
+bool
+
+
+(Optional)
+
+
+
+
+
+generators
+
+
+[]GitOpsSetGenerator
+
+
+
+
+
+
+
+
+templates
+
+
+[]GitOpsSetTemplate
+
+
+
+
+
+
+
+
+
+serviceAccountName
+
+string
+
+
+(Optional)
+
+
+
+
+
+
+status
+
+
+GitOpsSetStatus
+
+
+
+
+APIClientGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+interval
+
+
+Kubernetes meta/v1.Duration
+
+
+
+
+
+
+
+
+endpoint
+
+string
+
+
+(Optional)
+
+
+
+
+
+method
+
+string
+
+
+
+
+
+
+
+jsonPath
+
+string
+
+
+
+
+
+
+
+headersRef
+
+
+HeadersReference
+
+
+
+(Optional)
+
+
+
+
+
+body
+
+
+Kubernetes pkg/apis/apiextensions/v1.JSON
+
+
+
+(Optional)
+
+
+
+
+
+singleElement
+
+bool
+
+
+(Optional)
+
+
+
+
+
+
+secretRef
+
+
+Kubernetes core/v1.LocalObjectReference
+
+
+
+
+ClusterGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+selector
+
+
+Kubernetes meta/v1.LabelSelector
+
+
+
+(Optional)
+
+ConfigGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+kind
+
+string
+
+
+
+
+
+
+
+
+name
+
+string
+
+
+
+GitOpsSetGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+list
+
+
+ListGenerator
+
+
+
+
+
+
+
+
+pullRequests
+
+
+PullRequestGenerator
+
+
+
+
+
+
+
+
+gitRepository
+
+
+GitRepositoryGenerator
+
+
+
+
+
+
+
+
+ociRepository
+
+
+OCIRepositoryGenerator
+
+
+
+
+
+
+
+
+matrix
+
+
+MatrixGenerator
+
+
+
+
+
+
+
+
+cluster
+
+
+ClusterGenerator
+
+
+
+
+
+
+
+
+apiClient
+
+
+APIClientGenerator
+
+
+
+
+
+
+
+
+imagePolicy
+
+
+ImagePolicyGenerator
+
+
+
+
+
+
+
+
+
+config
+
+
+ConfigGenerator
+
+
+
+
+GitOpsSetNestedGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+name
+
+string
+
+
+(Optional)
+
+
+
+
+
+list
+
+
+ListGenerator
+
+
+
+
+
+
+
+
+gitRepository
+
+
+GitRepositoryGenerator
+
+
+
+
+
+
+
+
+ociRepository
+
+
+OCIRepositoryGenerator
+
+
+
+
+
+
+
+
+pullRequests
+
+
+PullRequestGenerator
+
+
+
+
+
+
+
+
+cluster
+
+
+ClusterGenerator
+
+
+
+
+
+
+
+
+apiClient
+
+
+APIClientGenerator
+
+
+
+
+
+
+
+
+imagePolicy
+
+
+ImagePolicyGenerator
+
+
+
+
+
+
+
+
+
+config
+
+
+ConfigGenerator
+
+
+
+
+GitOpsSetSpec
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+suspend
+
+bool
+
+
+(Optional)
+
+
+
+
+
+generators
+
+
+[]GitOpsSetGenerator
+
+
+
+
+
+
+
+
+templates
+
+
+[]GitOpsSetTemplate
+
+
+
+
+
+
+
+
+
+serviceAccountName
+
+string
+
+
+(Optional)
+
+GitOpsSetStatus
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+ReconcileRequestStatus
+
+
+github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
+
+
+
+
+(Members of ReconcileRequestStatus
are embedded into this type.)
+
+
+
+
+observedGeneration
+
+int64
+
+
+(Optional)
+
+
+
+
+
+conditions
+
+
+[]Kubernetes meta/v1.Condition
+
+
+
+(Optional)
+
+
+
+
+
+
+inventory
+
+
+ResourceInventory
+
+
+
+(Optional)
+
+GitOpsSetTemplate
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+repeat
+
+string
+
+
+
+
+
+
+
+
+content
+
+
+k8s.io/apimachinery/pkg/runtime.RawExtension
+
+
+
+
+GitRepositoryGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+repositoryRef
+
+string
+
+
+
+
+
+
+
+files
+
+
+[]RepositoryGeneratorFileItem
+
+
+
+
+
+
+
+
+
+directories
+
+
+[]RepositoryGeneratorDirectoryItem
+
+
+
+
+HeadersReference
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+kind
+
+string
+
+
+
+
+
+
+
+
+name
+
+string
+
+
+
+ImagePolicyGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+policyRef
+
+string
+
+
+
+ListGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+elements
+
+
+[]Kubernetes pkg/apis/apiextensions/v1.JSON
+
+
+
+
+MatrixGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+generators
+
+
+[]GitOpsSetNestedGenerator
+
+
+
+
+
+
+
+
+
+singleElement
+
+bool
+
+
+(Optional)
+
+OCIRepositoryGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+repositoryRef
+
+string
+
+
+
+
+
+
+
+files
+
+
+[]RepositoryGeneratorFileItem
+
+
+
+
+
+
+
+
+
+directories
+
+
+[]RepositoryGeneratorDirectoryItem
+
+
+
+
+PullRequestGenerator
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+interval
+
+
+Kubernetes meta/v1.Duration
+
+
+
+
+
+
+
+
+driver
+
+string
+
+
+
+
+
+
+
+serverURL
+
+string
+
+
+(Optional)
+
+
+
+
+
+repo
+
+string
+
+
+
+
+
+
+
+secretRef
+
+
+Kubernetes core/v1.LocalObjectReference
+
+
+
+
+
+
+
+
+labels
+
+[]string
+
+
+(Optional)
+
+
+
+
+
+
+forks
+
+bool
+
+
+(Optional)
+
+RepositoryGeneratorDirectoryItem
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+path
+
+string
+
+
+
+
+
+
+
+
+exclude
+
+bool
+
+
+
+RepositoryGeneratorFileItem
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+path
+
+string
+
+
+
+ResourceInventory
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+
+entries
+
+
+[]ResourceRef
+
+
+
+
+ResourceRef
+
+
+
+
+
+
+
+
+Field
+Description
+
+
+
+
+id
+
+string
+
+
+
+
+
+
+
+
+v
+
+string
+
+
+
+gen-crd-api-reference-docs
Installation ENTERPRISE¶
Prerequisites¶
Installing the gitopssets-controller¶
apiVersion: v1
+kind: Namespace
+metadata:
+ name: gitopssets-system
+---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+ name: weaveworks-oci-charts
+ namespace: gitopssets-system
+spec:
+ interval: 1m
+ type: oci
+ url: oci://ghcr.io/weaveworks/charts
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+ name: gitopssets-controller
+ namespace: gitopssets-system
+spec:
+ interval: 10m
+ chart:
+ spec:
+ chart: gitopssets-controller
+ sourceRef:
+ kind: HelmRepository
+ name: weaveworks-oci-charts
+ namespace: gitopssets-system
+ version: 0.15.3
+ install:
+ crds: CreateReplace
+ upgrade:
+ crds: CreateReplace
+
Customising the Generators¶
gitopssets-controller:
+ enabled: true
+ controllerManager:
+ manager:
+ args:
+ - --health-probe-bind-address=:8081
+ - --metrics-bind-address=127.0.0.1:8080
+ - --leader-elect
+ # enable the cluster generator which is not enabled by default
+ - --enabled-generators=GitRepository,Cluster,PullRequests,List,APIClient,Matrix,Config
+
Gitopssets Controller Releases ENTERPRISE¶
v0.16.1¶
v0.16.0¶
v0.15.3¶
v0.15.2¶
v0.15.1¶
v0.15.0¶
v0.14.1¶
v0.14.0¶
v0.13.3¶
v0.13.1¶
v0.13.0¶
v0.12.0¶
v0.11.0¶
repeat
mechanism within maps not just arraysv0.10.0¶
v0.9.0¶
v0.8.0¶
v0.7.0¶
v0.6.1¶
GitOpsSets ENTERPRISE¶
Introduction¶
Use Cases¶
Templating from Generators¶
Basics¶
General behaviour¶
spec.suspend
flag to be true.reconcile.fluxcd.io/requestedAt
annotation.Generation¶
List
generator.apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: gitopsset-sample
+spec:
+ generators:
+ - list:
+ elements:
+ - env: dev
+ team: dev-team
+ - env: production
+ team: ops-team
+ - env: staging
+ team: ops-team
+
env
and team
.Rendering templates¶
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: gitopsset-sample
+spec:
+ generators:
+ - list:
+ elements:
+ - env: dev
+ team: dev-team
+ - env: production
+ team: ops-team
+ - env: staging
+ team: ops-team
+ templates:
+ - content:
+ kind: Kustomization
+ apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+ metadata:
+ name: "{{ .Element.env }}-demo"
+ labels:
+ app.kubernetes.io/name: go-demo
+ app.kubernetes.io/instance: "{{ .Element.env }}"
+ com.example/team: "{{ .Element.team }}"
+ spec:
+ interval: 5m
+ path: "./examples/kustomize/environments/{{ .Element.env }}"
+ prune: true
+ sourceRef:
+ kind: GitRepository
+ name: go-demo-repo
+
Element
scope, so .Element.dev
refers to the dev
field from the List element.Element
scope, not just List generators..Element
field, a .ElementIndex
is also available, this represents the zero-based index into the set of generated elements..ElementIndex
1 may not be the same element as what .ElementIndex
1 was the previous time, and this could cause resources to be updated unnecessarily with undesirable effects.Repeating templates¶
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: repeated-gitopsset-sample
+spec:
+ generators:
+ - list:
+ elements:
+ - env: dev
+ team: dev-team
+ teams:
+ - name: "team1"
+ - name: "team2"
+ - name: "team3"
+ - env: staging
+ team: staging-team
+ teams:
+ - name: "team4"
+ - name: "team5"
+ - name: "team6"
+ templates:
+ - repeat: "{ .teams }"
+ content:
+ kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: "{{ .Repeat.name }}-demo"
+ data:
+ name: "{{ .Repeat.name }}-demo"
+ team: "{{ .Element.team }}"
+
repeat
field is a JSONPath expression that is applied to each element during the template rendering.repeat
will have two separate scopes for the template params, .Element
which is the top-level element generated by the generator, and the additional .Repeat
scope, which is the repeating element.ConfigMaps
are generated, three for the "dev-team" and three for the "staging-team"..ElementIndex
, for repeated elements both .ElementIndex
and .RepeatIndex
are available.Delimiters¶
{{
and }}
, which is the same as the Go template engine.GitOpsSet
:apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: gitopsset-sample
+ annotations:
+ templates.weave.works/delimiters: "${{,}}"
+
Unquoted values¶
{{
is invalid syntax and needs to be quoted. If you need to provide an unquoted number value like replicas: 3
you should use the ${{,}}
delimiters.
replicas: {{ .params.REPLICAS }}
Invalid yamlreplicas: "{{ .params.REPLICAS }}"
Valid yaml, incorrect type. The type is a string not a number and will fail validation.replicas: ${{ .params.REPLICAS }}
Valid yaml and correct number type.apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: gitopsset-sample
+ annotations:
+ templates.weave.works/delimiters: "${{,}}"
+spec:
+ generators:
+ - list:
+ elements:
+ - env: dev
+ resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ - env: staging
+ resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ templates:
+ - content:
+ kind: Deployment
+ apiVersion: apps/v1
+ metadata:
+ name: go-demo
+ spec:
+ template:
+ spec:
+ containers:
+ - name: go-demo
+ image: weaveworks/go-demo:0.2.0
+ resources: ${{ .Element.resources | toJson }}
+
{{,}}
delimiters this would fail as the "resources" field would need to be quoted.Generators¶
List generator¶
GitRepository generator¶
GitRepository
generator operates on Flux GitRepositories.GitRepository
is updated, this will trigger a regeneration of templates.Generation from files¶
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: repository-sample
+spec:
+ generators:
+ - gitRepository:
+ repositoryRef: go-demo-repo
+ files:
+ - path: examples/generation/dev.yaml
+ - path: examples/generation/production.yaml
+ - path: examples/generation/staging.yaml
+ templates:
+ - content:
+ kind: Kustomization
+ apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+ metadata:
+ name: "{{ .Element.env }}-demo"
+ labels:
+ app.kubernetes.io/name: go-demo
+ app.kubernetes.io/instance: "{{ .Element.env }}"
+ com.example/team: "{{ .Element.team }}"
+ spec:
+ interval: 5m
+ path: "./examples/kustomize/environments/{{ .Element.env }}"
+ prune: true
+ sourceRef:
+ kind: GitRepository
+ name: go-demo-repo
+
GitRepository
called go-demo-repo
in the same namespace as the GitOpsSet
will be tracked, and Kustomization
resources will be generated from the three files listed.env: dev
+team: developers
+
GitRepository
will result in rereconciliation of the templates into the cluster.Generation from directories¶
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ labels:
+ app.kubernetes.io/name: gitopsset
+ app.kubernetes.io/instance: gitopsset-sample
+ app.kubernetes.io/part-of: gitopssets-controller
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/created-by: gitopssets-controller
+ name: repository-sample
+spec:
+ generators:
+ - gitRepository:
+ repositoryRef: go-demo-repo
+ directories:
+ - path: examples/kustomize/environments/*
+ templates:
+ - content:
+ kind: Kustomization
+ apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+ metadata:
+ name: "{{ .Element.Base }}-demo"
+ labels:
+ app.kubernetes.io/name: go-demo
+ app.kubernetes.io/instance: "{{ .Element.Base }}"
+ com.example/team: "{{ .Element.Base }}"
+ spec:
+ interval: 5m
+ path: "{{ .Element.Directory }}"
+ prune: true
+ sourceRef:
+ kind: GitRepository
+ name: go-demo-repo
+
GitRepository
called go-demo-repo
in the same namespace as the GitOpsSet
will be tracked, and Kustomization
resources are generated from paths within the examples/kustomize/environments/*
directory within the repository..Element.Directory
which will be a repo-relative path and .Element.Base
which contains the last element of the path, for example, for a directory ./examples/kustomize/environments/production
this will be production
.apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: repository-sample
+spec:
+ generators:
+ - gitRepository:
+ repositoryRef: go-demo-repo
+ directories:
+ - path: examples/kustomize/environments/*
+ - path: examples/kustomize/environments/production
+ exclude: true
+ templates:
+ - content:
+
examples/kustomize/environments
will be generated, but not examples/kustomize/environments/production
.OCIRepository generator¶
OCIRepository
generator operates on Flux OCIRepositories.OCIRepository
is updated, this will trigger a regeneration of templates.OCIRepository
generator operates in exactly the same way as the GitRepository generator, except it operates on OCIRepositories.Generation from files¶
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: oci-repository-sample
+spec:
+ generators:
+ - ociRepository:
+ repositoryRef: go-demo-oci-repo
+ files:
+ - path: examples/generation/dev.yaml
+ - path: examples/generation/production.yaml
+ - path: examples/generation/staging.yaml
+ templates:
+ - content:
+ kind: Kustomization
+ apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+ metadata:
+ name: "{{ .Element.env }}-demo"
+ labels:
+ app.kubernetes.io/name: go-demo
+ app.kubernetes.io/instance: "{{ .Element.env }}"
+ com.example/team: "{{ .Element.team }}"
+ spec:
+ interval: 5m
+ path: "./examples/kustomize/environments/{{ .Element.env }}"
+ prune: true
+ sourceRef:
+ kind: GitRepository
+ name: go-demo-repo
+
PullRequests generator¶
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: pull-requests-sample
+spec:
+ generators:
+ - pullRequests:
+ interval: 5m
+ driver: github
+ repo: bigkevmcd/go-demo
+ secretRef:
+ name: github-secret
+ templates:
+ - content:
+ apiVersion: source.toolkit.fluxcd.io/v1beta2
+ kind: GitRepository
+ metadata:
+ name: "pr-{{ .Element.Number }}-gitrepository"
+ namespace: default
+ spec:
+ interval: 5m0s
+ url: "{{ .Element.CloneURL }}"
+ ref:
+ branch: "{{ .Element.Branch }}"
+ - content:
+ apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+ kind: Kustomization
+ metadata:
+ name: "pr-{{ .Element.Number }}-demo"
+ namespace: default
+ spec:
+ interval: 5m
+ path: "./examples/kustomize/environments/dev"
+ prune: true
+ targetNamespace: "{{ .Element.Branch }}-ns"
+ sourceRef:
+ kind: GitRepository
+ name: "pr-{{ .Element.Number }}-gitrepository"
+
GitRepository
and a Kustomization
to deploy.serverURL
field and point it to your own installation.driver
field can be github
or gitlab
or bitbucketserver
, other options can be supported from go-scm.forks
flag field can be used to indicate whether to include forks in the target pull requests or not. If set to true
any pull request from a fork repository will be included, otherwise if false
or not indicated the pull requests from fork repositories are discarded.- pullRequests:
+ interval: 5m
+ driver: github
+ repo: bigkevmcd/go-demo
+ secretRef:
+ name: github-secret
+ forks: false
+ labels:
+ - deploy
+
Number
this is generated as a string representationBranch
this is the source branchHeadSHA
this is the SHA of the commit in the merge branchCloneURL
this is the HTTPS clone URL for this repositoryCloneSSHURL
this is the SSH clone URL for this repositoryFork
this indicates whether the pull request is from a fork (true) or not (false)$ kubectl create secret generic github-secret \
+ --from-literal password=<insert access token here>
+
Matrix generator¶
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: matrix-sample
+spec:
+ generators:
+ - matrix:
+ generators:
+ - gitRepository:
+ repositoryRef: go-demo-repo
+ files:
+ - path: examples/generation/dev.yaml
+ - path: examples/generation/production.yaml
+ - path: examples/generation/staging.yaml
+ - list:
+ elements:
+ - cluster: dev-cluster
+ version: 1.0.0
+
env: dev
+team: developers
+
- env: dev
+ team: developers
+ cluster: dev-cluster
+ version: 1.0.0
+- env: staging
+ team: staging-team
+ cluster: dev-cluster
+ version: 1.0.0
+- env: production
+ team: production-team
+ cluster: dev-cluster
+ version: 1.0.0
+
Element
scope.apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: matrix-sample
+spec:
+ generators:
+ - matrix:
+ generators:
+ - gitRepository:
+ repositoryRef: go-demo-repo
+ files:
+ - path: examples/generation/dev.yaml
+ - path: examples/generation/production.yaml
+ - path: examples/generation/staging.yaml
+ - list:
+ elements:
+ - cluster: dev-cluster
+ version: 1.0.0
+ templates:
+ - content:
+ kind: Kustomization
+ apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+ metadata:
+ name: "{{ .Element.env }}-demo"
+ labels:
+ app.kubernetes.io/name: go-demo
+ app.kubernetes.io/instance: "{{ .Element.env }}"
+ com.example/team: "{{ .Element.team }}"
+ com.example/cluster: "{{ .Element.cluster }}"
+ com.example/version: "{{ .Element.version }}"
+ spec:
+ interval: 5m
+ path: "./examples/kustomize/environments/{{ .Element.env }}"
+ prune: true
+ sourceRef:
+ kind: GitRepository
+ name: go-demo-repo
+
Optional Name for Matrix elements¶
ImagePolicy
generator outputs a latestImage
field, if you have two, they will collide.apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: matrix-sample
+spec:
+ generators:
+ - matrix:
+ generators:
+ - name: gen1
+ gitRepository:
+ repositoryRef: go-demo-repo
+ files:
+ - path: examples/generation/dev.yaml
+ - path: examples/generation/production.yaml
+ - path: examples/generation/staging.yaml
+ - name: gen2
+ list:
+ elements:
+ - cluster: dev-cluster
+ version: 1.0.0
+ templates:
+ - content:
+ kind: Kustomization
+ apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+ metadata:
+ name: "{{ .Element.gen1.env }}-demo"
+ labels:
+ app.kubernetes.io/name: go-demo
+ app.kubernetes.io/instance: "{{ .Element.gen1.env }}"
+ com.example/team: "{{ .Element.gen1.team }}"
+ com.example/cluster: "{{ .Element.gen2.cluster }}"
+ com.example/version: "{{ .Element.gen2.version }}"
+ spec:
+ interval: 5m
+ path: "./examples/kustomize/environments/{{ .Element.gen1.env }}"
+ prune: true
+ sourceRef:
+ kind: GitRepository
+ name: go-demo-repo
+
- gen1:
+ env: dev
+ team: developers
+ gen2:
+ cluster: dev-cluster
+ version: 1.0.0
+- gen1:
+ env: staging
+ team: staging-team
+ gen2:
+ cluster: dev-cluster
+ version: 1.0.0
+- gen1:
+ env: production
+ team: production-team
+ gen2:
+ cluster: dev-cluster
+ version: 1.0.0
+
Single Element for Matrix¶
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: single-element-matrix-sample
+spec:
+ generators:
+ - matrix:
+ singleElement: true
+ generators:
+ - name: staging
+ cluster:
+ selector:
+ matchLabels:
+ env: staging
+ - name: production
+ cluster:
+ selector:
+ matchLabels:
+ env: production
+
- production:
+ - ClusterAnnotations: {}
+ ClusterLabels:
+ env: production
+ ClusterName: production-cluster1
+ ClusterNamespace: clusters
+ - ClusterAnnotations: {}
+ ClusterLabels:
+ env: production
+ ClusterName: production-cluster2
+ ClusterNamespace: clusters
+ staging:
+ - ClusterAnnotations: {}
+ ClusterLabels:
+ env: staging
+ ClusterName: staging-cluster1
+ ClusterNamespace: clusters
+ - ClusterAnnotations: {}
+ ClusterLabels:
+ env: staging
+ ClusterName: staging-cluster2
+ ClusterNamespace: clusters
+
singleElement
flag:- production:
+ ClusterAnnotations: {}
+ ClusterLabels:
+ env: production
+ ClusterName: production-cluster1
+ ClusterNamespace: clusters
+ staging:
+ ClusterAnnotations: {}
+ ClusterLabels:
+ env: staging
+ ClusterName: staging-cluster1
+ ClusterNamespace: clusters
+- production:
+ ClusterAnnotations: {}
+ ClusterLabels:
+ env: production
+ ClusterName: production-cluster2
+ ClusterNamespace: clusters
+ staging:
+ ClusterAnnotations: {}
+ ClusterLabels:
+ env: staging
+ ClusterName: staging-cluster1
+ ClusterNamespace: clusters
+- production:
+ ClusterAnnotations: {}
+ ClusterLabels:
+ env: production
+ ClusterName: production-cluster1
+ ClusterNamespace: clusters
+ staging:
+ ClusterAnnotations: {}
+ ClusterLabels:
+ env: staging
+ ClusterName: staging-cluster2
+ ClusterNamespace: clusters
+- production:
+ ClusterAnnotations: {}
+ ClusterLabels:
+ env: production
+ ClusterName: production-cluster2
+ ClusterNamespace: clusters
+ staging:
+ ClusterAnnotations: {}
+ ClusterLabels:
+ env: staging
+ ClusterName: staging-cluster2
+ ClusterNamespace: clusters
+
singleElement
case, there is only one generated element, only one template will be rendered for each content item..Matrix
name.apiClient generator¶
curl \
+ -H "Accept: application/vnd.github+json" \
+ -H "Authorization: Bearer <YOUR-TOKEN>"\
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ https://api.github.com/repos/OWNER/REPO/pulls
+
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ labels:
+ app.kubernetes.io/name: gitopsset
+ app.kubernetes.io/instance: gitopsset-sample
+ app.kubernetes.io/part-of: gitopssets-controller
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/created-by: gitopssets-controller
+ name: api-client-sample
+spec:
+ generators:
+ - apiClient:
+ interval: 5m
+ endpoint: https://api.github.com/repos/bigkevmcd/go-demo/pulls
+ headersRef:
+ name: github-secret
+ kind: Secret
+ templates:
+ - content:
+ apiVersion: source.toolkit.fluxcd.io/v1beta2
+ kind: GitRepository
+ metadata:
+ name: "pr-{{ .Element.id | toJson}}-gitrepository"
+ namespace: default
+ spec:
+ interval: 5m0s
+ url: "{{ .Element.head.repo.clone_url }}"
+ ref:
+ branch: "{{ .Element.head.ref }}"
+ - content:
+ apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+ kind: Kustomization
+ metadata:
+ name: "pr-{{ .Element.id | toJson }}-demo"
+ namespace: default
+ spec:
+ interval: 5m
+ path: "./examples/kustomize/environments/dev"
+ prune: true
+ targetNamespace: "{{ .Element.head.ref }}-ns"
+ sourceRef:
+ kind: GitRepository
+ name: "pr-{{ .Element.id | toJson }}-gitrepository"
+
apiVersion: v1
+kind: Secret
+metadata:
+ name: github-secret
+ namespace: default
+type: Opaque
+stringData:
+ Accept: application/vnd.github+json
+ Authorization: Bearer ghp_<redacted>
+ X-GitHub-Api-Version: "2022-11-28"
+
APIClient JSONPath¶
{
+ "things": [
+ {
+ "env": "dev",
+ "team": "dev-team"
+ },
+ {
+ "env": "production",
+ "team": "opts-team"
+ },
+ {
+ "env": "staging",
+ "team": "opts-team"
+ }
+ ]
+}
+
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ labels:
+ app.kubernetes.io/name: gitopsset
+ app.kubernetes.io/instance: gitopsset-sample
+ app.kubernetes.io/part-of: gitopssets-controller
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/created-by: gitopssets-controller
+ name: api-client-sample
+spec:
+ generators:
+ - apiClient:
+ interval: 5m
+ endpoint: https://api.example.com/demo
+ jsonPath: "{ $.things }"
+
APIClient POST body¶
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ labels:
+ app.kubernetes.io/name: gitopsset
+ app.kubernetes.io/instance: gitopsset-sample
+ app.kubernetes.io/part-of: gitopssets-controller
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/created-by: gitopssets-controller
+ name: api-client-sample
+spec:
+ generators:
+ - apiClient:
+ interval: 5m
+ endpoint: https://api.example.com/demo
+ body:
+ name: "testing"
+ value: "testing2"
+
{ "name": "testing", "value": "testing2" }
+
APIClient simple results¶
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ labels:
+ app.kubernetes.io/name: gitopsset
+ app.kubernetes.io/instance: gitopsset-sample
+ app.kubernetes.io/part-of: gitopssets-controller
+ app.kubernetes.io/created-by: gitopssets-controller
+ name: api-client-sample
+spec:
+ generators:
+ - apiClient:
+ singleElement: true
+ interval: 5m
+ endpoint: https://api.example.com/demo
+
repeat
mechanism to generate repeating results.APIClient Custom CA¶
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ labels:
+ app.kubernetes.io/name: gitopsset
+ app.kubernetes.io/instance: gitopsset-sample
+ app.kubernetes.io/part-of: gitopssets-controller
+ app.kubernetes.io/created-by: gitopssets-controller
+ name: api-client-sample
+spec:
+ generators:
+ - apiClient:
+ singleElement: true
+ interval: 5m
+ endpoint: https://api.example.com/demo
+ secretRef:
+ name: https-ca-credentials
+
---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: https-ca-credentials
+type: Opaque
+data:
+ caFile: <BASE64>
+
Cluster generator¶
GitOpsSet
will generate a Kustomization
resource for each cluster matching the Label selector.apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: cluster-sample
+spec:
+ generators:
+ - cluster:
+ selector:
+ matchLabels:
+ env: dev
+ team: dev-team
+ templates:
+ - content:
+ kind: Kustomization
+ apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+ metadata:
+ name: "{{ .Element.ClusterName }}-demo"
+ labels:
+ app.kubernetes.io/name: go-demo
+ app.kubernetes.io/instance: "{{ .Element.ClusterName }}"
+ com.example/team: "{{ .Element.ClusterLabels.team }}"
+ spec:
+ interval: 5m
+ path: "./examples/kustomize/environments/{{ .Element.ClusterLabels.env }}"
+ prune: true
+ sourceRef:
+ kind: GitRepository
+ name: go-demo-repo
+
ClusterName
the name of the clusterClusterNamespace
the namespace that this cluster is fromClusterLabels
the labels from the metadata field on the GitOpsClusterClusterAnnotations
the annotations from the metadata field on the GitOpsClusterapiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: cluster-sample
+spec:
+ generators:
+ - cluster: {}
+
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: cluster-sample
+spec:
+ generators:
+ - cluster:
+ selector: {}
+
ImagePolicy generator¶
ImagePolicy
generator works with the Flux Image Automation.ImagePolicy
is updated, this will trigger a regeneration of templates.
ImagePolicy
ImagePolicy
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: imagepolicy-example
+ namespace: default
+spec:
+ generators:
+ - imagePolicy:
+ policyRef: podinfo
+ templates:
+ - content:
+ kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: "demo-configmap"
+ data:
+ image: "{{ .Element.latestImage }}"
+
ConfigMap
is generated containing the latest image whenever an ImagePolicy
called podinfo
is updated.ConfigMaps
with the values.apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: imagepolicy-matrix-example
+ namespace: default
+spec:
+ generators:
+ - matrix:
+ generators:
+ - imagePolicy:
+ policyRef: podinfo
+ - list:
+ elements:
+ - cluster: dev-cluster
+ version: 1.0.0
+ - cluster: prod-cluster
+ version: 1.0.0
+ templates:
+ - content:
+ kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: "demo-configmap-{{ .Element.cluster }}"
+ data:
+ image: "{{ .Element.latestImage }}"
+ cluster: "{{ .Element.cluster }}"
+ version: "{{ .Element.version }}"
+
$ kubectl get configmaps
+NAME DATA AGE
+demo-configmap-dev-cluster 3 3m19s
+demo-configmap-prod-cluster 3 3m19s
+
apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: demo-configmap-dev-cluster
+ namespace: default
+data:
+ cluster: dev-cluster
+ image: stefanprodan/podinfo:5.1.4
+ version: 1.0.0
+
apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: demo-configmap-prod-cluster
+ namespace: default
+data:
+ cluster: prod-cluster
+ image: stefanprodan/podinfo:5.1.4
+ version: 1.0.0
+
Config generator¶
Config
generator with Kubernetes ConfigMaps and Secrets.ConfigMap
or Secret
is updated, this will trigger a regeneration of templates.ConfigMap
apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: test-cm
+data:
+ name: test-config
+ demo: test-value
+
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: config-sample
+spec:
+ generators:
+ - config:
+ kind: ConfigMap
+ name: test-cm
+ templates:
+ - content:
+ kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: "{{ .Element.name }}-demo"
+ labels:
+ app.kubernetes.io/name: go-demo
+ app.kubernetes.io/instance: "{{ .Element.name }}"
+ data:
+ generatedValue: "{{ .Element.demo }}"
+
ConfigMap
is generated containing the value of the "demo" field from the existing ConfigMap
test-cm. Config
generator can be combined with other generators:ConfigMaps
with the values. apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: imagepolicy-matrix-example
+ namespace: default
+spec:
+ generators:
+ - matrix:
+ generators:
+ - config:
+ kind: ConfigMap
+ name: test-cm
+ - list:
+ elements:
+ - cluster: dev-cluster
+ version: 1.0.0
+ - cluster: prod-cluster
+ version: 1.0.0
+ templates:
+ - content:
+ kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: "demo-configmap-{{ .Element.cluster }}"
+ data:
+ generatedValue: "{{ .Element.demo }}"
+ cluster: "{{ .Element.cluster }}"
+ version: "{{ .Element.version }}"
+
$ kubectl get configmaps
+NAME DATA AGE
+demo-configmap-dev-cluster 3 3m19s
+demo-configmap-prod-cluster 3 3m19s
+
apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: demo-configmap-dev-cluster
+ namespace: default
+data:
+ cluster: dev-cluster
+ generatedValue: test-value
+ version: 1.0.0
+
apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: demo-configmap-prod-cluster
+ namespace: default
+data:
+ cluster: prod-cluster
+ generatedValue: test-value
+ version: 1.0.0
+
Templating functions¶
.Element
or defaults to another value.{
+ "team": "engineering dev"
+}
+
sanitize template function¶
kind: Service
+metadata:
+ name: {{ sanitize .Element.team }}-demo
+
kind: Service
+metadata:
+ name: engineeringdev-demo
+
getordefault¶
kind: Service
+metadata:
+ name: {{ getordefault .Element "name" "defaulted" }}-demo
+
kind: Service
+metadata:
+ name: defaulted-demo
+
.Element
it will be inserted, the "default" is only inserted if it doesn't exist.Security¶
GitRepository
generator, where it may not be obvious to the author of the files, or the author of the template the consequences of the template rendering.ServiceAccount
that is used by the gitopssets-controller is extremely limited, and can not create resources, you will need to explicitly grant permissions to create any of the resources you declare in the template, missing permissions will appear in the controller logs.Limiting via service-accounts¶
apiVersion: templates.weave.works/v1alpha1
+kind: GitOpsSet
+metadata:
+ name: matrix-sample
+spec:
+ # the controller will impersonate this service account
+ serviceAccountName: test-sa
+ generators:
+ - list:
+ elements:
+ - env: dev
+ team: dev-team
+ - env: production
+ team: ops-team
+ - env: staging
+ team: ops-team
+ templates:
+ - content:
+ kind: Kustomization
+ apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+ metadata:
+ name: "{{ .Element.env }}-demo"
+ labels:
+ app.kubernetes.io/name: go-demo
+ app.kubernetes.io/instance: "{{ .Element.env }}"
+ com.example/team: "{{ .Element.team }}"
+ spec:
+ interval: 5m
+ path: "./examples/kustomize/environments/{{ .Element.env }}"
+ prune: true
+ sourceRef:
+ kind: GitRepository
+ name: go-demo-repo
+
gitopsset-controller configuration¶
--enabled-generators
flag, which takes a comma separated list of generators to enable.List
and GitRepository
generators:--enabled-generators=List,GitRepository
+
Kubernetes Process Limits¶
Secret
and ConfigMap
resources, all these can lead to using significant amounts of memory.resources:
+ limits:
+ cpu: 1000m
+ memory: 2Gi
+ requests:
+ cpu: 100m
+ memory: 64Mi
+
Notifications¶
Normal
event or when reconciliation fails with an Error
event. Fluxcd's Events package is used including the EventRecorder
to record these events.--events-addr
flag in RUN_ARGS
when starting the controller. This can be any HTTP endpoint.
Anonymous Access
Configuring Anonymous access¶
#
+additionalArgs:
+- --insecure-no-authentication-user=gitops-test-user
+#
+
--insecure-no-authentication-user
flag is the kubernetes User
to be impersonated to make requests into the cluster.--auth-methods
) are disabled.Example ClusterRole¶
---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: minimum-weavegitops-role
+rules:
+- apiGroups: [""]
+ resources: ["secrets","pods","events"]
+ verbs: ["get","list"]
+- apiGroups: ["apps"]
+ resources: ["deployments", "replicasets"]
+ verbs: ["get","list"]
+- apiGroups: ["kustomize.toolkit.fluxcd.io"]
+ resources: ["kustomizations"]
+ verbs: ["get","list"]
+- apiGroups: ["helm.toolkit.fluxcd.io"]
+ resources: ["helmreleases"]
+ verbs: ["get","list"]
+- apiGroups: ["source.toolkit.fluxcd.io"]
+ resources: ["*"]
+ verbs: ["get","list"]
+- apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get","list","watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: gitops-test-user-binding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: minimum-weavegitops-role
+subjects:
+ - kind: User
+ name: gitops-test-user
+
Displaying Custom Metadata¶
podinfo
application that we installed in the getting started guide as an example. Open up the podinfo kustomization and add annotations to it so it looks like this:---
+apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+kind: Kustomization
+metadata:
+ name: podinfo
+ namespace: flux-system
+// highlight-start
+ annotations:
+ metadata.weave.works/description: |
+ Podinfo is a tiny web application made with Go that showcases best practices of running microservices in Kubernetes.
+ Podinfo is used by CNCF projects like Flux and Flagger for end-to-end testing and workshops.
+ metadata.weave.works/grafana-dashboard: https://grafana.my-org.example.com/d/podinfo-dashboard
+// highlight-end
+spec:
+ interval: 5m0s
+ path: ./kustomize
+ prune: true
+ sourceRef:
+ kind: GitRepository
+ name: podinfo
+ targetNamespace: flux-system
+
podinfo
kustomization. At the bottom of the 'Details' section you will see the new 'Metadata' entries:
metadata.weave.works
. Any other annotations will be ignored.metadata.weave.works/grafana-dashboard
was displayed as "Grafana Dashboard".
-._
.
Upgrade to Flux GA¶
Before Starting the Upgrade¶
Flux Beta or Flux v0.x
as the latest Flux Beta Release.Flux GA
as the latest Flux GA Release CandidateWeave GitOps
as the latest Weave GitOps Enterprise releaseFAQ¶
Why Upgrade to Flux GA¶
Can I Use Weave GitOps with Flux GA?¶
Can I Use Weave GitOps Enterprise with Flux GA?¶
v0.23.0 onwards¶
v0.22.0¶
gitopssets-controller:
+ controllerManager:
+ manager:
+ image:
+ tag: v0.10.0
+
Can I Use Weave GitOps with Flux v2 0.x (pre-GA versions)?¶
Upgrade¶
ClusterBootstrapConfig
s.1. Upgrade to Flux GA on your existing leaf clusters and management clusters¶
flux bootstrap
command on your leaf clusters and management clusters.v1
as described in the Flux upgrade instructions:
2. Upgrade to Flux GA in ClusterBootstrapConfigs¶
ClusterBootstrapConfig
will most often contain an invocation of flux bootstrap
. Make sure the image is using v2
.Expand to see example
diff --git a/tools/dev-resources/user-guide/cluster-bootstrap-config.yaml b/tools/dev-resources/user-guide/cluster-bootstrap-config.yaml
+index bd41ec036..1b21df860 100644
+--- a/tools/dev-resources/user-guide/cluster-bootstrap-config.yaml
++++ b/tools/dev-resources/user-guide/cluster-bootstrap-config.yaml
+@@ -1,34 +1,34 @@
+apiVersion: capi.weave.works/v1alpha1
+kind: ClusterBootstrapConfig
+metadata:
+name: capi-gitops
+namespace: default
+spec:
+clusterSelector:
+ matchLabels:
+ weave.works/capi: bootstrap
+jobTemplate:
+ generateName: "run-gitops-{{ .ObjectMeta.Name }}"
+ spec:
+ containers:
+- - image: ghcr.io/fluxcd/flux-cli:v0.34.0
++ - image: ghcr.io/fluxcd/flux-cli:v2.0.0
+ name: flux-bootstrap
+ ...
+
3. Upgrade to latest WGE¶
4. Upgrade GitOpsTemplates, GitOpsSets, and ClusterBootstrapConfigs¶
GitOpsTemplate
and CAPITemplate
¶GitRepository
and Kustomization
CRs in the spec.resourcetemplates
to v1
as described in the flux upgrade instructions.GitOpsSets
¶GitRepository
and Kustomization
CRs in the spec.template
of your GitOpsSet
resources to v1
as described in the Flux upgrade instructions.5. Future steps¶
Kustomization
, GitRepository
and Receiver
resources to v1
, you can also upgrade to the future release of Flux that will drop support for < v1
APIs.Contact us¶
Support
Community¶
Flux¶
Commercial Support¶
Recommended resources¶
Weaveworks materials¶
Other¶
Welcome to MkDocs¶
Commands¶
mkdocs new [dir-name]
- Create a new project.mkdocs serve
- Start the live-reloading docs server.mkdocs build
- Build the documentation site.mkdocs -h
- Print help message and exit.Project layout¶
userdocs
mkdocs.yml # The configuration file. docs/ index.md # The documentation homepage. ... # Other markdown pages, images and other files.
Introducing Weave GitOps¶
Getting Started¶
Weave GitOps Open Source Features¶
Monitoring ENTERPRISE¶
Setup¶
monitoring
:---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+ name: weave-gitops-enterprise
+ namespace: flux-system
+spec:
+ values:
+ monitoring:
+ enabled: true # enable it if you want to expose a monitoring server
+ service:
+ name: monitoring
+ port: 8080 # port to expose the monitoring server
+ metrics:
+ enabled: true # enable it to expose a prometheus metrics endpoint in `/metrics`
+ profiling:
+ enabled: false # enable it to expose a pprof debug endpoint `/debug/pprof`
+
Get Started with Monitoring¶
Expand to see manifest contents
apiVersion: source.toolkit.fluxcd.io/v1
+kind: GitRepository
+metadata:
+name: weave-gitops-quickstart
+namespace: flux-system
+spec:
+interval: 10m0s
+ref:
+ branch: main
+url: https://github.com/weaveworks/weave-gitops-quickstart
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+name: monitoring
+---
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+name: kube-prometheus-stack
+namespace: flux-system
+spec:
+interval: 10m0s
+sourceRef:
+ kind: GitRepository
+ name: weave-gitops-quickstart
+path: ./monitoring/kube-prometheus-stack
+prune: true
+targetNamespace: monitoring
+wait: true
+
Expand to see manifest contents
apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+name: monitoring-config
+namespace: flux-system
+spec:
+interval: 10m0s
+sourceRef:
+ kind: GitRepository
+ name: weave-gitops-quickstart
+path: ./monitoring/weave-gitops
+dependsOn:
+ - name: kube-prometheus-stack
+prune: true
+targetNamespace: monitoring
+
flux
or weave-gitops
.Dashboards¶
Profiling¶
Get Started with Profiling¶
/debug/pprof
path where the pprof web interface is exposed.pprof
.
AWS Marketplace
AWS Marketplace¶
kubectl
, eksctl
, helm
and the Helm S3 Plugin.Step 1: Subscribe to Weave GitOps on the AWS Marketplace¶
Step 2: Configure an EKS Cluster¶
eksctl
to create one.cluster-config.yaml
and replace the placeholder values with your settings. See the eksctl
documentation for more configuration options.Expand for file contents
---
+apiVersion: eksctl.io/v1alpha5
+kind: ClusterConfig
+metadata:
+name: CLUSTER_NAME # Change this
+region: REGION # Change this
+
+# This section is required
+iam:
+withOIDC: true
+serviceAccounts:
+- metadata:
+ name: wego-service-account # Altering this will require a corresponding change in a later command
+ namespace: flux-system
+ roleOnly: true
+ attachPolicy:
+ Version: "2012-10-17"
+ Statement:
+ - Effect: Allow
+ Action:
+ - "aws-marketplace:RegisterUsage"
+ Resource: '*'
+
+# This section will create a single Managed nodegroup with one node.
+# Edit or remove as desired.
+managedNodeGroups:
+- name: ng1
+instanceType: m5.large
+desiredCapacity: 1
+
eksctl create cluster -f cluster-config.yaml
+
eksctl
configuration below (replacing the placeholder values) to: - Associate an OIDC provider - Create the required service account ARNoidc-config.yaml
Expand for file contents
---
+apiVersion: eksctl.io/v1alpha5
+kind: ClusterConfig
+metadata:
+name: CLUSTER_NAME # Change this
+region: REGION # Change this
+
+# This section is required
+iam:
+withOIDC: true
+serviceAccounts:
+- metadata:
+ name: wego-service-account # Altering this will require a corresponding change in a later command
+ namespace: flux-system
+ roleOnly: true
+ attachPolicy:
+ Version: "2012-10-17"
+ Statement:
+ - Effect: Allow
+ Action:
+ - "aws-marketplace:RegisterUsage"
+ Resource: '*'
+
eksctl utils associate-iam-oidc-provider -f oidc-config.yaml --approve
+eksctl create iamserviceaccount -f oidc-config.yaml --approve
+
Step 3: Fetch the Service Account Role ARN¶
wego-service-account
:# replace the placeholder values with your configuration
+# if you changed the service account name from wego-service-account, update that in the command
+export SA_ARN=$(eksctl get iamserviceaccount --cluster <cluster-name> --region <region> | awk '/wego-service-account/ {print $3}')
+
+echo $SA_ARN
+# should return
+# arn:aws:iam::<account-id>:role/eksctl-<cluster-name>-addon-iamserviceaccount-xxx-Role1-1N41MLVQEWUOF
+
Step 4: Install Weave GitOps¶
values.yaml
where you set your username, and a bcrypt hash of your desired password, like so:gitops:
+ adminUser:
+ create: true
+ username: <UPDATE>
+ passwordHash: <UPDATE>
+
helm install wego <URL/PATH> \
+--namespace=flux-system \
+--create-namespace \
+--set serviceAccountRole="$SA_ARN" \
+--values ./values.yaml
+
helm install wego <URL/PATH> \
+--namespace=flux-system \
+--create-namespace \
+--set serviceAccountName='<name>' \
+--set serviceAccountRole="$SA_ARN" \
+--values ./values.yaml
+
Step 5: Check your installation¶
kubectl get pods -n flux-system
+# you should see something like the following returned
+flux-system helm-controller-5b96d94c7f-tds9n 1/1 Running 0 53s
+flux-system kustomize-controller-8467b8b884-x2cpd 1/1 Running 0 53s
+flux-system notification-controller-55f94bc746-ggmwc 1/1 Running 0 53s
+flux-system source-controller-78bfb8576-stnr5 1/1 Running 0 53s
+flux-system wego-metering-f7jqp 1/1 Running 0 53s
+flux-system ww-gitops-weave-gitops-5bdc9f7744-vkh65 1/1 Running 0 53s
+
Step 3: Deploy an Application¶
Deploying podinfo¶
git clone https://github.com/$GITHUB_USER/fleet-infra
+cd fleet-infra
+
GitRepository
Source for podinfo. This will allow you to use different authentication methods for different repositories.flux create source git podinfo \
+ --url=https://github.com/stefanprodan/podinfo \
+ --branch=master \
+ --interval=30s \
+ --export > ./clusters/management/podinfo-source.yaml
+
GitRepository
is available here. ls
command to list your files and directories. If that doesn’t work, try ls -l ./clusters
.
podinfo-source
to your fleet-infra
repositorygit add -A && git commit -m "Add podinfo source"
+git push
+
kustomization
to build and apply the podinfo manifestflux create kustomization podinfo \
+ --target-namespace=flux-system \
+ --source=podinfo \
+ --path="./kustomize" \
+ --prune=true \
+ --interval=5m \
+ --export > ./clusters/management/podinfo-kustomization.yaml
+
podinfo-kustomization
to your fleet-infra
repositorygit add -A && git commit -m "Add podinfo kustomization"
+git push
+
View the Application in Weave GitOps¶
fleet-infra
and add podinfo. Navigate back to the dashboard to make sure that the podinfo application appears.metrics-server
. We don't use the metrics-server
in this tutorial, but note that it's the reason why HorizontalPodAutoscaler will report as Not ready
in your dashboard. We recommend ignoring the warning.Customize podinfo¶
patches
section as shown below to the field spec of your podinfo-kustomization.yaml
file so it looks like this:Expand to see Kustomization patches
---
+apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+kind: Kustomization
+metadata:
+ name: podinfo
+ namespace: flux-system
+spec:
+ interval: 60m0s
+ path: ./kustomize
+ prune: true
+ sourceRef:
+ kind: GitRepository
+ name: podinfo
+ targetNamespace: flux-system
+// highlight-start
+ patches:
+ - patch: |-
+ apiVersion: autoscaling/v2beta2
+ kind: HorizontalPodAutoscaler
+ metadata:
+ name: podinfo
+ spec:
+ minReplicas: 3
+ target:
+ name: podinfo
+ kind: HorizontalPodAutoscaler
+// highlight-end
+
podinfo-kustomization.yaml
changes:git add -A && git commit -m "Increase podinfo minimum replicas"
+git push
+
Suspend updates¶
Delete Podinfo¶
rm ./clusters/management/podinfo-kustomization.yaml
+ rm ./clusters/management/podinfo-source.yaml
+ git add -A && git commit -m "Remove podinfo kustomization and source"
+ git push
+
Complete!¶
Step 1: Install Weave GitOps Open Source on Your Cluster¶
Prerequisites¶
fleet-infra
. To create this, follow GitHub’s instructions—using fleet-infra
instead of hello-world
.Check your Cluster's Kubernetes Version¶
Install Flux¶
./clusters/my-cluster/
in the repositoryInstall the Flux CLI¶
brew install fluxcd/tap/flux
+
brew upgrade fluxcd/tap/flux
+
flux bootstrap
.flux -v
, and use that for flux bootstrap --version=v<CLI-VERSION>
.sudo curl -s https://fluxcd.io/install.sh | sudo FLUX_VERSION=<VERSION> bash
.Export your credentials¶
repo
scope.export GITHUB_TOKEN=<your-token>
+export GITHUB_USER=<your-username>
+
Check your Kubernetes cluster¶
flux check --pre
+
► checking prerequisites
+✔ kubernetes 1.22.2 >=1.20.6
+✔ prerequisites checks passed
+
Install Flux onto your cluster with the
flux bootstrap
command¶flux bootstrap
creates a flux system
folder in your repository that includes the manifests Flux needs to operate. It also generates a key value pair for Flux to access the repo.github
. If you would rather use GitLab, change this to gitlab
.flux bootstrap github \
+ --owner=$GITHUB_USER \
+ --repository=fleet-infra \
+ --branch=main \
+ --path=./clusters/my-cluster \
+ --personal \
+ --components-extra image-reflector-controller,image-automation-controller
+
Install the
gitops
CLI¶gitops
CLI is currently supported on Mac (x86 and Arm) and Linux, including Windows Subsystem for Linux (WSL). Windows support is a planned enhancement.gitops
CLI:curl --silent --location "https://github.com/weaveworks/weave-gitops/releases/download/v0.36.0/gitops-$(uname)-$(uname -m).tar.gz" | tar xz -C /tmp
+sudo mv /tmp/gitops /usr/local/bin
+gitops version
+
brew tap weaveworks/tap
+brew install weaveworks/tap/gitops
+
Deploy Weave GitOps¶
HelmRelease
and HelmRepository
objects.fleet-infra
repo.Clone your Git repository where Flux has been bootstrapped¶
git clone https://github.com/$GITHUB_USER/fleet-infra
+cd fleet-infra
+
mkdir -p ./clusters/my-cluster
.Deploy¶
HelmRepository
and HelmRelease
to deploy Weave GitOps:PASSWORD="<A new password you create, removing the brackets and including the quotation marks>"
+gitops create dashboard ww-gitops \
+ --password=$PASSWORD \
+ --export > ./clusters/my-cluster/weave-gitops-dashboard.yaml
+
--values
CLI flag to supply one or more values files.Commit and push the
weave-gitops-dashboard.yaml
to the fleet-infra
repository¶git add -A && git commit -m "Add Weave GitOps Dashboard"
+git push
+
Validate that Weave GitOps and Flux are installed¶
kubectl get pods -n flux-system
+
NAME READY STATUS RESTARTS AGE
+helm-controller-5bfd65cd5f-gj5sz 1/1 Running 0 10m
+kustomize-controller-6f44c8d499-s425n 1/1 Running 0 10m
+notification-controller-844df5f694-2pfcs 1/1 Running 0 10m
+source-controller-6b6c7bc4bb-ng96p 1/1 Running 0 10m
+ww-gitops-weave-gitops-86b645c9c6-k9ftg 1/1 Running 0 5m
+
Next steps¶
Optional: Running the UI on a Subpath
Running the UI on a subpath¶
/
. It is possible to run the UI on a subpath, for example /weave-gitops
. This is useful if you want to run weave-gitops alongside other applications on the same domain.--route-prefix
flag on the weave-gitops server. For example, if you want to run the UI on /weave-gitops
, you can set the flag to --route-prefix=/weave-gitops
.additionalArgs
field in the spec.values
section of the weave-gitops HelmRelease
.spec:
+ values:
+ additionalArgs:
+ - --route-prefix=/weave-gitops
+
Ingress¶
Ingress
is a Kubernetes resource that allows you to expose your application to the internet. Please refer to the Kubernetes documentation for more information about a complete Ingress
configuration. It often depends on the Kubernetes provider you are using and your particular setup.Ingress
resource to integrate with the ingress controller you have configured for your cluster. To enable ingress generation set the ingress.enabled
field to true
.
path
field to the same subpath specified in the --route-prefix
flag./
.spec:
+ values:
+ ingress:
+ enabled: true
+ hosts:
+ - host: ""
+ paths:
+ - path: /wego # set the path to `/` if you have not set the `--route-prefix` flag
+ pathType: Prefix
+
Step 2: Explore the Weave GitOps Open Source UI¶
Overview¶
Kustomization
and HelmRelease
objects so that you can quickly understand the state of your deployments across a cluster. * a Sources view that shows summary information from gitrepository, helmrepository and bucket objects and tells you the current status of resources that are synchronizing content from where you’ve declared the desired state of your system—for example, Git repositories. * a Flux Runtime view that provides the status of the GitOps engine that continuously reconciles your desired and live state. It shows your installed GitOps Toolkit Controllers and version. * an Image Automation view that reduces GitOps friction, particularly in non-production environments, by enabling you to discover repositories, policies, and updates on your cluster. Deploy the latest image in a dev or staging environment with minimal fuss, and keep your platform updated with the latest approved versions—for example, patch releases to reduce exposure to CVEs. Auto-deploy when approval is gated before the image is added to an internal registry. * A Notifications View that leverages Flux's notification controller to show which notifications are already configured within the UI. This enables WeGO users to set up and receive notifications from Weave GitOps. Here you can find the list of providers. If you’re a platform operator, this view will help you to understand your egress topology across clusters so you’ll know where events are being sent beyond your clusters. * multiple views for debugging. * a dark mode option.Login to the GitOps Dashboard¶
kubectl port-forward svc/ww-gitops-weave-gitops -n flux-system 9001:9001
+
admin
. This means that you can use “admin” as your user name, and the password that you set earlier during installation as $PASSWORD
.values.yaml
file or the spec.values
section of the Weave GitOps HelmRelease
resource:envVars:
+ - name: WEAVE_GITOPS_FEATURE_OIDC_BUTTON_LABEL
+ value: "Login with ACME"
+
The Applications View¶
Kustomization
and HelmRelease
objects. You can apply dark mode using the toggle switch in the top right corner.Kustomizations
called podinfo
and canaries
corresponding to the applications with the same names. The source referenced by podinfo
is shipping-service-podinfo
which has been verified whereas the one referenced by canaries
does not have verification set up. - three HelmReleases
called weave-gitops-enterprise
, tf-controller
and podinfo
which deploys the respective Helm Charts.Source
object they are reconciling from and whether or not that Source
is verified (this requires verification to have been set up for the source). Clicking the name of the Source will take you to a detail view for the given Source object. The view automatically updates every few seconds so you know the current state of your system.Name
. * Filter by Type
by clicking the strawberry icon to its right. * Click the Name
of an object to get a detailed view for the given Kustomization
or HelmRelease
. (You'll see this again in the Sources view.) * In the main Applications view, you can use the checkbox to the left of your listed applications to select them and perform actions from the actions menu at the top. These actions are Sync (reconcile), Suspend, and Resume, and they affect Flux resources.A Closer Look: Exploring the flux-system Deployment¶
flux-system
Kustomization. Navigate back to the Applications
view, and click on the flux-system
object.Source
it is reading from * the latest applied commit * the exact path with the Source repository that is being deployed * the Interval
where Flux will look to reconcile any differences between the declared and live state. For example, if a kubectl
patch has been applied on the cluster, it will effectively be reverted. If a longer error message is reported by this object, you'll be able to see it in its entirety on this page.
kustomization
.The Sources View¶
GitRepository
, HelmRepository
, HelmChart
, and Bucket
objects.GitRepository
called shipping-service-podinfo
- an OCIRepository
called podinfo-oci
Interval
—namely, how frequently Flux will check for updates in a given source location. You can also see whether or not that source is verified (if this is something that you have set up for the specific source).URL
to navigate to a given source—i.e. a repository in GitHub—or the Name
of a Source
to view more details about it.GitRepository/flux-system
from the summary at the top of the page.The Image Automation View¶
The Flux Runtime View¶
Flux Runtime
. This view provides information on the GitOps engine, which continuously reconciles your desired and live state, and helps users to know which apiVersion to use in manifests. It comes with two tabs: one for controllers, and the other for custom resource definitions (CRDs).Controllers¶
flux bootstrap
will install the following controllers: - helm-controller - kustomize-controller - notification-controller - source-controllerCRDs¶
Moving On¶
Authorization ENTERPRISE¶
pipeline
resource and the underlying application
resources. This sample configuration shows a recommended way to configure RBAC to provide such access. The pipeline-reader
role and the search-pipeline-reader
role-binding allow a group search-developer
to access pipeline resources within the search
namespace.apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: pipeline-reader
+rules:
+ - apiGroups: [ "pipelines.weave.works" ]
+ resources: [ "pipelines" ]
+ verbs: [ "get", "list", "watch"]
+ - apiGroups: ["helm.toolkit.fluxcd.io"]
+ resources: [ "helmreleases" ]
+ verbs: [ "get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: search-pipeline-reader
+ namespace: search
+subjects:
+ - kind: Group
+ name: search-developer
+ apiGroup: rbac.authorization.k8s.io
+roleRef:
+ kind: ClusterRole
+ name: pipeline-reader
+ apiGroup: rbac.authorization.k8s.io
+
Pipelines ENTERPRISE¶
Benefits to Developers¶
Getting Started with Pipelines ENTERPRISE¶
Prerequisites¶
enablePipelines
has been enabled. This flag is part of the Weave GitOps Enterprise Helm chart values and is enabled by default. - Any leaf clusters running workloads that you need to visualise using Pipelines have been added to Weave GitOps Enterprise. - You have exposed the promotion webhook on the management cluster and leaf clusters can reach that webhook endpoint over the network.Define a Pipeline¶
application
to deliver - the environments
that your app will go through on its way to production (general). An environment describes the different stages of a pipeline and consists of one or more deployment targets. - the deployment targets
, the clusters that each environment has. A deployment target consists of a namespace and a GitOpsCluster
reference and is used to specify where the application is running in your fleet. Pipeline
custom resource. An example of such a CR is shown here:Expand to view
---
+apiVersion: pipelines.weave.works/v1alpha1
+kind: Pipeline
+metadata:
+name: podinfo-02
+namespace: flux-system
+spec:
+appRef:
+ apiVersion: helm.toolkit.fluxcd.io/v2beta1
+ kind: HelmRelease
+ name: podinfo
+environments:
+ - name: dev
+ targets:
+ - namespace: podinfo-02-dev
+ clusterRef:
+ kind: GitopsCluster
+ name: dev
+ namespace: flux-system
+ - name: test
+ targets:
+ - namespace: podinfo-02-qa
+ clusterRef:
+ kind: GitopsCluster
+ name: dev
+ namespace: flux-system
+ - namespace: podinfo-02-perf
+ clusterRef:
+ kind: GitopsCluster
+ name: dev
+ namespace: flux-system
+ - name: prod
+ targets:
+ - namespace: podinfo-02-prod
+ clusterRef:
+ kind: GitopsCluster
+ name: prod
+ namespace: flux-system
+
podinfo
application is delivered to a traditional pipeline composed of dev
, test
, and prod
environments. In this case, the test
environment consists of two deployment targets, qa
and perf
. This is to indicate that, although both targets are part of the same stage (testing), they can evolve separately and may run different versions of the application. Note that two clusters, dev
and prod
, are used for the environments; both are defined in the flux-system
namespace.View Your List of Pipelines¶
Type
and Environments
it goes through.View Pipeline Details¶
Using GitOpsTemplates for Pipelines ENTERPRISE¶
Adding New Resources From Within the Weave GitOps Enterprise Dashboard¶
Create a Pipeline
from within the Pipeline view. This will take you to a pre-filtered list of templates with the label: weave.works/template-type: pipeline
.Templates
view (shown below) lists all templates for which a given user has the appropriate permission to view. You can install GitOpsTemplates into different namespaces and apply standard Kubernetes RBAC to limit which teams can utilize which templates. You can also configure policy to enforce permitted values within a template.Example: GitOpsTemplates¶
Pipeline: Visualization Only¶
Expand to view example template
---
+apiVersion: templates.weave.works/v1alpha2
+kind: GitOpsTemplate
+metadata:
+name: pipeline-sample
+namespace: default # Namespace where the GitOpsTemplate is installed, consider that a team will need READ access to this namespace and the custom resource
+labels:
+ weave.works/template-type: pipeline
+spec:
+description: Sample Pipeline showing visualization of two helm releases across two environments.
+params:
+ - name: RESOURCE_NAME # This is a required parameter name to enable Weave GitOps to write to your Git Repository
+ description: Name of the Pipeline
+ - name: RESOURCE_NAMESPACE
+ description: Namespace for the Pipeline on the management cluster
+ default: flux-system # default values make it easier for users to fill in a template
+ - name: FIRST_CLUSTER_NAME
+ description: Name of GitopsCluster object for the first environment
+ - name: FIRST_CLUSTER_NAMESPACE
+ description: Namespace where this object exists
+ default: default
+ - name: FIRST_APPLICATION_NAME
+ description: Name of the HelmRelease for your application in the first environment
+ - name: FIRST_APPLICATION_NAMESPACE
+ description: Namespace for this application
+ default: flux-system
+ - name: SECOND_CLUSTER_NAME
+ description: Name of GitopsCluster object for the second environment
+ - name: SECOND_CLUSTER_NAMESPACE
+ description: Namespace where this object exists
+ default: default
+ - name: SECOND_APPLICATION_NAME
+ description: Name of the HelmRelease for your application in the second environment
+ - name: SECOND_APPLICATION_NAMESPACE
+ description: Namespace for this application
+ default: flux-system
+resourcetemplates:
+ - content:
+ - apiVersion: pipelines.weave.works/v1alpha1
+ kind: Pipeline
+ metadata:
+ name: ${RESOURCE_NAME}
+ namespace: ${RESOURCE_NAMESPACE}
+ spec:
+ appRef:
+ apiVersion: helm.toolkit.fluxcd.io/v2beta1
+ kind: HelmRelease
+ name: ${APPLICATION_NAME}
+ environments:
+ - name: First-Environment
+ targets:
+ - namespace: ${FIRST_APPLICATION_NAMESPACE}
+ clusterRef:
+ kind: GitopsCluster
+ name: ${FIRST_CLUSTER_NAME}
+ namespace: ${FIRST_CLUSTER_NAMESPACE}
+ - name: Second-Environment
+ targets:
+ - namespace: ${SECOND_APPLICATION_NAMESPACE}
+ clusterRef:
+ kind: GitopsCluster
+ name: ${SECOND_CLUSTER_NAME}
+ namespace: ${SECOND_CLUSTER_NAMESPACE}
+
Pipeline - Multi-Cluster Promotion¶
Expand to view example template
---
+apiVersion: templates.weave.works/v1alpha2
+kind: GitOpsTemplate
+metadata:
+name: pipeline-sample
+namespace: default
+labels:
+ weave.works/template-type: pipeline
+spec:
+description: Sample Pipeline showing visualization of two helm releases across two environments.
+params:
+ - name: RESOURCE_NAME
+ description: Name of the Pipeline
+ - name: RESOURCE_NAMESPACE
+ description: Namespace for the Pipeline on the management cluster
+ default: flux-system
+ - name: FIRST_CLUSTER_NAME
+ description: Name of GitopsCluster object for the first environment
+ - name: FIRST_CLUSTER_NAMESPACE
+ description: Namespace where this object exists
+ default: default
+ - name: FIRST_APPLICATION_NAME
+ description: Name of the HelmRelease for your application in the first environment
+ - name: FIRST_APPLICATION_NAMESPACE
+ description: Namespace for this application
+ default: flux-system
+ - name: SECOND_CLUSTER_NAME
+ description: Name of GitopsCluster object for the second environment
+ - name: SECOND_CLUSTER_NAMESPACE
+ description: Namespace where this object exists
+ default: default
+ - name: SECOND_APPLICATION_NAME
+ description: Name of the HelmRelease for your application in the second environment
+ - name: SECOND_APPLICATION_NAMESPACE
+ description: Namespace for this application
+ default: flux-system
+ - name: APPLICATION_REPO_URL
+ description: URL for the git repository containing the HelmRelease objects
+ - name: APPLICATION_REPO_BRANCH
+ description: Branch to update with new version
+ - name: GIT_CREDENTIALS_SECRET
+ description: Name of the secret in RESOURCE_NAMESPACE containing credentials to create pull requests
+resourcetemplates:
+ - content:
+ - apiVersion: pipelines.weave.works/v1alpha1
+ kind: Pipeline
+ metadata:
+ name: ${RESOURCE_NAME}
+ namespace: ${RESOURCE_NAMESPACE}
+ spec:
+ appRef:
+ apiVersion: helm.toolkit.fluxcd.io/v2beta1
+ kind: HelmRelease
+ name: ${APPLICATION_NAME}
+ environments:
+ - name: First-Environment
+ targets:
+ - namespace: ${FIRST_APPLICATION_NAMESPACE}
+ clusterRef:
+ kind: GitopsCluster
+ name: ${FIRST_CLUSTER_NAME}
+ namespace: ${FIRST_CLUSTER_NAMESPACE}
+ - name: Second-Environment
+ targets:
+ - namespace: ${SECOND_APPLICATION_NAMESPACE}
+ clusterRef:
+ kind: GitopsCluster
+ name: ${SECOND_CLUSTER_NAME}
+ namespace: ${SECOND_CLUSTER_NAMESPACE}
+ promotion:
+ pull-request:
+ url: ${APPLICATION_REPO_URL}
+ baseBranch: ${APPLICATION_REPO_BRANCH}
+ secretRef:
+ name: ${GIT_CREDENTIALS_SECRET}
+
Git Credentials¶
Promotion Marker Added to HelmRelease in
Second-Environment
¶spec.chart.spec.version
is defined. For example, if the values used in the above template were as follows:RESOURCE_NAME=my-app
+RESOURCE_NAMESPACE=pipeline-01
+
# {"$promotion": "pipeline-01:my-app:Second-Environment"}
+
Alerts and Providers¶
Alert
, this template filters events to detect when an update has occurred. Depending on your use case, you can use different filtering.Provider
, this template uses authenticated (HMAC) communication to the promotion endpoint, where a secret must be present on both the management cluster and the leaf cluster(s). For simplicity's sake, you can use a generic
provider instead; this will not require the secret.Expand to view example template
---
+apiVersion: templates.weave.works/v1alpha2
+kind: GitOpsTemplate
+metadata:
+name: pipeline-notification-resources
+namespace: default
+labels:
+ weave.works/template-type: application # These are generic Flux resources rather than Pipeline-specific
+spec:
+description: Creates flux notification controller resources for a cluster, required for promoting applications via pipelines.
+params:
+ - name: RESOURCE_NAME
+ description: Name for the generated objects, should match the target Application (HelmRelease) name.
+ - name: RESOURCE_NAMESPACE
+ description: Namespace for the generated objects, should match the target Application (HelmRelease) namespace.
+ - name: PROMOTION_HOST
+ description: Host for the promotion webhook on the management cluster, i.e. "promotions.example.org"
+ - name: SECRET_REF
+ description: Name of the secret containing HMAC key in the token field
+ - name: ENV_NAME
+ description: Environment the cluster is a part of within a pipeline.
+resourcetemplates:
+ - content:
+ - apiVersion: notification.toolkit.fluxcd.io/v1beta1
+ kind: Provider
+ metadata:
+ name: ${RESOURCE_NAME}
+ namespace: ${RESOURCE_NAMESPACE}
+ spec:
+        address: http://${PROMOTION_HOST}/promotion/${RESOURCE_NAME}/${ENV_NAME}
+ type: generic-hmac
+        secretRef:
+          name: ${SECRET_REF}
+ - apiVersion: notification.toolkit.fluxcd.io/v1beta1
+ kind: Alert
+ metadata:
+ name: ${RESOURCE_NAME}
+ namespace: ${RESOURCE_NAMESPACE}
+ spec:
+ providerRef:
+ name: ${RESOURCE_NAME}
+ eventSeverity: info
+ eventSources:
+ - kind: HelmRelease
+ name: ${RESOURCE_NAME}
+ exclusionList:
+ - ".*upgrade.*has.*started"
+ - ".*is.*not.*ready"
+ - "^Dependencies.*"
+
Summary¶
Setting Up Pipelines to Notify a Jenkins Webhook ENTERPRISE¶
Configuring Jenkins¶
Post content parameters¶
Expand to see an example Promotion Event payload
{
+"involvedObject": {
+ "kind": "Pipeline",
+ "namespace": "flux-system",
+ "name": "podinfo-pipeline",
+ "uid": "74d9e3b6-0269-4c12-9051-3ce8cfb7886f",
+ "apiVersion": "pipelines.weave.works/v1alpha1",
+ "resourceVersion": "373617"
+},
+"severity": "info",
+"timestamp": "2023-02-08T12:34:13Z",
+"message": "Promote pipeline flux-system/podinfo-pipeline to prod with version 6.1.5",
+"reason": "Promote",
+"reportingController": "pipeline-controller",
+"reportingInstance": "chart-pipeline-controller-8549867565-7822g"
+}
+
Configure Notification Provider¶
apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+ name: jenkins-token
+ namespace: podinfo
+stringData:
+ headers: |
+ token: epicsecret
+
apiVersion: notification.toolkit.fluxcd.io/v1beta1
+kind: Provider
+metadata:
+ name: jenkins-promotion
+ namespace: podinfo
+spec:
+ type: generic
+ address: https://jenkins.domain.tld/generic-webhook-trigger/invoke
+ secretRef:
+ name: jenkins-token
+
Set Up Alerts¶
jenkins-promotion
provider. For example an Alert for the podinfo-pipeline
in the flux-system
namespace:apiVersion: notification.toolkit.fluxcd.io/v1beta1
+kind: Alert
+metadata:
+ name: podinfo-pipeline-promotion
+ namespace: podinfo
+spec:
+ eventSeverity: info
+ eventSources:
+ - kind: Pipeline
+ name: podinfo-pipeline
+ namespace: flux-system
+ providerRef:
+ name: jenkins-promotion
+
Setting up Pipelines to Trigger a Tekton Pipeline ENTERPRISE¶
Configuring Tekton Pipelines¶
Tekton Tasks¶
name
, namespace
, and message
.---
+apiVersion: tekton.dev/v1beta1
+kind: Task
+metadata:
+ name: hello
+ namespace: ww-pipeline
+spec:
+ params:
+ - name: name
+ type: string
+ - name: namespace
+ type: string
+ - name: message
+ type: string
+ steps:
+ - name: echo
+ image: alpine
+ script: |
+ #!/bin/sh
+ echo "Hello $(params.namespace)/$(params.name)!"
+ echo "Message: $(params.message)"
+---
+apiVersion: tekton.dev/v1beta1
+kind: Task
+metadata:
+ name: goodbye
+ namespace: ww-pipeline
+spec:
+ params:
+ - name: name
+ type: string
+ - name: namespace
+ type: string
+ - name: message
+ type: string
+ steps:
+ - name: goodbye
+ image: ubuntu
+ script: |
+ #!/bin/bash
+ echo "Goodbye $(params.namespace)/$(params.name)!"
+ echo "Message: $(params.message)"
+
Tekton Pipeline¶
hello-goodbye
Tekton Pipeline has the same three parameters as the tasks and it passes down the values to them.---
+apiVersion: tekton.dev/v1beta1
+kind: Pipeline
+metadata:
+ name: hello-goodbye
+ namespace: ww-pipeline
+spec:
+ params:
+ - name: name
+ type: string
+ - name: namespace
+ type: string
+ - name: message
+ type: string
+ tasks:
+ - name: hello
+ taskRef:
+ name: hello
+ params:
+ - name: namespace
+ value: $(params.namespace)
+ - name: name
+ value: $(params.name)
+ - name: message
+ value: $(params.message)
+ - name: goodbye
+ runAfter:
+ - hello
+ taskRef:
+ name: goodbye
+ params:
+ - name: namespace
+ value: $(params.namespace)
+ - name: name
+ value: $(params.name)
+ - name: message
+ value: $(params.message)
+
Configuring Tekton Pipeline Automation¶
TriggerBinding
: This resource binds the incoming JSON message to parameter variables.TriggerTemplate
: This resource is the template of the PipelineRun
that will be started.EventListener
: This resource glues the above two resources together and creates an http listener service.Tekton TriggerBinding¶
{
+ "involvedObject": {
+ "kind": "Pipeline",
+ "namespace": "flux-system",
+ "name": "podinfo-pipeline",
+ "uid": "74d9e3b6-0269-4c12-9051-3ce8cfb7886f",
+ "apiVersion": "pipelines.weave.works/v1alpha1",
+ "resourceVersion": "373617"
+ },
+ "severity": "info",
+ "timestamp": "2023-02-08T12:34:13Z",
+ "message": "Promote pipeline flux-system/podinfo-pipeline to prod with version 6.1.5",
+ "reason": "Promote",
+ "reportingController": "pipeline-controller",
+ "reportingInstance": "chart-pipeline-controller-8549867565-7822g"
+}
+
involvedObject.name
, involvedObject.namespace
and message
fields:---
+apiVersion: triggers.tekton.dev/v1beta1
+kind: TriggerBinding
+metadata:
+ name: ww-pipeline-binding
+ namespace: ww-pipeline
+spec:
+ params:
+ - name: namespace
+ value: $(body.involvedObject.namespace)
+ - name: name
+ value: $(body.involvedObject.name)
+ - name: message
+ value: $(body.message)
+
Tekton TriggerTemplate¶
Pipeline
resources:---
+apiVersion: triggers.tekton.dev/v1beta1
+kind: TriggerTemplate
+metadata:
+ name: ww-pipeline-template
+ namespace: ww-pipeline
+spec:
+ params:
+ - name: namespace
+ default: "Unknown"
+ - name: name
+ default: "Unknown"
+ - name: message
+ default: "no message"
+ resourcetemplates:
+ - apiVersion: tekton.dev/v1beta1
+ kind: PipelineRun
+ metadata:
+ generateName: hello-goodbye-run-
+ spec:
+ pipelineRef:
+ name: hello-goodbye
+ params:
+ - name: name
+ value: $(tt.params.name)
+ - name: namespace
+ value: $(tt.params.namespace)
+ - name: message
+ value: $(tt.params.message)
+
Tekton EventListener¶
---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: tekton-ww-pipeline-robot
+ namespace: ww-pipeline
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: triggers-example-eventlistener-binding
+ namespace: ww-pipeline
+subjects:
+- kind: ServiceAccount
+ name: tekton-ww-pipeline-robot
+ namespace: ww-pipeline
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: tekton-triggers-eventlistener-roles
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: triggers-example-eventlistener-clusterbinding
+subjects:
+- kind: ServiceAccount
+ name: tekton-ww-pipeline-robot
+ namespace: ww-pipeline
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: tekton-triggers-eventlistener-clusterroles
+
ServiceAccount
, we can create the EventListener
using the TriggerBinding
and TriggerTemplate
:---
+apiVersion: triggers.tekton.dev/v1beta1
+kind: EventListener
+metadata:
+ name: ww-pipeline-listener
+ namespace: ww-pipeline
+spec:
+ serviceAccountName: tekton-ww-pipeline-robot
+ triggers:
+ - name: ww-pipeline-trigger
+ bindings:
+ - ref: ww-pipeline-binding
+ template:
+ ref: ww-pipeline-template
+
Service
for our EventListener
.❯ kubectl get service -n ww-pipeline
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+el-ww-pipeline-listener ClusterIP 10.96.250.23 <none> 8080/TCP,9000/TCP 3d
+
Configure Notification Provider¶
EventListener
service. If they are not in the same cluster, exposing the service may be required through an ingress or a service mesh.---
+apiVersion: notification.toolkit.fluxcd.io/v1beta1
+kind: Provider
+metadata:
+ name: tekton-promotion
+ namespace: hello-podinfo
+spec:
+ type: generic
+ address: http://el-ww-pipeline-listener.ww-pipeline:8080/
+
Set Up Alerts¶
tekton-promotion
provider. For example, an Alert for the podinfo-pipeline
in the flux-system
namespace:---
+apiVersion: notification.toolkit.fluxcd.io/v1beta1
+kind: Alert
+metadata:
+ name: tekton-promotion-podinfo
+ namespace: hello-podinfo
+spec:
+ eventSeverity: info
+ eventSources:
+ - kind: Pipeline
+ name: hello-podinfo
+ namespace: flux-system
+ providerRef:
+ name: tekton-promotion
+
Promoting applications through pipeline environments¶
Pipeline
resource itself so that each pipeline governs a single application and all the environments to which it is deployed.HelmReleases
are supported in automatic promotions.
Expose the promotion webhook¶
spec:
+ values:
+ enablePipelines: true
+ pipeline-controller:
+ promotion:
+ ingress:
+ enabled: true
+ className: nginx
+ annotations:
+ cert-manager.io/cluster-issuer: letsencrypt
+ hosts:
+ - host: promotions.example.org
+ paths:
+ - path: /?(.*)
+ pathType: ImplementationSpecific
+ tls:
+ - secretName: promotions-tls
+ hosts:
+ - promotions.example.org
+
Setup notifications from leaf clusters¶
---
+apiVersion: notification.toolkit.fluxcd.io/v1beta1
+kind: Provider
+metadata:
+ name: promotion-my-app
+spec:
+ address: "https://promotions.example.org/promotion/pipeline-01/my-app/dev"
+ type: generic-hmac
+ secretRef:
+ name: hmac-secret
+
generic-hmac
Provider is used to ensure notifications originate from authenticated sources. The referenced Secret, should include a token
field which holds the HMAC key. The same HMAC key must be specified in the Secret referenced by the .spec.promotion.strategy.secretRef.name
field, so that the pipeline controller can verify any incoming notifications. For more information on the generic-hmac
Provider, please refer to the notification controller docs./promotion
as shown in the example above. However you may use rewrite rules in your ingress configuration to omit it, if desired. For example, if using NGINX ingress controller, you may use the following annotation: annotations:
+ nginx.ingress.kubernetes.io/rewrite-target: /promotion/$1
+
https://promotions.example.org/pipeline-01/my-app/dev
. address
field's URL path is comprised of 3 components again:
---
+apiVersion: notification.toolkit.fluxcd.io/v1beta1
+kind: Alert
+spec:
+ eventSeverity: info
+ eventSources:
+ - kind: HelmRelease
+ name: my-app
+ exclusionList:
+ - .*upgrade.*has.*started
+ - .*is.*not.*ready
+ - ^Dependencies.*
+ providerRef:
+ name: promotion-my-app
+
HelmRelease
on the first environment defined in the pipeline is bumped (e.g. by Flux discovering a new version in the Helm repository), an event is sent to the promotion webhook which will determine the next action based on the pipeline definition and chosen strategy. The rest of this guide describes how to setup up any of the available strategies depending on your requirements.Pull request¶
Security¶
Environments and Repositories¶
RBAC¶
list
and watch
on secrets as:
Policy¶
apiVersion: source.toolkit.fluxcd.io/v1
+kind: GitRepository
+metadata:
+ name: policy-library
+ namespace: flux-system
+spec:
+ interval: 10m0s
+ url: https://github.com/weaveworks/policy-library.git
+ secretRef:
+ name: policy-library-github-credentials
+---
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+ name: rbac-secrets-good-practices
+ namespace: flux-system
+spec:
+ interval: 1m0s
+ sourceRef:
+ kind: GitRepository
+ name: policy-library
+ path: ./goodpractices/kubernetes/rbac/secrets
+ prune: true
+
Warning
PolicyConfig
:apiVersion: pac.weave.works/v2beta2
+kind: PolicyConfig
+metadata:
+name: allow-flux
+spec:
+match:
+ apps:
+ - kind: Kustomization
+ name: flux-system
+ namespace: flux-system
+config:
+ weave.templates.rbac-prohibit-wildcards-policyrule-resources:
+ parameters:
+ exclude_label_key: "app.kubernetes.io/part-of"
+ exclude_label_value: "flux"
+ weave.templates.rbac-prohibit-wildcards-policyrule-verbs:
+ parameters:
+ exclude_label_key: "app.kubernetes.io/part-of"
+ exclude_label_value: "flux"
+ weave.policies.rbac-prohibit-list-secrets:
+ parameters:
+ exclude_label_key: "app.kubernetes.io/part-of"
+ exclude_label_value: "flux"
+ weave.policies.rbac-prohibit-watch-secrets:
+ parameters:
+ exclude_label_key: "app.kubernetes.io/part-of"
+ exclude_label_value: "flux"
+ weave.policies.rbac-prohibit-wildcard-secrets:
+ parameters:
+ exclude_label_key: "app.kubernetes.io/part-of"
+ exclude_label_value: "flux"
+
apiVersion: pac.weave.works/v2beta2
+kind: PolicyConfig
+metadata:
+ name: reject-workloads-pipeline-namespace
+spec:
+ match:
+ namespaces:
+ - podinfo
+ config:
+ weave.policies.containers-should-not-run-in-namespace:
+ parameters:
+ custom_namespace: "podinfo"
+
Service Account¶
Pipeline
resource on the management cluster. You should create a RoleBinding
for the Pipeline Controller ServiceAccount
in the pipeline namespace. For a pipeline in namespace podinfo
, it would look like the following:---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: read-app-promotion-credentials
+ namespace: podinfo # change for the pipeline namespace
+rules:
+ - apiGroups:
+ - ""
+ resourceNames:
+ - "app-promotion-credentials" # change for the secret name holding the pull requests secret
+ resources:
+ - "secrets"
+ verbs:
+ - "get"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: pipeline-controller-read-app-promotion-credentials
+ namespace: podinfo
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: read-app-promotion-credentials
+subjects:
+ - kind: ServiceAccount
+ name: chart-pipeline-controller # change in case pipeline controller service account has a different name in your context
+ namespace: flux-system
+
Verify Security Context¶
$ kubectl get secret -n podinfo --as=system:serviceaccount:flux-system:chart-pipeline-controller
+
+Error from server (Forbidden): secrets is forbidden: User "system:serviceaccount:flux-system:chart-pipeline-controller" cannot list resource "secrets" in API group "" in the namespace "podinfo"
+
$ kubectl get secret -n podinfo --as=system:serviceaccount:flux-system:chart-pipeline-controller app-promotion-credentials
+
+NAME TYPE DATA AGE
+app-promotion-credentials Opaque 1 21m
+
Tokens¶
Expand to see example
Expand to see example
Expand to see example
Expand to see example
Add markers to app manifests¶
HelmRelease
manifest with such a marker looks like this:---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+spec:
+ chart:
+ spec:
+ version: 0.13.7 # {"$promotion": "pipeline-01:my-app:prod"}
+
$promotion
field in the comment is comprised of 3 components separated by colons:
pipeline-01
.name
field of one of the environments defined in the pipeline's .spec.environments
array.Supported Git Providers¶
.spec.promotion.strategy.pull-request.type
. For example, for gitlab
it would look similar to:promotion:
+ strategy:
+ pull-request:
+ type: gitlab
+ url: "https://gitlab.com/weaveworks/<my-awesome-project.git>"
+ baseBranch: main
+ secretRef:
+ name: gitlab-promotion-credentials
+
Credentials Secret¶
Expand to see example
# example to use git over https with basic auth and pat
+$ kubectl create secret generic promotion-credentials \
+--namespace=pipeline-01 \
+--from-literal="username=<bot account name>" \
+--from-literal="password=<token value>" \
+--from-literal="token=<token value>" \
+--from-literal="hmac-key=<hmac-key value>"
+
---
+apiVersion: v1
+kind: Secret
+metadata:
+name: promotion-credentials
+namespace: pipeline-01
+data:
+username: ZXhhbXBsZQ==
+password: ZXhhbXBsZS1wYXNzd29yZA==
+token: Z2hwX01IL3RsTFpXTXZMY0FxVWRYY1ZGL0lGbzh0WDdHNjdsZmRxWQ==
+hmac-key: OEIzMTNBNjQ0REU0OEVGODgxMTJCQ0VFNTQ3NkE=
+type: Opaque
+
token
field needs to be given permission to create pull requests in the pipeline's repository (defined in .spec.promotion.strategy.pull-request.url
).hmac-key
field must match the key used for the Provider resource (.spec.secretRef), if specified in the leaf clusters.Define promotion in pipeline resource¶
.spec.promotion.strategy.pull-request
defines details about the Git repository used for promoting the given app. Set the secretRef.name
field to the name of the Secret created in the previous step and the url
and branch
fields to the Git repository's HTTPS URL and optionally a specific branch (if the branch is not set, it defaults to main
). If using the generic-hmac
Provider from leaf clusters, also set the .spec.promotion.strategy.secretRef.name
to the name of the Secret created previously.Notification¶
Pipeline
to be used as event sources. An example of a patch applied to enable this is shown below:---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- gotk-components.yaml
+- gotk-sync.yaml
+patches:
+- patch: |
+ - op: add
+ path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/eventSources/items/properties/kind/enum/-
+ value: Pipeline
+ target:
+ kind: CustomResourceDefinition
+ name: alerts.notification.toolkit.fluxcd.io
+
---
+apiVersion: notification.toolkit.fluxcd.io/v1beta1
+kind: Provider
+metadata:
+ name: promotion-my-app-via-github-actions
+spec:
+ type: githubdispatch
+ address: https://github.com/my-org/my-app-repo
+ secretRef:
+ name: github-credentials
+
---
+apiVersion: notification.toolkit.fluxcd.io/v1beta1
+kind: Alert
+metadata:
+ name: promotion-my-app-via-github-actions
+spec:
+ eventSeverity: info
+ eventSources:
+ - kind: Pipeline
+ name: my-app
+ namespace: my-app-ns
+ providerRef:
+ name: promotion-my-app-via-github-actions
+
---
+apiVersion: pipelines.weave.works/v1alpha1
+kind: Pipeline
+metadata:
+ name: my-app
+ namespace: my-app-ns
+spec:
+ promotion:
+ notification: {}
+
Manual promotion¶
spec.promotion.manual
key to true
.Expand to see example
apiVersion: pipelines.weave.works/v1alpha1
+kind: Pipeline
+metadata:
+name: my-app
+namespace: my-app-ns
+spec:
+promotion:
+ manual: true
+ strategy:
+ pull-request:
+ type: github
+ url: https://github.com/my-org/my-app-repo
+ baseBranch: main
+ secretRef:
+ name: promotion-credentials
+
Configuration¶
Retry Logic¶
# values.yaml
+promotion:
+ retry:
+ # Initial delay between retries.
+ delay: 2
+ # Maximum delay between retries.
+ maxDelay: 20
+ # Number of attempts.
+ threshold: 3
+
maxDelay
option is there. If the calculated delay would exceed this value, it will use that as delay. For example if the delay values would be [2, 4, 8, 16, 32, 64]
, but maxDelay
is set to 15, the list will be [2, 4, 8, 15, 15, 15]
. With this option, the promotion will be retried on failure, but the sum of delay values will be only 59 seconds instead of 126 seconds.Rate Limiting¶
# values.yaml
+promotion:
+ rateLimit:
+ # Number of requests allowed in set interval.
+ value: 20
+ interval: 30
+
Pipeline ENTERPRISE¶
apiVersion: pipelines.weave.works/v1alpha1
+kind: Pipeline
+metadata:
+ name: podinfo-02
+ namespace: flux-system
+spec:
+ appRef:
+ apiVersion: helm.toolkit.fluxcd.io/v2beta1
+ kind: HelmRelease
+ name: podinfo
+ environments:
+ - name: dev
+ targets:
+ - namespace: podinfo-02-dev
+ clusterRef:
+ kind: GitopsCluster
+ name: dev
+ namespace: flux-system
+ - name: test
+ targets:
+ - namespace: podinfo-02-qa
+ clusterRef:
+ kind: GitopsCluster
+ name: dev
+ namespace: flux-system
+ - namespace: podinfo-02-perf
+ clusterRef:
+ kind: GitopsCluster
+ name: dev
+ namespace: flux-system
+ - name: prod
+ targets:
+ - namespace: podinfo-02-prod
+ clusterRef:
+ kind: GitopsCluster
+ name: prod
+ namespace: flux-system
+ promotion:
+ strategy:
+ pull-request:
+ type: github
+ url: https://github.com/my-org/my-app-repo
+ baseBranch: main
+ secretRef:
+ name: github-credentials
+
Specification¶
v1alpha1
of a Pipeline
resource is found next.Pipeline¶
// Pipeline is the Schema for the pipelines API
+type Pipeline struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec PipelineSpec `json:"spec,omitempty"`
+ // +kubebuilder:default={"observedGeneration":-1}
+ Status PipelineStatus `json:"status,omitempty"`
+}
+
+type PipelineSpec struct {
+ // Environments is a list of environments to which the pipeline's application is supposed to be deployed.
+ // +required
+ Environments []Environment `json:"environments"`
+ // AppRef denotes the name and type of the application that's governed by the pipeline.
+ // +required
+ AppRef LocalAppReference `json:"appRef"`
+ // Promotion defines details about how promotions are carried out between the environments
+ // of this pipeline.
+ // +optional
+ Promotion *Promotion `json:"promotion,omitempty"`
+}
+
+type Environment struct {
+ // Name defines the name of this environment. This is commonly something such as "dev" or "prod".
+ // +required
+ Name string `json:"name"`
+ // Targets is a list of targets that are part of this environment. Each environment should have
+ // at least one target.
+ // +required
+ Targets []Target `json:"targets"`
+ // Promotion defines details about how the promotion is done on this environment.
+ // +optional
+ Promotion *Promotion `json:"promotion,omitempty"`
+}
+
+type Target struct {
+ // Namespace denotes the namespace of this target on the referenced cluster. This is where
+ // the app pointed to by the environment's `appRef` is searched.
+ // +required
+ Namespace string `json:"namespace"`
+ // ClusterRef points to the cluster that's targeted by this target. If this field is not set, then the target is assumed
+ // to point to a Namespace on the cluster that the Pipeline resources resides on (i.e. a local target).
+ // +optional
+ ClusterRef *CrossNamespaceClusterReference `json:"clusterRef,omitempty"`
+}
+
+// Promotion define promotion configuration for the pipeline.
+type Promotion struct {
+ // Manual option to require manual approval before the promotion proceeds.
+ // +optional
+ Manual bool `json:"manual,omitempty"`
+ // Strategy defines which strategy the promotion should use.
+ Strategy Strategy `json:"strategy"`
+}
+
+// Strategy defines all the available promotion strategies. All of the fields in here are mutually exclusive, i.e. you can only select one
+// promotion strategy per Pipeline. Failure to do so will result in undefined behaviour.
+type Strategy struct {
+ // PullRequest defines a promotion through a Pull Request.
+ // +optional
+ PullRequest *PullRequestPromotion `json:"pull-request,omitempty"`
+ // Notification defines a promotion where an event is emitted through Flux's notification-controller each time an app is to be promoted.
+ // +optional
+ Notification *NotificationPromotion `json:"notification,omitempty"`
+ // SecretRef references the secret that contains a 'hmac-key' field with the HMAC key used to authenticate webhook calls.
+ // +optional
+ SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
+}
+type GitProviderType string
+
+const (
+ Github GitProviderType = "github"
+ Gitlab GitProviderType = "gitlab"
+ BitBucketServer GitProviderType = "bitbucket-server"
+)
+
+type PullRequestPromotion struct {
+ // Indicates the git provider type to manage pull requests.
+ // +required
+ // +kubebuilder:validation:Enum=github;gitlab;bitbucket-server
+ Type GitProviderType `json:"type"`
+ // The git repository HTTPS URL used to patch the manifests for promotion.
+ // +required
+ URL string `json:"url"`
+ // The branch to checkout after cloning. Note: This is just the base
+ // branch that will eventually receive the PR changes upon merge and does
+ // not denote the branch used to create a PR from. The latter is generated
+ // automatically and cannot be provided.
+ // +required
+ BaseBranch string `json:"baseBranch"`
+ // SecretRef specifies the Secret containing authentication credentials for
+ // the git repository and for the Git provider API.
+ // For HTTPS repositories the Secret must contain 'username' and 'password'
+ // fields.
+ // For Git Provider API to manage pull requests, it must contain a 'token' field.
+ // +required
+ SecretRef meta.LocalObjectReference `json:"secretRef"`
+}
+
+type NotificationPromotion struct{}
+
References¶
// LocalAppReference is used together with a Target to find a single instance of an application on a certain cluster.
+type LocalAppReference struct {
+ // API version of the referent.
+ // +required
+ APIVersion string `json:"apiVersion"`
+
+ // Kind of the referent.
+ // +required
+ Kind string `json:"kind"`
+
+ // Name of the referent.
+ // +required
+ Name string `json:"name"`
+}
+
+// CrossNamespaceClusterReference contains enough information to let you locate the
+// typed Kubernetes resource object at cluster level.
+type CrossNamespaceClusterReference struct {
+ // API version of the referent.
+ // +optional
+ APIVersion string `json:"apiVersion,omitempty"`
+
+ // Kind of the referent.
+ // +required
+ Kind string `json:"kind"`
+
+ // Name of the referent.
+ // +required
+ Name string `json:"name"`
+
+ // Namespace of the referent, defaults to the namespace of the Kubernetes resource object that contains the reference.
+ // +optional
+ Namespace string `json:"namespace,omitempty"`
+}
+
Status¶
type PipelineStatus struct {
+ // ObservedGeneration is the last observed generation.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // Conditions holds the conditions for the Pipeline.
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
Condition Reasons¶
// Reasons are provided as utility, and are not part of the declarative API.
+const (
+ // TargetClusterNotFoundReason signals a failure to locate a cluster resource on the management cluster.
+ TargetClusterNotFoundReason string = "TargetClusterNotFound"
+ // TargetClusterNotReadyReason signals that a cluster pointed to by a Pipeline is not ready.
+ TargetClusterNotReadyReason string = "TargetClusterNotReady"
+ // ReconciliationSucceededReason signals that a Pipeline has been successfully reconciled.
+ ReconciliationSucceededReason string = "ReconciliationSucceeded"
+)
+
Authorization ENTERPRISE¶
View Resources¶
policies
, policyconfigs
, and events
resource.policies-reader
role and developer-policies-reader
cluster role binding, to allow a group developer
to access all the policy-related resources.apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: policies-reader
+rules:
+ - apiGroups: ["pac.weave.works"]
+ resources: ["policies", "policyconfigs"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "watch", "list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: developer-policies-reader
+subjects:
+ - kind: Group
+ name: developer
+ apiGroup: rbac.authorization.k8s.io
+roleRef:
+ kind: ClusterRole
+ name: policies-reader
+ apiGroup: rbac.authorization.k8s.io
+
Commit/Build Time Checks ENTERPRISE¶
Overview¶
weaveworks/weave-iac-validator:v1.1
.Expand to view of the usage options
USAGE:
+ app [global options] command [command options] [arguments...]
+
+VERSION:
+ 0.0.1
+
+COMMANDS:
+ help, h Shows a list of commands or help for one command
+
+GLOBAL OPTIONS:
+ --path value path to scan resources from
+ --helm-values-file value path to resources helm values file
+ --policies-path value path to policies kustomization directory
+ --policies-helm-values-file value path to policies helm values file
+ --git-repo-provider value git repository provider
+ --git-repo-host value git repository host
+ --git-repo-url value git repository url
+ --git-repo-branch value git repository branch
+ --git-repo-sha value git repository commit sha
+ --git-repo-token value git repository token
+ --azure-project value azure project name
+ --sast value save result as gitlab sast format
+ --sarif value save result as sarif format
+ --json value save result as json format
+ --generate-git-report generate git report if supported (default: false)
+ --remediate auto remediate resources if possible (default: false)
+ --no-exit-error exit with no error (default: false)
+ --help, -h show help (default: false)
+ --version, -v print the version (default: false)
+
Setup policies¶
└── policies
+ ├── kustomization.yaml
+ ├── minimum-replica-count.yaml
+ ├── privileged-mode.yaml
+ └── privilege-escalation.yaml
+
# kustomization.yaml
+ kind: Kustomization
+ apiVersion: kustomize.config.k8s.io/v1beta1
+ resources:
+ - minimum-replica-count.yaml
+ - privilege-escalation.yaml
+ - privileged-mode.yaml
+
Supported CI/CD¶
Auto-Remediation¶
--remediate
flag and --git-repo-token
.
UseCase: Github¶
UseCase: Gitlab¶
weave:
+ image:
+ name: weaveworks/weave-iac-validator:v1.1
+ script:
+ - weave-validator --path <path to resources> --policies-path <path to policies>
+
Enable Auto Remediation¶
script:
+ - weave-validator --path <path to resources> --policies-path <path to policies> --git-repo-token $GITLAB_TOKEN --remediate
+
Enable Static Application Security Testing¶
stages:
+ - weave
+ - sast
+
+ weave:
+ stage: weave
+ image:
+ name: weaveworks/weave-iac-validator:v1.1
+ script:
+ - weave-validator <path to resources> --policies-path <path to policies> --sast sast.json
+ artifacts:
+ when: on_failure
+ paths:
+ - sast.json
+
+ upload_sast:
+ stage: sast
+ when: always
+ script:
+ - echo "creating sast report"
+ artifacts:
+ reports:
+ sast: sast.json
+
UseCase: Bitbucket¶
pipelines:
+ default:
+ - step:
+ name: 'Weaveworks'
+ image: weaveworks/weave-iac-validator:v1.1
+ script:
+ - weave-validator --path <path to resources> --policies-path <path to policies>
+
Enable Auto Remediation¶
script:
+ - weave-validator --path <path to resources> --policies-path <path to policies> --git-repo-token $TOKEN --remediate
+
Create Pipeline Report¶
script:
+ - weave-validator --path <path to resources> --policies-path <path to policies> --git-repo-token $TOKEN --generate-git-report
+
UseCase: CircleCI¶
jobs:
+ weave:
+ docker:
+ - image: weaveworks/weave-iac-validator:v1.1
+ steps:
+ - checkout
+ - run:
+ command: weave-validator --path <path to resources> --policies-path <path to policies>
+
Enable Auto Remediation¶
- run:
+ command: weave-validator --path <path to resources> --policies-path <path to policies> --git-repo-token ${GITHUB_TOKEN} --remediate
+
UseCase: Azure DevOps¶
trigger:
+- <list of branches to trigger the pipeline on>
+
+pool:
+ vmImage: ubuntu-latest
+
+container:
+ image: weaveworks/weave-iac-validator:v1.1-azure
+
+steps:
+- script: weave-validator --path <path to resources> --policies-path <path to policies> --git-repo-token $(TOKEN)
+
Enable Auto Remediation¶
steps:
+- script: weave-validator --path <path to resources> --policies-path <path to policies> --git-repo-token $(TOKEN) --remediate
+
Getting Started ENTERPRISE¶
The Policy Ecosystem¶
Installation Pre-requisites¶
Weave GitOps¶
Policy Library¶
Install the Policy Agent¶
weave-policy-agent
from the profiles dropdown in the Create Cluster
page.values.yaml
to pull the policies from your repo into the cluster. This is done by configuring the policySource
section. If your policy library repo is private, you will also need to reference the Secret
that contains the repo credentials. This is usually the secret you created while bootstrapping Flux on the management cluster and is copied to your leaf cluster during creation.Expand to see an example that creates a new git source
policySource:
+enabled: true
+url: ssh://git@github.com/weaveworks/policy-library # This should be the url of the forked repo
+tag: v1.0.0
+path: ./ # Could be a path to the policies dir or a kustomization.yaml file
+secretRef: my-pat # the name of the secret containing the repo credentials
+
Expand to see an example that uses an existing git source
policySource:
+enabled: true
+sourceRef: # Specify the name for an existing GitSource reference
+ kind: GitRepository
+ name: policy-library
+ namespace: flux-system
+
Policies in UI¶
Prevent Violating Changes¶
Container Image Pull Policy
which is one of the enforced policies. This policy is violated when the container's imagePullPolicy
is not set to Always
.Expand for an example of a violating deployment
apiVersion: apps/v1
+kind: Deployment
+metadata:
+name: nginx-deployment
+labels:
+ app: nginx
+spec:
+replicas: 3
+selector:
+ matchLabels:
+ app: nginx
+template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.14.2
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 80
+
Violations Logs in UI¶
Violations Log
in Weave GitOps UI to view the policy violations of all the connected clusters, and dive into the details of each violation.
Introduction ENTERPRISE¶
Policy¶
Admission Controller¶
Audit¶
Commit/Build Time Checks¶
PolicyConfig ENTERPRISE¶
Goal¶
PolicyConfig
CRD allows us to do that without duplicating policies by overriding policy parameters of multiple policies for a specific target.Schema¶
match
used to specify the target of this PolicyConfig and 2) config
used to specify the policy parameters that will override the original policy parameters. Expand to see a PolicyConfig example
apiVersion: pac.weave.works/v2beta2
+kind: PolicyConfig # policy config resource kind
+metadata:
+name: my-config # policy config name
+spec:
+match: # matches (targets of the policy config)
+ workspaces: # add one or more name workspaces
+ - team-a
+ - team-b
+config: # config for policies [one or more]
+ weave.policies.containers-minimum-replica-count:
+ parameters:
+ replica_count: 3
+
match:
+ workspaces:
+ - team-a
+ - team-b
+
match:
+ namespaces:
+ - dev
+ - prod
+
match:
+ apps: # add one or more apps [HelmRelease, Kustomization]
+ - kind: HelmRelease
+ name: my-app # app name
+ namespace: flux-system # app namespace [if empty will match in any namespace]
+
match:
+ resources: # add one or more resources [Deployment, ReplicaSet, ..]
+ - kind: Deployment
+ name: my-deployment # resource name
+ namespace: default # resource namespace [if empty will match in any namespace]
+
config: # config for policies [one or more]
+ weave.policies.containers-minimum-replica-count: # the id of the policy
+ parameters:
+ replica_count: 3
+ owner: owner-4
+ weave.policies.containers-running-in-privileged-mode:
+ parameters:
+ privilege: true
+
Overlapping Targets¶
Example¶
app-a
and deployment deployment-1
which is part of this application.Expand to see manifests
apiVersion: pac.weave.works/v2beta2
+kind: PolicyConfig
+metadata:
+name: my-config-1
+spec:
+match:
+ namespaces:
+ - flux-system
+config:
+ weave.policies.containers-minimum-replica-count:
+ parameters:
+ replica_count: 2
+ owner: owner-1
+---
+apiVersion: pac.weave.works/v2beta2
+kind: PolicyConfig
+metadata:
+name: my-config-2
+spec:
+match:
+ apps:
+ - kind: Kustomization
+ name: app-a
+config:
+ weave.policies.containers-minimum-replica-count:
+ parameters:
+ replica_count: 3
+---
+apiVersion: pac.weave.works/v2beta2
+kind: PolicyConfig
+metadata:
+name: my-config-3
+spec:
+match:
+ apps:
+ - kind: Kustomization
+ name: app-a
+ namespace: flux-system
+config:
+ weave.policies.containers-minimum-replica-count:
+ parameters:
+ replica_count: 4
+---
+apiVersion: pac.weave.works/v2beta2
+kind: PolicyConfig
+metadata:
+name: my-config-4
+spec:
+match:
+ resources:
+ - kind: Deployment
+ name: deployment-1
+config:
+ weave.policies.containers-minimum-replica-count:
+ parameters:
+ replica_count: 5
+ owner: owner-4
+---
+
+apiVersion: pac.weave.works/v2beta2
+kind: PolicyConfig
+metadata:
+name: my-config-5
+spec:
+match:
+ resources:
+ - kind: Deployment
+ name: deployment-1
+ namespace: flux-system
+config:
+ weave.policies.containers-minimum-replica-count:
+ parameters:
+ replica_count: 6
+
app-a
will be affected by my-config-5
. It will be applied on the policies defined in it, which will affect deployment deployment-1
in namespace flux-system
as it matches the kind, name and namespace.deployment-1
in another namespace other than flux-system
won't be affected by this configuration config:
+ weave.policies.containers-minimum-replica-count:
+ parameters:
+ replica_count: 6 # from my-config-5
+ owner: owner-4 # from my-config-4
+
deployment-1
in namespace flux-system
, replica_count
must be >= 6
my-config-4
for owner
configuration parameter owner: owner-4
my-config-1
, my-config-2
, my-config-3
and my-config-4
my-config-4
will be applied on the policies defined in it which will affect deployment deployment-1
in all namespaces as it matches the kind and name only. config:
+ weave.policies.containers-minimum-replica-count:
+ parameters:
+ replica_count: 5 # from my-config-4
+ owner: owner-4 # from my-config-4
+
deployment-1
in all namespaces replica_count
must be >= 5
my-config-4
for owner
configuration parameter owner: owner-4
my-config-1
, my-config-2
and my-config-3
my-config-3
will be applied on the policies defined in it which will affect application app-a
and all the resources in it in namespace flux-system
as it matches the kind, name and namespace.app-a
in another namespace other than flux-system
won't be affected by this configuration config:
+ weave.policies.containers-minimum-replica-count:
+ parameters:
+ replica_count: 4 # from my-config-3
+ owner: owner-1 # from my-config-1
+
app-a
and all the resources in it in namespaces flux-system
, replica_count
must be >= 4
my-config-1
for owner
configuration parameter owner: owner-1
my-config-1
and my-config-2
my-config-2
will be applied on the policies defined in it which will affect application app-a
and all the resources in it in all namespaces as it matches the kind and name only. config:
+ weave.policies.containers-minimum-replica-count:
+ parameters:
+ replica_count: 3 # from my-config-2
+ owner: owner-1 # from my-config-1
+
app-a
and all the resources in all namespaces, replica_count
must be >= 3
my-config-1
for owner
configuration parameter owner: owner-1
my-config-1
my-config-1
will be applied on the policies defined in it. which will affect the namespace flux-system
with all applications and resources in it as it matches by namespace only. config:
+ weave.policies.containers-minimum-replica-count:
+ parameters:
+ replica_count: 2 # from my-config-1
+ owner: owner-1 # from my-config-1
+
flux-system
, replica_count
must be >= 2
my-config-1
for owner
configuration parameter owner: owner-1
PolicySet ENTERPRISE¶
admission
policysetsapiVersion: pac.weave.works/v2beta2
+kind: PolicySet
+metadata:
+ name: my-policy-set
+spec:
+ mode: admission
+ filters:
+ ids:
+ - weave.policies.containers-minimum-replica-count
+ categories:
+ - security
+ severities:
+ - high
+ - medium
+ standards:
+ - pci-dss
+ tags:
+ - tag-1
+
admission
, audit
, and tfAdmission
.Grouping Policies¶
Migration from v2beta1 to v2beta2¶
New fields¶
spec.mode
is added. PolicySets should be updated to set the modeExample of the agent configuration in versions older than v2.0.0¶
# config.yaml
+admission:
+ enabled: true
+ policySet: admission-policy-set
+ sinks:
+ filesystemSink:
+ fileName: admission.txt
+
Example of current PolicySet with mode field¶
apiVersion: pac.weave.works/v2beta2
+kind: PolicySet
+metadata:
+ name: admission-policy-set
+spec:
+ mode: admission
+ filters:
+ ids:
+ - weave.policies.containers-minimum-replica-count
+
Updated fields¶
spec.name
became optional.Deprecate fields¶
spec.id
is deprecated.
Policy ENTERPRISE¶
Policy CRD¶
Policy Library¶
Tenant Policy¶
tenancy
.Mutating Resources¶
v2.2.0
, the policy agent will support mutating resources.mutate
set to true
and the rego code should return the violating_key
and the recommended_value
in the violation response. The mutation webhook will use the violating_key
and recommended_value
to mutate the resource and return the new mutated resource.result = {
+ "issue_detected": true,
+ "msg": sprintf("Replica count must be greater than or equal to '%v'; found '%v'.", [min_replica_count, replicas]),
+ "violating_key": "spec.replicas",
+ "recommended_value": min_replica_count
+}
+
Policy Validation¶
id: string # identifier for the violation
+account_id: string # organization identifier
+cluster_id: string # cluster identifier
+policy: object # contains related policy data
+entity: object # contains related resource data
+status: string # Violation or Compliance
+message: string # message that summarizes the policy validation
+type: string # the mode that produced this object. one of: Admission, Audit, TFAdmission
+trigger: string # what triggered the validation, create request or initial audit,..
+created_at: string # time that the validation occurred in
+
Profile Releases ENTERPRISE¶
v0.6.5¶
Highlights¶
Dependency Versions¶
Policy Library Compatibility¶
v0.6.4¶
Highlights¶
Dependency Versions¶
Policy Library Compatibility¶
v0.6.3¶
Highlights¶
Dependency Versions¶
Policy Library Compatibility¶
v0.6.2¶
Highlights¶
Dependency Versions¶
Policy Library Compatibility¶
horizontalpodautoscalers
v0.6.1¶
Highlights¶
config.audit.interval
. It defaults to 24 hours.Dependency Versions¶
Policy Library Compatibility¶
v0.6.0¶
Highlights¶
Dependency Versions¶
Policy Library Compatibility¶
Policy Profile ENTERPRISE¶
Overview¶
policySource
to configure the source for deploying policies and policy-agent
to configure the policy agent.Expand for an example of the profile values file
policy-agent:
+failurePolicy: Ignore
+
+# If you don't want to use cert-manager, set useCertManager to false and provide your own certs
+useCertManager: true
+certificate: ""
+key: ""
+caCertificate: ""
+
+persistence:
+ enabled: false
+ # claimStorage: 1Gi
+ # sinkDir: /tmp
+ # storageClassName: standard
+
+config:
+ accountId: ""
+ clusterId: ""
+
+ audit:
+ # Enable audit functionality
+ enabled: false
+ # sinks:
+ # # Enable writing violations as K8s events
+ # k8sEventsSink:
+ # enabled: true
+
+ admission:
+ # Enable admission functionality
+ enabled: true
+ # mutate: true # enable mutating violating resources
+ sinks:
+ # Enable writing violations as K8s events
+ k8sEventsSink:
+ enabled: true
+
+
+policySource:
+enabled: false
+# url: ssh://git@github.com/weaveworks/policy-library
+# tag: v1.0.0
+# branch:
+# path: ./ # Could be a path to the policies dir or a kustomization.yaml file
+# secretRef: policy-library-auth # (Optional): Name of the K8s secret with private repo auth credentials
+# sourceRef: # Could specify a name for an existing GitSource reference instead of creating a new one
+# kind: GitRepository
+# name: policy-library
+# namespace: flux-system
+
Policy Sources¶
kustomize.toolkit.fluxcd.io.Kustomization
, that deploys the policies to the cluster.kustomize.config.k8s.io.Kustomization
file should be defined in the repository.apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources: # specifies the path to each required policy
+ - policies/ControllerContainerAllowingPrivilegeEscalation/policy.yaml
+ - policies/ControllerContainerRunningAsRoot/policy.yaml
+ - policies/ControllerReadOnlyFileSystem/policy.yaml
+
policySource:
+ enabled: true
+ url: URL of the repo where your policies exist
+ tag: tag name on the policies repo
+ path: Path to the policies dir - or a kustomization.yaml that selects some policies - in the repo
+ secretRef (if the repo is private): Name of the K8s secret with private repo credentials (leave empty if the repo is public)
+
policySource:
+ enabled: true
+ sourceRef:
+ kind: Kind of the existing source
+ name: Name of the policy library source
+ namespace: Namespace where the source exists
+
Policy Agent Configuration¶
config
section is the single entry point for configuring the agent.
accountId
: unique identifier that signifies the owner of that agentclusterId
: unique identifier for the cluster that the agent will run against
logLevel
: app log level (default: "info")probesListen
: address for the probes server to run on (default: ":9000")metricsAddress
: address the metric endpoint binds to (default: ":8080")Agent Modes¶
Admission¶
controller-runtime
Kubernetes package to register a callback that will be called when the agent receives an admission request. Once called, the agent will validate the received resource against the admission and tenant policies and k8s will use the result of this validation to either allow or reject the creation/update of said resource.
kubernetes
policy-agent:
+ config:
+ admission:
+ enabled: true
+
policy-agent:
+ useCertManager: true
+
policy-agent:
+ certificate: "---" # admission server certificate
+ key: "---" # admission server private key
+ caCertificate: "---" # CA bundle to validate the webhook server, used by the client
+
policy-agent:
+ failurePolicy: Ignore
+
Audit¶
kubernetes
policy-agent:
+ config:
+ audit:
+ enabled: true
+ interval: 24 # configuring the frequency of audit operations, in hours (default is 24 hours)
+
Terraform Admission¶
terraform
policy-agent:
+ config:
+ tfAdmission:
+ enabled: true
+
Policy Validation Sinks¶
policy-agent:
+ config:
+ audit:
+ writeCompliance: true
+
logs
directory, in the agent container as a json string. It is important to note that this file will not be persisted and will be deleted upon pod restart, so generally this approach is not recommended for a production environment.policy-agent:
+config:
+ audit:
+ sinks:
+ fileSystemSink:
+ fileName: "file.json"
+
policy-agent:
+config:
+ admission:
+ sinks:
+ fileSystemSink:
+ fileName: "file.json"
+
policy-agent:
+persistence:
+ enabled: false # specifies whether to use persistence or not
+ claimStorage: 1Gi # claim size
+ storageClassName: standard # k8s StorageClass name
+
policy-agent:
+config:
+ audit:
+ sinks:
+ k8sEventsSink:
+ enabled: true
+
policy-agent:
+config:
+ admission:
+ sinks:
+ k8sEventsSink:
+ enabled: true
+
policy-agent:
+config:
+ audit:
+ sinks:
+ fluxNotificationSink:
+ address: ""
+
policy-agent:
+config:
+ admission:
+ sinks:
+ fluxNotificationSink:
+ address: ""
+
policy-agent:
+config:
+ audit:
+ sinks:
+ elasticSink:
+ address: ""
+ username: ""
+ password: ""
+ indexName: ""
+ insertionMode: "upsert"
+
policy-agent:
+config:
+ admission:
+ sinks:
+ elasticSink:
+ address: ""
+ username: ""
+ password: ""
+ indexName: ""
+ insertionMode: "insert"
+
insert
: doesn't update or delete any old records. The index will contain a log for all validation objects and give an insight of all the historical data.upsert
: updates the old result of validating an entity against a policy that happened on the same day. So the index will only contain the latest validation results for a policy and entity combination per day.
Manual Approval for Progressive Delivery Deployments ENTERPRISE¶
Canary
objects and rollout progress.Prerequisites¶
Canary
object and target deploymentBasic Introduction to Webhooks and Gating¶
confirm-rollout
. - Before increasing traffic weight with confirm-traffic-increase
. - Before promoting a new version after successful canary analysis with confirm-promotion
.200 OK
status code is returned, and halt if 403 Forbidden
.CanaryWebhookPayload
:type CanaryWebhookPayload struct {
+ // Name of the canary
+ Name string `json:"name"`
+
+ // Namespace of the canary
+ Namespace string `json:"namespace"`
+
+ // Phase of the canary analysis
+ Phase CanaryPhase `json:"phase"`
+
+ // Metadata (key-value pairs) for this webhook
+ Metadata map[string]string `json:"metadata,omitempty"`
+}
+
Use Flagger's Load Tester to Manually Gate a Promotion¶
confirm-promotion
webhook. This will call a particular gate provided through Flagger's load tester, and is an easy way to experiment using Flagger's included components. Configure the
confirm-promotion
Webhook¶analysis
section: analysis:
+ webhooks:
+ - name: "ask for confirmation"
+ type: confirm-promotion
+ url: http://flagger-loadtester.test/gate/check
+
Deploy a New Version of Your Application¶
Wait for the Canary Analysis to Complete¶
confirm-promotion
webhook and change status to WaitingPromotion
:Open the Gate¶
$ kubectl -n test exec -it flagger-loadtester-xxxx-xxxx sh
+
+# to open
+> curl -d '{"name": "app","namespace":"test"}' http://localhost:8080/gate/open
+
> curl -d '{"name": "app","namespace":"test"}' http://localhost:8080/gate/close
+
Progressive Delivery Using Flagger ENTERPRISE¶
gitops-canaries-reader
includes the minimum permissions necessary for a user to be able to view canary object details, metric template object details and canary related events. Prerequisites¶
autoscaling/v2
or autoscaling/v2beta2
API to be installed on your cluster. You can use kubectl api-resources
to check which API versions are supported.Installing Linkerd Using Flux¶
secretGenerator
.linkerd install
command. However, when using a Helm chart to install Linkerd, you must provide these certificates deliberately. The step
CLI, listed above, allows us to generate these certificates.step certificate create root.linkerd.cluster.local ca.crt ca.key \
+--profile root-ca --no-password --insecure
+
step certificate create identity.linkerd.cluster.local issuer.crt issuer.key \
+--profile intermediate-ca --not-after 8760h --no-password --insecure \
+--ca ca.crt --ca-key ca.key
+
ca.crt
, issuer.crt
, and issuer.key
files to the cluster repository under a linkerd
directory.
./linkerd
directory: - A Namespace
resource to control where the components are installed - A HelmRepository
resource to make the Linkerd Helm repo available on the cluster - A HelmRelease
resource to install the latest version of Linkerd from the HelmRepository
Expand to see and copy-paste the three Linkerd manifests to add
---
+apiVersion: v1
+kind: Namespace
+metadata:
+name: linkerd
+labels:
+ config.linkerd.io/admission-webhooks: disabled
+
---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+name: linkerd
+spec:
+interval: 1h
+url: https://helm.linkerd.io/stable
+
spec.values.identity.issuer.crtExpiry
field below depends on the parameter value used during the creation of the issuer certificate. In this example, it should be set to one year from the certificate creation.---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+name: linkerd
+spec:
+interval: 10m
+chart:
+ spec:
+ chart: linkerd2
+ reconcileStrategy: ChartVersion
+ sourceRef:
+ kind: HelmRepository
+ name: linkerd
+install:
+ crds: Create
+upgrade:
+ crds: CreateReplace
+valuesFrom:
+ - kind: Secret
+ name: linkerd-certs
+ valuesKey: ca.crt
+ targetPath: identityTrustAnchorsPEM
+ - kind: Secret
+ name: linkerd-certs
+ valuesKey: issuer.crt
+ targetPath: identity.issuer.tls.crtPEM
+ - kind: Secret
+ name: linkerd-certs
+ valuesKey: issuer.key
+ targetPath: identity.issuer.tls.keyPEM
+values:
+ installNamespace: false
+ identity:
+ issuer:
+ crtExpiry: "2023-07-18T20:00:00Z" # Change this to match generated certificate expiry date
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+name: linkerd-viz
+spec:
+interval: 10m
+dependsOn:
+ - name: linkerd
+chart:
+ spec:
+ chart: linkerd-viz
+ reconcileStrategy: ChartVersion
+ sourceRef:
+ kind: HelmRepository
+ name: linkerd
+
Secrets
that are referenced in HelmRelease
manifests. The second file is a Kustomization
that references all the other linkerd
resource files.Expand to see the Linkerd Kustomization manifests
nameReference:
+- kind: Secret
+ version: v1
+ fieldSpecs:
+ - path: spec/valuesFrom/name
+ kind: HelmRelease
+
---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: linkerd
+configurations:
+- kustomizeconfig.yaml
+resources:
+- namespace.yaml
+- source.yaml
+- releases.yaml
+secretGenerator:
+- name: linkerd-certs
+ files:
+ - ca.crt
+ - issuer.crt
+ - issuer.key
+
secretGenerator
generates Secrets from the files you've just created.linkerd
directory in your cluster repository should look like this:> tree linkerd
+linkerd
+├── ca.crt
+├── issuer.crt
+├── issuer.key
+├── kustomization.yaml
+├── kustomizeconfig.yaml
+├── namespace.yaml
+├── releases.yaml
+└── source.yaml
+
> kubectl get pods -n linkerd
+NAME READY STATUS RESTARTS AGE
+linkerd-destination-66d5668b-4mw49 4/4 Running 0 10m
+linkerd-identity-6b4658c74b-6nc97 2/2 Running 0 10m
+linkerd-proxy-injector-6b76789cb4-8vqj4 2/2 Running 0 10m
+
+> kubectl get pods -n linkerd-viz
+NAME READY STATUS RESTARTS AGE
+grafana-db56d7cb4-xlnn4 2/2 Running 0 10m
+metrics-api-595c7b564-724ps 2/2 Running 0 10m
+prometheus-5d4dffff55-8fscd 2/2 Running 0 10m
+tap-6dcb89d487-5ns8n 2/2 Running 0 10m
+tap-injector-54895654bb-9xn7k 2/2 Running 0 10m
+web-6b6f65dbc7-wltdg 2/2 Running 0 10m
+
Installing Flagger Using Flux¶
flagger
directory. Make sure to locate it under a repository path that Flux reconciles. Namespace
resource to control where the components are installed - A HelmRepository
resource to make the Flagger Helm repo available on the cluster - A HelmRelease
resource to install the latest version of Flagger and the load tester app (which generates synthetic traffic during the analysis phase), from that HelmRepository
Expand to see the three Flagger resource manifests
---
+apiVersion: v1
+kind: Namespace
+metadata:
+name: flagger
+
---
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: HelmRepository
+metadata:
+name: flagger
+spec:
+interval: 1h
+url: https://flagger.app
+
---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+name: flagger
+spec:
+releaseName: flagger
+install:
+ crds: Create
+upgrade:
+ crds: CreateReplace
+interval: 10m
+chart:
+ spec:
+ chart: flagger
+ reconcileStrategy: ChartVersion
+ sourceRef:
+ kind: HelmRepository
+ name: flagger
+values:
+ metricsServer: http://prometheus.linkerd-viz:9090
+ meshProvider: linkerd
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+name: loadtester
+spec:
+interval: 10m
+chart:
+ spec:
+ chart: loadtester
+ reconcileStrategy: ChartVersion
+ sourceRef:
+ kind: HelmRepository
+ name: flagger
+
Expand to see the Flagger Kustomization manifest
---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: flagger
+resources:
+- namespace.yaml
+- source.yaml
+- releases.yaml
+
flagger
directory in the cluster repository should look like this:> tree flagger
+flagger
+├── kustomization.yaml
+├── namespace.yaml
+├── releases.yaml
+└── source.yaml
+
> kubectl get pods -n flagger
+NAME READY STATUS RESTARTS AGE
+flagger-7d456d4fc7-knf2g 1/1 Running 0 4m
+loadtester-855b4d77f6-scl6r 1/1 Running 0 4m
+
Custom Resources Generated by Flagger¶
spec.provider
to be set in each canary resource.
Provider API Group Resource AppMesh appmesh.k8s.aws virtualnode appmesh.k8s.aws virtualrouter appmesh.k8s.aws virtualservice Linkerd split.smi-spec.io trafficsplit Istio networking.istio.io destinationrule networking.istio.io virtualservice Contour projectcontour.io httpproxy Gloo gateway.solo.io routetable gloo.solo.io upstream Nginx networking.k8s.io ingress Skipper networking.k8s.io ingress Traefik traefik.containo.us traefikservice Open Service Mesh split.smi-spec.io trafficsplit Kuma kuma.io trafficroute GatewayAPI gateway.networking.k8s.io httproute gitops-canaries-reader
has been extended to allow the user to view TrafficSplit resources when Linkerd is used:Expand to see example canary reader RBAC
apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+name: gitops-canaries-reader
+rules:
+- apiGroups:
+- flagger.app
+resources:
+- canaries
+- metrictemplates
+verbs:
+- get
+- list
+- apiGroups:
+- ""
+resources:
+- events
+verbs:
+- get
+- watch
+- list
+# Additional permissions for Linkerd resources are added below
+- apiGroups:
+- split.smi-spec.io
+resources:
+- trafficsplits
+verbs:
+- get
+- list
+
Setting up Remote Cluster Permissions¶
wego-admin
user will be able to view canary information from within the Weave GitOps Enterprise UI on the management cluster:Expand to see example of remote cluster canary reader
apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+name: user-groups-impersonator
+rules:
+- apiGroups: [""]
+ resources: ["users", "groups"]
+ verbs: ["impersonate"]
+- apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["get", "list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+name: impersonate-user-groups
+subjects:
+- kind: ServiceAccount
+ name: remote-cluster-01 # Service account created in remote cluster
+ namespace: default
+roleRef:
+kind: ClusterRole
+name: user-groups-impersonator
+apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+name: canary-reader
+rules:
+- apiGroups: [""]
+ resources: [ "events", "services" ]
+ verbs: [ "get", "list", "watch" ]
+- apiGroups: [ "apps" ]
+ resources: [ "*" ]
+ verbs: [ "get", "list" ]
+- apiGroups: [ "autoscaling" ]
+ resources: [ "*" ]
+ verbs: [ "get", "list" ]
+- apiGroups: [ "flagger.app" ]
+ resources: [ "canaries", "metrictemplates"]
+ verbs: [ "get", "list", "watch" ]
+- apiGroups: [ "helm.toolkit.fluxcd.io" ]
+ resources: [ "helmreleases" ]
+ verbs: [ "get", "list" ]
+- apiGroups: [ "kustomize.toolkit.fluxcd.io" ]
+ resources: [ "kustomizations" ]
+ verbs: [ "get", "list" ]
+- apiGroups: [ "source.toolkit.fluxcd.io" ]
+ resources: [ "buckets", "helmcharts", "gitrepositories", "helmrepositories", "ocirepositories" ]
+ verbs: [ "get", "list" ]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+name: read-canaries
+subjects:
+- kind: User
+name: wego-admin # User logged in management cluster, impersonated via service account
+apiGroup: rbac.authorization.k8s.io
+roleRef:
+kind: ClusterRole
+name: canary-reader
+apiGroup: rbac.authorization.k8s.io
+
read-canaries
ClusterRoleBinding to ensure additional users can view canary information from within the Weave GitOps Enterprise UI.Deploy a Canary Release¶
test
directory and add these three canary resource manifests under it: - A Namespace
resource to control where the components are installed - A Deployment
and HorizontalPodAutoscaler
for the podinfo
application - A Canary
resource which references the Deployment
and HorizontalPodAutoscaler
resourcesExpand to see the three canary resource manifests
---
+apiVersion: v1
+kind: Namespace
+metadata:
+name: test
+annotations:
+ linkerd.io/inject: enabled
+
---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+name: podinfo
+labels:
+ app: podinfo
+spec:
+minReadySeconds: 5
+revisionHistoryLimit: 5
+progressDeadlineSeconds: 60
+strategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ type: RollingUpdate
+selector:
+ matchLabels:
+ app: podinfo
+template:
+ metadata:
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "9797"
+ labels:
+ app: podinfo
+ spec:
+ containers:
+ - name: podinfod
+ image: ghcr.io/stefanprodan/podinfo:6.1.8
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: http
+ containerPort: 9898
+ protocol: TCP
+ - name: http-metrics
+ containerPort: 9797
+ protocol: TCP
+ - name: grpc
+ containerPort: 9999
+ protocol: TCP
+ command:
+ - ./podinfo
+ - --port=9898
+ - --port-metrics=9797
+ - --grpc-port=9999
+ - --grpc-service-name=podinfo
+ - --level=info
+ - --random-delay=false
+ - --random-error=false
+ env:
+ - name: PODINFO_UI_COLOR
+ value: "#34577c"
+ livenessProbe:
+ exec:
+ command:
+ - podcli
+ - check
+ - http
+ - localhost:9898/healthz
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ readinessProbe:
+ exec:
+ command:
+ - podcli
+ - check
+ - http
+ - localhost:9898/readyz
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 512Mi
+ requests:
+ cpu: 100m
+ memory: 64Mi
+
+---
+apiVersion: autoscaling/v2beta2
+kind: HorizontalPodAutoscaler
+metadata:
+name: podinfo
+spec:
+scaleTargetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: podinfo
+minReplicas: 2
+maxReplicas: 4
+metrics:
+ - type: Resource
+ resource:
+ name: cpu
+ target:
+ type: Utilization
+ # scale up if usage is above
+ # 99% of the requested CPU (100m)
+ averageUtilization: 99
+
---
+apiVersion: flagger.app/v1beta1
+kind: Canary
+metadata:
+name: podinfo
+spec:
+# deployment reference
+targetRef:
+ apiVersion: apps/v1
+ kind: Deployment
+ name: podinfo
+# HPA reference (optional)
+autoscalerRef:
+ apiVersion: autoscaling/v2beta2
+ kind: HorizontalPodAutoscaler
+ name: podinfo
+# the maximum time in seconds for the canary deployment
+# to make progress before it is rolled back (default 600s)
+progressDeadlineSeconds: 60
+service:
+ # ClusterIP port number
+ port: 9898
+ # container port number or name (optional)
+ targetPort: 9898
+analysis:
+ # schedule interval (default 60s)
+ interval: 30s
+ # max number of failed metric checks before rollback
+ threshold: 5
+ # max traffic percentage routed to canary
+ # percentage (0-100)
+ maxWeight: 50
+ # canary increment step
+ # percentage (0-100)
+ stepWeight: 5
+ # Linkerd Prometheus checks
+ metrics:
+ - name: request-success-rate
+ # minimum req success rate (non 5xx responses)
+ # percentage (0-100)
+ thresholdRange:
+ min: 99
+ interval: 1m
+ - name: request-duration
+ # maximum req duration P99
+ # milliseconds
+ thresholdRange:
+ max: 500
+ interval: 30s
+ # testing (optional)
+ webhooks:
+ - name: acceptance-test
+ type: pre-rollout
+ url: http://loadtester.flagger/
+ timeout: 30s
+ metadata:
+ type: bash
+ cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
+ - name: load-test
+ type: rollout
+ url: http://loadtester.flagger/
+ metadata:
+ cmd: "hey -z 2m -q 10 -c 2 http://podinfo-canary.test:9898/"
+
test
namespace:Expand to see the Canary Kustomization manifest
---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namespace: test
+resources:
+- namespace.yaml
+- deployment.yaml
+- canary.yaml
+
test
directory in the cluster repository should look like this:> tree test
+test
+├── canary.yaml
+├── deployment.yaml
+├── kustomization.yaml
+└── namespace.yaml
+
Initialized
:> kubectl get canary podinfo -n test
+NAME STATUS WEIGHT LASTTRANSITIONTIME
+podinfo Initialized 0 2022-07-22T12:37:58Z
+
podinfo
:> kubectl set image deployment/podinfo podinfod=ghcr.io/stefanprodan/podinfo:6.0.1 -n test
+
> kubectl get canary podinfo -n test
+NAME STATUS WEIGHT LASTTRANSITIONTIME
+podinfo Progressing 5 2022-07-22T12:41:57Z
+
Succeeded
:> kubectl get canary podinfo -n test
+NAME STATUS WEIGHT LASTTRANSITIONTIME
+podinfo Succeeded 0 2022-07-22T12:47:58Z
+
Summary¶
Gitops
gitops¶
Synopsis¶
Examples¶
# Get help for gitops create dashboard command
+ gitops create dashboard -h
+ gitops help create dashboard
+
+ # Get the version of gitops along with commit, branch, and flux version
+ gitops version
+
+ To learn more, you can find our documentation at https://docs.gitops.weave.works/
+
Options¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ -h, --help help for gitops
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops check
gitops check¶
gitops check [flags]
+
Examples¶
# Validate flux and kubernetes compatibility
+gitops check
+
Options¶
-h, --help help for check
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops completion
gitops completion¶
Synopsis¶
Options¶
-h, --help help for completion
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops completion bash
gitops completion bash¶
Synopsis¶
1
source <(gitops completion bash)
+
Linux:¶
1
gitops completion bash > /etc/bash_completion.d/gitops
+
macOS:¶
1
gitops completion bash > $(brew --prefix)/etc/bash_completion.d/gitops
+
gitops completion bash
+
Options¶
-h, --help help for bash
+ --no-descriptions disable completion descriptions
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops completion fish
gitops completion fish¶
Synopsis¶
1
gitops completion fish | source
+
1
gitops completion fish > ~/.config/fish/completions/gitops.fish
+
gitops completion fish [flags]
+
Options¶
-h, --help help for fish
+ --no-descriptions disable completion descriptions
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops completion powershell
gitops completion powershell¶
Synopsis¶
1
gitops completion powershell | Out-String | Invoke-Expression
+
gitops completion powershell [flags]
+
Options¶
-h, --help help for powershell
+ --no-descriptions disable completion descriptions
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops completion zsh
gitops completion zsh¶
Synopsis¶
1
echo "autoload -U compinit; compinit" >> ~/.zshrc
+
1
source <(gitops completion zsh)
+
Linux:¶
1
gitops completion zsh > "${fpath[1]}/_gitops"
+
macOS:¶
1
gitops completion zsh > $(brew --prefix)/share/zsh/site-functions/_gitops
+
gitops completion zsh [flags]
+
Options¶
-h, --help help for zsh
+ --no-descriptions disable completion descriptions
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops create
gitops create¶
Examples¶
# Create a HelmRepository and HelmRelease to deploy Weave GitOps
+gitops create dashboard ww-gitops \
+ --password=$PASSWORD \
+ --export > ./clusters/my-cluster/weave-gitops-dashboard.yaml
+
+# Create a Terraform object
+gitops create terraform my-resource \
+ -n my-namespace \
+ --source GitRepository/my-project \
+ --path ./terraform \
+ --interval 1m \
+ --export > ./clusters/my-cluster/infra/terraform-my-resource.yaml
+
Options¶
--export Export in YAML format to stdout.
+ -h, --help help for create
+ --timeout duration The timeout for operations during resource creation. (default 3m0s)
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops create dashboard
gitops create dashboard¶
Synopsis¶
gitops create dashboard [flags]
+
Examples¶
# Create a HelmRepository and HelmRelease to deploy Weave GitOps
+gitops create dashboard ww-gitops \
+ --password=$PASSWORD \
+ --export > ./clusters/my-cluster/weave-gitops-dashboard.yaml
+
Options¶
--context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ -h, --help help for dashboard
+ --password string The password of the dashboard admin user.
+ --username string The username of the dashboard admin user. (default "admin")
+ --values strings Local path to values.yaml files for HelmRelease, also accepts comma-separated values.
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --export Export in YAML format to stdout.
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ --timeout duration The timeout for operations during resource creation. (default 3m0s)
+
SEE ALSO¶
Gitops create terraform
gitops create terraform¶
Synopsis¶
gitops create terraform [flags]
+
Examples¶
# Create a Terraform resource in the default namespace
+gitops create terraform -n default my-resource --source GitRepository/my-project --path ./terraform --interval 15m
+
+# Create and export a Terraform resource manifest to the standard output
+gitops create terraform -n default my-resource --source GitRepository/my-project --path ./terraform --interval 15m --export
+
Options¶
--context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ -h, --help help for terraform
+ --interval string Interval at which the Terraform configuration should be applied
+ --path string Path to the Terraform configuration
+ --source string Source of the Terraform configuration
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --export Export in YAML format to stdout.
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ --timeout duration The timeout for operations during resource creation. (default 3m0s)
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops delete
gitops delete¶
Options¶
-h, --help help for delete
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops delete terraform
gitops delete terraform¶
gitops delete terraform [flags]
+
Examples¶
# Delete a Terraform resource in the default namespace
+gitops delete terraform -n default my-resource
+
Options¶
--context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ -h, --help help for terraform
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops get
gitops get¶
Examples¶
# Get the CLI configuration for Weave GitOps
+gitops get config
+
+# Generate a hashed secret
+PASSWORD="<your password>"
+echo -n $PASSWORD | gitops get bcrypt-hash
+
Options¶
-h, --help help for get
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops get bcrypt hash
gitops get bcrypt-hash¶
gitops get bcrypt-hash [flags]
+
Examples¶
PASSWORD="<your password>"
+echo -n $PASSWORD | gitops get bcrypt-hash
+
Options¶
-h, --help help for bcrypt-hash
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops get config
gitops get config¶
gitops get config [flags]
+
Examples¶
# Prints out the CLI configuration for Weave GitOps
+gitops get config
+
Options¶
-h, --help help for config
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Gitops logs
gitops logs¶
Options¶
-h, --help help for logs
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops logs terraform
gitops logs terraform¶
gitops logs terraform [flags]
+
Examples¶
# Get the runner logs of a Terraform object in the "flux-system" namespace
+gitops logs terraform --namespace flux-system my-resource
+
Options¶
--context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ -h, --help help for terraform
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops replan
gitops replan¶
Examples¶
# Replan the Terraform plan of a Terraform object from the "flux-system" namespace
+gitops replan terraform --namespace flux-system my-resource
+
Options¶
-h, --help help for replan
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops replan terraform
gitops replan terraform¶
gitops replan terraform [flags]
+
Examples¶
# Replan the Terraform plan of a Terraform object from the "flux-system" namespace
+gitops replan terraform --namespace flux-system my-resource
+
Options¶
--context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ -h, --help help for terraform
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops resume
gitops resume¶
Examples¶
# Resume a Terraform object from the "flux-system" namespace
+gitops resume terraform --namespace flux-system my-resource
+
Options¶
-h, --help help for resume
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops resume terraform
gitops resume terraform¶
gitops resume terraform [flags]
+
Examples¶
# Resume a Terraform object in the "flux-system" namespace
+gitops resume terraform --namespace flux-system my-resource
+
Options¶
--context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ -h, --help help for terraform
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops set
gitops set¶
Examples¶
# Enables analytics in the current user's CLI configuration for Weave GitOps
+gitops set config analytics true
+
Options¶
-h, --help help for set
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops set config
gitops set config¶
gitops set config [flags]
+
Examples¶
# Enables analytics in the current user's CLI configuration for Weave GitOps
+gitops set config analytics true
+
Options¶
-h, --help help for config
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Gitops suspend
gitops suspend¶
Examples¶
# Suspend a Terraform object in the "flux-system" namespace
+gitops suspend terraform --namespace flux-system my-resource
+
Options¶
-h, --help help for suspend
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops suspend terraform
gitops suspend terraform¶
gitops suspend terraform [flags]
+
Examples¶
# Suspend a Terraform object in the "flux-system" namespace
+gitops suspend terraform --namespace flux-system my-resource
+
Options¶
--context string The name of the kubeconfig context to use
+ --disable-compression If true, opt-out of response compression for all requests to the server
+ -h, --help help for terraform
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Gitops version
gitops version¶
gitops version [flags]
+
Options¶
-h, --help help for version
+
Options inherited from parent commands¶
-e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable
+ --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
+ --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.
+ -n, --namespace string The namespace scope for this operation (default "flux-system")
+ -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable
+ -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable
+
SEE ALSO¶
Auto generated by spf13/cobra on 9-Nov-2023¶
Helm chart reference¶
Values¶
Key Type Default Description additionalArgs list []
Additional arguments to pass in to the gitops-server adminUser.create bool false
Whether the local admin user should be created. If you use this make sure you add it to rbac.impersonationResourceNames
. adminUser.createClusterRole bool true
Specifies whether the clusterRole & binding to the admin user should be created. Will be created only if adminUser.create
is enabled. Without this, the adminUser will only be able to see resources in the target namespace. adminUser.createSecret bool true
Whether we should create the secret for the local adminUser. Will be created only if adminUser.create
is enabled. Without this, we'll still set up the roles and permissions, but the secret with username and password has to be provided separately. adminUser.passwordHash string nil
Set the password for local admin user. Requires adminUser.create
and adminUser.createSecret
This needs to have been hashed using bcrypt. You can do this via our CLI with gitops get bcrypt-hash
. adminUser.username string "gitops-test-user"
Set username for local admin user, this should match the value in the secret cluster-user-auth
which can be created with adminUser.createSecret
. Requires adminUser.create
. affinity object {}
annotations object {}
Annotations to add to the deployment envVars[0].name string "WEAVE_GITOPS_FEATURE_TENANCY"
envVars[0].value string "true"
envVars[1].name string "WEAVE_GITOPS_FEATURE_CLUSTER"
envVars[1].value string "false"
extraVolumeMounts list []
extraVolumes list []
fullnameOverride string ""
image.pullPolicy string "IfNotPresent"
image.repository string "ghcr.io/weaveworks/wego-app"
image.tag string "v0.36.0"
imagePullSecrets list []
ingress.annotations object {}
ingress.className string ""
ingress.enabled bool false
ingress.hosts string nil
ingress.tls list []
logLevel string "info"
What log level to output. Valid levels are 'debug', 'info', 'warn' and 'error' metrics.enabled bool false
Start the metrics exporter metrics.service.annotations object {"prometheus.io/path":"/metrics","prometheus.io/port":"{{ .Values.metrics.service.port }}","prometheus.io/scrape":"true"}
Annotations to set on the service metrics.service.port int 2112
Port to start the metrics exporter on nameOverride string ""
networkPolicy.create bool true
Specifies whether default network policies should be created. nodeSelector object {}
oidcSecret.create bool false
podAnnotations object {}
podLabels object {}
podSecurityContext object {}
rbac.additionalRules list []
If non-empty, these additional rules will be appended to the RBAC role and the cluster role. for example, additionalRules: - apiGroups: ["infra.contrib.fluxcd.io"] resources: ["terraforms"] verbs: [ "get", "list", "patch" ] rbac.create bool true
Specifies whether the clusterRole & binding to the service account should be created rbac.impersonationResourceNames list []
If non-empty, this limits the resources that the service account can impersonate. This applies to both users and groups, e.g. ['user1@corporation.com', 'user2@corporation.com', 'operations']
rbac.impersonationResources list ["users","groups"]
Limit the type of principal that can be impersonated rbac.viewSecretsResourceNames list ["cluster-user-auth","oidc-auth"]
If non-empty, this limits the secrets that can be accessed by the service account to the specified ones, e.g. ['weave-gitops-enterprise-credentials']
replicaCount int 1
resources object {}
securityContext.allowPrivilegeEscalation bool false
securityContext.capabilities.drop[0] string "ALL"
securityContext.readOnlyRootFilesystem bool true
securityContext.runAsNonRoot bool true
securityContext.runAsUser int 1000
securityContext.seccompProfile.type string "RuntimeDefault"
serverTLS.enable bool false
Enable TLS termination in gitops itself. If you enable this, you need to create a secret, and specify the secretName. Another option is to create an ingress. serverTLS.secretName string "my-secret-tls"
Specify the tls secret name. This type of secrets have a key called tls.crt
and tls.key
containing their corresponding values in base64 format. See https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets for more details and examples service.annotations object {}
service.create bool true
service.port int 9001
service.type string "ClusterIP"
serviceAccount.annotations object {}
Annotations to add to the service account serviceAccount.create bool true
Specifies whether a service account should be created serviceAccount.name string ""
The name of the service account to use. If not set and create is true, a name is generated using the fullname template tolerations list []
"},{"location":"#project-layout","title":"Project layout","text":"mkdocs new [dir-name]
- Create a new project.mkdocs serve
- Start the live-reloading docs server.mkdocs build
- Build the documentation site.mkdocs -h
- Print help message and exit.userdocs
mkdocs.yml # The configuration file. docs/ index.md # The documentation homepage. ... # Other markdown pages, images and other files.@weaveworksoss/backstage-plugin-flux
Backstage plugin provides a set of components that you can add to your existing Backstage app to display the state of Flux resources.# From your Backstage root directory\nyarn add --cwd packages/app @weaveworksoss/backstage-plugin-flux\n
EntityFluxHelmReleasesCard
to your Entity home page for components with the backstage.io/kubernetes-id
entity annotation.import {\n EntityFluxHelmReleasesCard,\n} from '@weaveworksoss/backstage-plugin-flux';\nimport { isKubernetesAvailable } from '@backstage/plugin-kubernetes';\n\nconst overviewContent = (\n <Grid item md={6}>\n <EntityAboutCard variant=\"gridItem\" />\n </Grid>\n\n <EntitySwitch>\n <EntitySwitch.Case if={isKubernetesAvailable}>\n <EntityFluxHelmReleasesCard />\n </EntitySwitch.Case>\n </EntitySwitch>\n);\n
apiVersion: backstage.io/v1alpha1\nkind: Component\nmetadata:\nname: catalogue-service\ndescription: A microservices-demo service that provides catalogue/product information\nannotations:\nbackstage.io/kubernetes-id: podinfo\n
HelmReleases
that have the correct label:
"},{"location":"backstage/#building-a-custom-page-with-resources","title":"Building a Custom Page with Resources","text":"apiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: podinfo\nnamespace: podinfo\n# The label here is matched to the Backstage Entity annotation\nlabels:\nbackstage.io/kubernetes-id: podinfo\nspec:\ninterval: 5m\nchart:\nspec:\nchart: podinfo\nversion: '6.3.6'\nsourceRef:\nkind: HelmRepository\nname: podinfo\nnamespace: podinfo\n
/kustomizations
to your Entity for components with the backstage.io/kubernetes-id
entity annotation:
"},{"location":"backstage/#connecting-to-weave-gitops","title":"Connecting to Weave GitOps","text":"import {\n EntityFluxGitRepositoriesCard,\n EntityFluxKustomizationsCard,\n} from '@weaveworksoss/backstage-plugin-flux';\nimport { isKubernetesAvailable } from '@backstage/plugin-kubernetes';\n\nconst serviceEntityPage = (\n // insert in the page where you need it\n\n <EntityLayout.Route path=\"/kustomizations\" title=\"Kustomizations\" if={isKubernetesAvailable}>\n <Grid container spacing={3} alignItems=\"stretch\">\n <Grid item md={12}>\n <EntityFluxKustomizationsCard />\n </Grid>\n <Grid item md={12}>\n <EntityFluxGitRepositoriesCard />\n </Grid>\n </Grid>\n </EntityLayout.Route>\n);\n
app:\ntitle: Backstage Example App\nbaseUrl: http://localhost:3000\n...\ngitops:\n# Set this to be the root of your Weave GitOps application\nbaseUrl: https://example.com\n
"},{"location":"feedback-and-telemetry/#anonymous-aggregate-user-behavior-analytics","title":"Anonymous Aggregate User Behavior Analytics","text":"
gitops get bcrypt-hash
--password
, but not the value)
app=cli
, to know it\u2019s a CLI metric
"},{"location":"feedback-and-telemetry/#when-is-the-data-collected-and-where-is-it-sent","title":"When is the data collected and where is it sent?","text":"
kube-system
namespace uuid
WEAVE_GITOPS_FEATURE_TELEMETRY
from the envVars
value.
"},{"location":"help-and-support/#other","title":"Other","text":"
"},{"location":"intro-weave-gitops/","title":"Introducing Weave GitOps","text":"
monitoring
:---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: weave-gitops-enterprise\nnamespace: flux-system\nspec:\nvalues:\nmonitoring:\nenabled: true # enable it if you want to expose a monitoring server\nservice:\nname: monitoring\nport: 8080 # port to expose the monitoring server\nmetrics:\nenabled: true # enable it to expose a prometheus metrics endpoint in `/metrics`\nprofiling:\nenabled: false # enable it to expose a pprof debug endpoint `/debug/pprof`\n
Expand to see manifest contents apiVersion: source.toolkit.fluxcd.io/v1\nkind: GitRepository\nmetadata:\nname: weave-gitops-quickstart\nnamespace: flux-system\nspec:\ninterval: 10m0s\nref:\nbranch: main\nurl: https://github.com/weaveworks/weave-gitops-quickstart\n---\napiVersion: v1\nkind: Namespace\nmetadata:\nname: monitoring\n---\napiVersion: kustomize.toolkit.fluxcd.io/v1\nkind: Kustomization\nmetadata:\nname: kube-prometheus-stack\nnamespace: flux-system\nspec:\ninterval: 10m0s\nsourceRef:\nkind: GitRepository\nname: weave-gitops-quickstart\npath: ./monitoring/kube-prometheus-stack\nprune: true\ntargetNamespace: monitoring\nwait: true\n
Expand to see manifest contents apiVersion: kustomize.toolkit.fluxcd.io/v1\nkind: Kustomization\nmetadata:\nname: monitoring-config\nnamespace: flux-system\nspec:\ninterval: 10m0s\nsourceRef:\nkind: GitRepository\nname: weave-gitops-quickstart\npath: ./monitoring/weave-gitops\ndependsOn:\n- name: kube-prometheus-stack\nprune: true\ntargetNamespace: monitoring\n
"},{"location":"monitoring/#dashboards","title":"Dashboards","text":"flux
or weave-gitops
.
/debug/pprof
path where the pprof web interface is exposed.pprof
.
"},{"location":"security/#handling","title":"Handling","text":"
"},{"location":"security/#disclosures","title":"Disclosures","text":"
templates.weave.works/create-request
annotation (in the case of editing or deleting of resources) metadata:\nannotations:\ntemplates.weave.works/create-request: \"{...\\\"parameter_values\\\":{...\\\"url\\\":\\\"https://github.com/weave-example-org/weave-demo\\\"}\"\n
the first repository found with a weave.works/repo-role: default
annotation
metadata:\nannotations:\nweave.works/repo-role: default\n
the flux-system repository
metadata:\nname: flux-system\nnamespace: flux-system\n
the first repository in the list of Git repositories that the user has access to.
In the case of deletion and editing, if the resource repository is found amongst the Git repositories that the user has access to, it will be preselected and the selection will be disabled. If it is not found, you can choose a new repository.
In the case of tenants, we recommend adding the weave.works/repo-role: default
to an appropriate Git repository.
The system will try and automatically calculate the correct HTTPS API endpoint to create a pull request against. For example, if the Git repository URL is ssh://git@github.com/org/repo.git
, the system will try and convert it to https://github.com/org/repo.git
.
However, it is not always possible to accurately derive this URL. An override can be specified to set the correct URL instead. For example, the SSH URL may be ssh://git@internal-ssh-server:2222/org/repo.git
and the correct HTTPS URL may be https://gitlab.example.com/org/repo.git
.
In this case, we set the override via the weave.works/repo-https-url
annotation on the GitRepository
object:
apiVersion: source.toolkit.fluxcd.io/v1beta1\nkind: GitRepository\nmetadata:\nname: repo\nnamespace: flux-system\nannotations:\n// highlight-start\nweave.works/repo-https-url: https://gitlab.example.com/org/repo.git\n// highlight-end\nspec:\ninterval: 1m\nurl: ssh://git@internal-ssh-server:2222/org/repo.git\n
The pull request will then be created against the correct HTTPS API.
The above also applies to application creation.
"},{"location":"cluster-management/deploying-capa-eks/","title":"Deploying CAPA with EKS ENTERPRISE","text":"Weave GitOps Enterprise can leverage Cluster API providers to enable leaf cluster creation. Cluster API provides declarative APIs, controllers, and tooling to manage the lifecycle of Kubernetes clusters across a large number of infrastructure providers. Cluster API custom resource definitions (CRDs) are platform-independent as each provider implementation handles the creation of virtual machines, VPCs, networks, and other required infrastructure parts\u2014enabling consistent and repeatable cluster deployments.
As an AWS advanced technology partner, Weaveworks has been working tirelessly to ensure that deploying EKS anywhere is smooth and removes the barriers to application modernization.
"},{"location":"cluster-management/deploying-capa-eks/#prerequisites","title":"Prerequisites","text":"You'll need to install the following software before continuing with these instructions:
github cli
>= 2.3.0 (source)kubectl
(source)eksctl
(source)aws cli
(source)clusterctl
>= v1.1.3 (source); follow these steps to initialise the cluster and enable feature gatesclusterawsadm
>= v1.1.0, following Cluster API's instructionsAWS_ACCESS_KEY_ID
and AWS_SECRET_ACCESS_KEY
with either aws configure
or by exporting it in the current shell.GITHUB_TOKEN
as an environment variable in the current shell. It should have permissions to create Pull Requests against the cluster config repo.Some Cluster API providers allow you to choose the account or identity that the new cluster will be created with. This is often referred to as Multi-tenancy in the CAPI world. Weave GitOps currently supports:
When a cluster is provisioned, by default it will reconcile all the manifests in ./clusters/<cluster-namespace>/<cluster-name>
and ./clusters/bases
.
To display Applications and Sources in the UI we need to give the logged in user permissions to inspect the new cluster.
Adding common RBAC rules to ./clusters/bases/rbac
is an easy way to configure this!
import WegoAdmin from \"!!raw-loader!./assets/rbac/wego-admin.yaml\";
curl -o clusters/bases/rbac/wego-admin.yaml https://docs.gitops.weave.works/assets/files/wego-admin-c80945c1acf9908fe6e61139ef65c62e.yaml\n
Expand to see full template yaml clusters/bases/rbac/wego-admin.yaml\n
<CodeBlock title=\"clusters/bases/rbac/wego-admin.yaml\" className=\"language-yaml\"
{WegoAdmin}
"},{"location":"cluster-management/deploying-capa-eks/#2-build-a-kubernetes-platform-with-built-in-components-preconfigured-for-your-organization","title":"2. Build a Kubernetes Platform with Built-in Components Preconfigured for Your Organization","text":"To do this, go to Weaveworks' Profiles Catalog.
See CAPI Templates page for more details on this topic. Once we load a template we can use it in the UI to create clusters!
import CapaTemplate from \"!!raw-loader!./assets/templates/capa-template.yaml\";
Download the template below to your config repository path, then commit and push to your Git origin.
curl -o clusters/management/capi/templates/capa-template.yaml https://docs.gitops.weave.works/assets/files/capa-template-49001fbae51e2a9f365b80caebd6f341.yaml\n
clusters/management/apps/capi/templates/capa-template.yaml {% include '/assets/templates/capa-template.yaml' %}\n
<CodeBlock title=\"clusters/management/apps/capi/templates/capa-template.yaml\" className=\"language-yaml\"
{CapaTemplate}
"},{"location":"cluster-management/deploying-capa-eks/#3-add-a-cluster-bootstrap-config","title":"3. Add a Cluster Bootstrap Config","text":"This step ensures that Flux gets installed into your cluster.\u00a0Create a cluster bootstrap config as follows:
kubectl create secret generic my-pat --from-literal GITHUB_TOKEN=$GITHUB_TOKEN\n
import CapiGitopsCDC from \"!!raw-loader!./assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml\";
Download the config with:
curl -o clusters/management/capi/bootstrap/capi-gitops-cluster-bootstrap-config.yaml https://docs.gitops.weave.works/assets/files/capi-gitops-cluster-bootstrap-config-d9934a1e6872a5b7ee5559d2d97a3d83.yaml\n
Then update the GITOPS_REPO
variable to point to your cluster
\n
<CodeBlock title=\"clusters/management/capi/bootstrap/capi-gitops-cluster-bootstrap-config.yaml\" className=\"language-yaml\"
{CapiGitopsCDC}
"},{"location":"cluster-management/deploying-capa-eks/#4-delete-a-cluster-with-the-weave-gitops-enterprise-ui","title":"4. Delete a Cluster with the Weave GitOps Enterprise UI","text":"Here are the steps:
Create a PR to delete clusters
buttonRemove clusters
buttonNote that you can't apply an empty repository to a cluster. If you have Cluster API clusters and other manifests committed to this repository, and then delete all of them so there are zero manifests left, then the apply will fail and the resources will not be removed from the cluster. A workaround is to add a dummy ConfigMap back to the Git repository after deleting everything else so that there is at least one manifest to apply.
"},{"location":"cluster-management/deploying-capa-eks/#5-disable-capi-support","title":"5. Disable CAPI Support","text":"If you do not need CAPI-based cluster management support, you can disable CAPI via the Helm Chart values.
Update your Weave GitOps Enterprise HelmRelease
object with the global.capiEnabled
value set to false
:
---\napiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: HelmRepository\nmetadata:\nname: weave-gitops-enterprise-charts\nnamespace: flux-system\nspec:\ninterval: 60m\nsecretRef:\nname: weave-gitops-enterprise-credentials\nurl: https://charts.dev.wkp.weave.works/releases/charts-v3\n---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: weave-gitops-enterprise\nnamespace: flux-system\nspec:\nchart:\nspec:\ninterval: 65m\nchart: mccp\nsourceRef:\nkind: HelmRepository\nname: weave-gitops-enterprise-charts\nnamespace: flux-system\nversion: 0.12.0\ninstall:\ncrds: CreateReplace\nupgrade:\ncrds: CreateReplace\ninterval: 50m\nvalues:\nglobal:\ncapiEnabled: false\n
And that's it!
"},{"location":"cluster-management/managing-clusters-without-capi/","title":"Managing Clusters Without Cluster API","text":"import CodeBlock from \"@theme/CodeBlock\"; import BrowserOnly from \"@docusaurus/BrowserOnly\";
"},{"location":"cluster-management/managing-clusters-without-capi/#managing-clusters-without-cluster-api-enterprise","title":"Managing Clusters Without Cluster API ENTERPRISE","text":"You do not need Cluster API to add your Kubernetes cluster to Weave GitOps Enterprise. The only thing you need is a secret containing a valid kubeconfig
.
Here's how to create a kubeconfig secret.
apiVersion: v1\nkind: ServiceAccount\nmetadata:\nname: demo-01\nnamespace: default\n
---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\nname: impersonate-user-groups\nsubjects:\n- kind: ServiceAccount\nname: wgesa\nnamespace: default\nroleRef:\nkind: ClusterRole\nname: user-groups-impersonator\napiGroup: rbac.authorization.k8s.io\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\nname: user-groups-impersonator\nrules:\n- apiGroups: [\"\"]\nresources: [\"users\", \"groups\"]\nverbs: [\"impersonate\"]\n- apiGroups: [\"\"]\nresources: [\"namespaces\"]\nverbs: [\"get\", \"list\"]\n
This will allow WGE to introspect the cluster for available namespaces.
Once we know what namespaces are available we can test whether the logged in user can access them via impersonation.
kubectl get secrets --field-selector type=kubernetes.io/service-account-token\n NAME TYPE DATA AGE\n default-token-lsjz4 kubernetes.io/service-account-token 3 13d\n demo-01-token-gqz7p kubernetes.io/service-account-token 3 99m\n
(demo-01-token-gqz7p
is the secret that holds the token for demo-01
service account.)
Then, run the following command to get the service account token:
TOKEN=$(kubectl get secret demo-01-token-gqz7p -o jsonpath={.data.token} | base64 -d)\n
static-kubeconfig.sh
: #!/bin/bash\nif [[ -z \"$CLUSTER_NAME\" ]]; then\necho \"Ensure CLUSTER_NAME has been set\"\nexit 1\nfi\nif [[ -z \"$CA_CERTIFICATE\" ]]; then\necho \"Ensure CA_CERTIFICATE has been set to the path of the CA certificate\"\nexit 1\nfi\nif [[ -z \"$ENDPOINT\" ]]; then\necho \"Ensure ENDPOINT has been set\"\nexit 1\nfi\nif [[ -z \"$TOKEN\" ]]; then\necho \"Ensure TOKEN has been set\"\nexit 1\nfi\nexport CLUSTER_CA_CERTIFICATE=$(cat \"$CA_CERTIFICATE\" | base64)\nenvsubst <<EOF\n apiVersion: v1\n kind: Config\n clusters:\n - name: $CLUSTER_NAME\n cluster:\n server: https://$ENDPOINT\n certificate-authority-data: $CLUSTER_CA_CERTIFICATE\n users:\n - name: $CLUSTER_NAME\n user:\n token: $TOKEN\n contexts:\n - name: $CLUSTER_NAME\n context:\n cluster: $CLUSTER_NAME\n user: $CLUSTER_NAME\n current-context: $CLUSTER_NAME\n EOF\n
Obtain the cluster certificate (CA). How you do this depends on your cluster.
AKS: Visit the Azure user docs for more information.
You'll need to copy the contents of the certificate into the ca.crt
file used below.
CLUSTER_NAME=demo-01 \\\nCA_CERTIFICATE=ca.crt \\\nENDPOINT=<control-plane-ip-address> \\\nTOKEN=<token> ./static-kubeconfig.sh > demo-01-kubeconfig\n
Update the following fields:
CLUSTER_NAME: insert the name of your cluster\u2014i.e., demo-01
ENDPOINT: the API server endpoint of your cluster, e.g., 34.218.72.31
TOKEN: add the token of the service account retrieved in the previous step
Finally, create a secret for the generated kubeconfig in the WGE management cluster:
kubectl create secret generic demo-01-kubeconfig \\\n--from-file=value=./demo-01-kubeconfig\n
"},{"location":"cluster-management/managing-clusters-without-capi/#adding-kubeconfig-to-your-management-cluster","title":"Adding kubeconfig to Your Management Cluster","text":"If you already have a kubeconfig
stored in a secret in your management cluster, continue with the \"Create a GitopsCluster
\" step below.
If you have a kubeconfig, but it is not yet stored in your management cluster, load it into the cluster using this command:
kubectl create secret generic demo-01-kubeconfig \\\n--from-file=value=./demo-01-kubeconfig\n
"},{"location":"cluster-management/managing-clusters-without-capi/#add-a-cluster-bootstrap-config","title":"Add a Cluster Bootstrap Config","text":"This step ensures that Flux gets installed into your cluster.\u00a0Create a cluster bootstrap config as follows:
kubectl create secret generic my-pat --from-literal GITHUB_TOKEN=$GITHUB_TOKEN\n
import CapiGitopsCDC from \"!!raw-loader!./assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml\";
Download the config with:
{() => ( curl -o clusters/management/capi/bootstrap/capi-gitops-cluster-bootstrap-config.yaml{\" \"} {window.location.protocol} //{window.location.host} { require(\"./assets/bootstrap/capi-gitops-cluster-bootstrap-config.yaml\") .default } )}
Then update the GITHUB_USER
variable to point to your repository
<CodeBlock title=\"clusters/management/capi/bootstrap/capi-gitops-cluster-bootstrap-config.yaml\" className=\"language-yaml\"
{CapiGitopsCDC}
"},{"location":"cluster-management/managing-clusters-without-capi/#connect-a-cluster","title":"Connect a Cluster","text":"To connect your cluster, you need to add some common RBAC rules into the clusters/bases
folder. When a cluster is provisioned, by default it will reconcile all the manifests in ./clusters/<cluster-namespace>/<cluster-name>
and ./clusters/bases
.
To display Applications and Sources in the UI, we need to give the logged-in user the permission to inspect the new cluster. Adding common RBAC rules to ./clusters/bases/rbac
is an easy way to configure this.
import WegoAdmin from \"!!raw-loader!./assets/rbac/wego-admin.yaml\";
{() => ( curl -o clusters/bases/rbac/wego-admin.yaml {window.location.protocol}// {window.location.host} {require(\"./assets/rbac/wego-admin.yaml\").default} )}
Expand to see full template yaml<CodeBlock title=\"clusters/bases/rbac/wego-admin.yaml\" className=\"language-yaml\"
{WegoAdmin}
"},{"location":"cluster-management/managing-clusters-without-capi/#create-a-gitopscluster","title":"Create a GitopsCluster
","text":"When a GitopsCluster
appears in the cluster, the Cluster Bootstrap Controller will install Flux on it and by default start reconciling the ./clusters/demo-01
path in your management cluster's Git repository:
apiVersion: gitops.weave.works/v1alpha1\nkind: GitopsCluster\nmetadata:\nname: demo-01\nnamespace: default\n# Signals that this cluster should be bootstrapped.\nlabels:\nweave.works/capi: bootstrap\nspec:\nsecretRef:\nname: demo-01-kubeconfig\n
To use the Weave GitOps Enterprise user interface (UI) to inspect the Applications and Sources running on the new cluster, you'll need permissions. We took care of this above when we stored your RBAC rules in ./clusters/bases
. In the following step, we'll create a kustomization to add these common resources onto our new cluster:
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nkind: Kustomization\nmetadata:\ncreationTimestamp: null\nname: clusters-bases-kustomization\nnamespace: flux-system\nspec:\ninterval: 10m0s\npath: clusters/bases\nprune: true\nsourceRef:\nkind: GitRepository\nname: flux-system\n
Save these two files in your Git repository, then commit and push.
Once Flux has reconciled the cluster, you can inspect your Flux resources via the UI!
"},{"location":"cluster-management/managing-clusters-without-capi/#debugging-tip-checking-that-your-kubeconfig-secret-is-in-your-cluster","title":"Debugging Tip: Checking that Your kubeconfig Secret Is in Your Cluster","text":"To test that your kubeconfig secret is correctly set up, apply the following manifest and check the logs after the job completes:
Expand to see manifest ---\napiVersion: batch/v1\nkind: Job\nmetadata:\nname: kubectl\nspec:\nttlSecondsAfterFinished: 30\ntemplate:\nspec:\ncontainers:\n- name: kubectl\nimage: bitnami/kubectl\nargs:\n[\n\"get\",\n\"pods\",\n\"-n\",\n\"kube-system\",\n\"--kubeconfig\",\n\"/etc/kubeconfig/value\",\n]\nvolumeMounts:\n- name: kubeconfig\nmountPath: \"/etc/kubeconfig\"\nreadOnly: true\nrestartPolicy: Never\nvolumes:\n- name: kubeconfig\nsecret:\nsecretName: demo-01-kubeconfig\noptional: false\n
In the manifest above, demo-01-kubeconfig
is the name of the secret that contains the kubeconfig for the remote cluster.
Other documentation that you might find useful:
BEFORE YOU START
The following instructions require you to make minor changes to the content of your own hosted Helm repository.
To put it simply, Profiles are Helm charts. To create a Profile, you need to add an annotation to a Helm chart.
A very simple Helm chart marked up as a Profile looks like this:
name: demo-profile\nversion: 0.0.1\nannotations:\nweave.works/profile: \"A Demo Profile\"\n
The chart can use either subcharts or dependencies to include other charts. These other charts do not need the annotation, and they will not show up as Profiles."},{"location":"cluster-management/profiles/#mark-a-helmrepository-as-containing-profiles","title":"Mark a HelmRepository as Containing Profiles","text":"Alternatively, you can annotate a Flux HelmRepository
apiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: HelmRepository\nmetadata:\nname: podinfo\nnamespace: default\nannotations:\nweave.works/profiles: \"true\" # this identifies all charts as profiles\nspec:\ninterval: 5m0s\nurl: https://stefanprodan.github.io/podinfo\n
This will ensure that all charts in the HelmRepository
are identified as Profiles.
Profile layers are a mechanism for loosely defining dependencies between Profiles.
To add a layer to a Profile chart:
name: demo-profile\nversion: 0.0.1\nannotations:\nweave.works/profile: \"A Demo Profile\"\nweave.works/layer: \"demo\"\n
When multiple Profiles with layers are specified in an API call, the set of layers is sorted, reversed, and configured as dependencies using Flux's dependsOn mechanism.
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 \u2502 \u2502 \u2502 \u2502 \u2502\n\u2502 layer-3 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u25ba layer-2 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u25ba layer-1 \u2502\n\u2502 \u2502 \u2502 \u2502 \u2502 \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n dependsOn dependsOn\n
The scope of the dependsOn
calculation is limited to the set of Profiles in the API call.
If only one chart is being installed, no dependsOn
is configured.
If several charts are installed in the same layer, then the preceding layer charts will be configured to depend on all the charts in the succeeding layer.
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 \u2502 \u2502 \u2502 \u2502 \u2502\n\u2502 layer-3 \u251c\u2500\u2500\u2500\u2500\u2500\u25ba layer-2 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u25ba layer-1 \u2502\n\u2502 \u2502 \u2502 \u2502 \u2502 \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 \u2514\u2500\u25b2\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n dependsOn \u2502 dependsOn \u2502\n \u2502 \u2502\n \u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u2502\n \u2502 \u2502 \u2502 \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u25ba layer-2 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n \u2502 \u2502\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n dependsOn\n
If a chart with no layer specified is installed with a chart that has a layer specified, the service will configure the dependsOn
for the chart without a layer to depend on the chart with layer."},{"location":"cluster-management/profiles/#optional-use-a-helm-chart-from-a-remote-publicprivate-repository","title":"(Optional) Use a Helm Chart from a Remote Public/Private Repository","text":"You can add your Profiles to a remote repository that can be referenced using a HelmRepository resource. The repository can be either public or private. Using a private repo requires a few extra steps.
In this example, a public repo and branch is referenced directly where the Helm releases are: HelmRepository.yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1\nkind: HelmRepository\nmetadata:\nname: weaveworks-charts\nnamespace: flux-system\nspec:\ninterval: 1m\nurl: https://weaveworks.github.io/weave-gitops-profile-examples/\n
To use private repositories with restricted access, you can use a secret synced to the target leaf cluster. SecretSync references the secret as spec.secretRef
. The labels of your target leaf cluster are added for the syncer to match clusters against those labels using spec.clusterSelector.matchLabels
.
apiVersion: capi.weave.works/v1alpha1\nkind: SecretSync\nmetadata:\nname: my-dev-secret-syncer\nnamespace: flux-system\nspec:\nclusterSelector:\nmatchLabels:\nweave.works/capi: bootstrap\nsecretRef:\nname: weave-gitops-enterprise-credentials\ntargetNamespace: flux-system\n
Once the SecretSync and Secret are available, the secret can be directly referenced in the HelmRepository object:
PrivateHelmRepository.yamlapiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: HelmRepository\nmetadata:\nname: weaveworks-charts\nnamespace: flux-system\nspec:\ninterval: 60m\nsecretRef:\nname: weave-gitops-enterprise-credentials\nurl: https://charts.dev.wkp.weave.works/releases/charts-v3\n
Note: The HelmRepoSecret
, SecretSync
, and the GitopsCluster
should all be in the same namespace.
WGE inspects the namespace in the management cluster where it is deployed, and looks for a HelmRepository
object named weaveworks-charts
. This Kubernetes object should point to a Helm chart repository that includes the Profiles available for installation.
When creating a cluster from the UI using a CAPI template, these Profiles are available for selection in the Profiles
section of the template. For example:
As shown above, some Profiles are optional, while others are required. This is determined when the template is authored and allows for operations teams to control which Helm packages should be installed on new clusters by default.
To enable editing of the yaml values for required Profiles, add the editable
flag in the annotation and describe the required Profile in the template. For example:
apiVersion: templates.weave.works/v1alpha2\nkind: GitOpsTemplate\nmetadata:\nname: connect-a-cluster-with-policies\nnamespace: default\nannotations:\ncapi.weave.works/profile-0: '{\"name\": \"weave-policy-agent\", \"editable\": true, \"version\": \"0.2.8\", \"values\": \"accountId: weaveworks\\nclusterId: ${CLUSTER_NAME}\" }'\n
"},{"location":"cluster-management/advanced-cluster-management-topics/how-to-inject-credentials-into-template/","title":"How to Inject Credentials Into Your Template ENTERPRISE","text":"Weave GitOps templates describe the properties of your cluster\u2014how many nodes, what version of Kubernetes, etc. The identity refers to which account will be used to create the cluster. When you render a template, you may want to set the credentials to be used for this cluster\u2014for example, if the cost is allocated to a specific team.
The rendered resource can be automatically configured with the selected credentials.
Credentials are injected into the following resources: * AWSCluster, AWSManagedControlPlane * AzureCluster, AzureManagedCluster * VSphereCluster
If no credentials are selected, no changes will be applied, and the credentials used by your CAPI controller will be used as the default.
In our cluster we have the template:
apiVersion: templates.weave.works/v1alpha2\nkind: GitOpsTemplate\nmetadata:\nname: capa-cluster-template\nspec:\nresourcetemplates:\n- contents:\n- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4\nkind: AWSCluster\nmetadata:\nname: \"${CLUSTER_NAME}\"\nspec:\nregion: \"${AWS_REGION}\"\n
and the identity
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3\nkind: AWSClusterStaticIdentity\nmetadata:\nname: \"test-account\"\nspec:\nsecretRef:\nname: test-account-creds\nnamespace: capa-system\nallowedNamespaces:\nselector:\nmatchLabels:\ncluster.x-k8s.io/ns: \"testlabel\"\n
We can select Weave GitOps to use the test-account
when creating the cluster by using the Infrastructure provider credentials dropdown on the Create new cluster with template page:
The resulting definition will have the identity injected into the appropriate place in the template, for this example:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4\nkind: AWSCluster\nmetadata:\nname: example-cluster\nspec:\nregion: eu-north-1\nidentityRef:\nkind: AWSClusterStaticIdentity\nname: test-account\n
"},{"location":"cluster-management/advanced-cluster-management-topics/how-to-inject-credentials-into-template/#identityrefs","title":"identityRef
s","text":"The supported providers implement multi-tenancy by setting an identityRef
on the provider cluster object, e.g. AWSCluster
, AzureCluster
or VSphereCluster
.
Weave GitOps will search all namespaces in the cluster for potential identities that can be used to create a cluster. The following identity kind
s are currently supported and their corresponding Cluster kinds:
AWSClusterStaticIdentity
: AWSCluster
AWSClusterRoleIdentity
: AWSCluster
AzureClusterIdentity
: AzureCluster
VSphereClusterIdentity
: VSphereCluster
Ready for more GitOps?
To purchase an entitlement to Weave GitOps Enterprise, please contact sales@weave.works.
Weave GitOps Enterprise provides ops teams with an easy way to assess the health of multiple clusters in a single place. It shows cluster information such as Kubernetes version and number of nodes and provides details about the GitOps operations on those clusters, such as Git repositories and recent commits. Additionally, it aggregates Prometheus alerts to assist with troubleshooting.
If you have already purchased your entitlement, head to the installation page.
"},{"location":"enterprise/#feature-breakdown","title":"Feature Breakdown","text":"In addition to the features in the OSS edition, Weave GitOps Enterprise offers the following capabilities, taking your delivery from simple Continuous Delivery to Internal Developer Platform:
"},{"location":"enterprise/#cluster-fleet-management","title":"Cluster Fleet Management","text":"Weave GitOps Enterprise (WGE) simplifies cluster lifecycle management at scale\u2014even massive scale. Through pull requests, which make every action recorded and auditable, WGE makes it possible for teams to create, update, and delete clusters across entire fleets. WGE further simplifies the process by providing both a user interface (UI) and a command line interface (CLI) for teams to interact with and manage clusters on-prem, across clouds, and in hybrid environments. WGE works with Terraform, Crossplane, and any Cluster API provider.
"},{"location":"enterprise/#trusted-application-delivery","title":"Trusted Application Delivery","text":"Add policy as code to GitOps pipelines and enforce security and compliance, application resilience and coding standards from source to production. Validate policy conformance at every step in the software delivery pipeline: commit, build, deploy and run time.
"},{"location":"enterprise/#progressive-delivery","title":"Progressive Delivery","text":"Deploy into production environments safely using canary, blue/green deployment, and A/B strategies. Simple, single-file configuration defines success rollback. Measure Service Level Objectives (SLOs) using observability metrics from Prometheus, Datadog, New Relic, and others.
"},{"location":"enterprise/#cd-pipelines","title":"CD Pipelines","text":"Rollout new software from development to production. Environment rollouts that work with your existing CI system.
"},{"location":"enterprise/#team-workspaces","title":"Team Workspaces","text":"Allow DevOps teams to work seamlessly together with multi-tenancy, total RBAC control, and policy enforcement, with integration to enterprise IAM.
"},{"location":"enterprise/#self-service-templates-and-profiles","title":"Self-Service Templates and Profiles","text":"Component profiles enable teams to deploy standard services quickly, consistently and reliably. Teams can curate the profiles that are available within their estate ensuring there is consistency everywhere. Using GitOps it's easy to guarantee the latest, secure versions of any component are deployed in all production systems.
"},{"location":"enterprise/#health-status-and-compliance-dashboards","title":"Health Status and Compliance Dashboards","text":"Gain a single view of the health and state of the cluster and its workloads. Monitor deployments and alert on policy violations across apps and clusters.
"},{"location":"enterprise/#kubernetes-anywhere","title":"Kubernetes Anywhere","text":"Reduce complexity with GitOps and install across all major target environments including support for on-premise, edge, hybrid, and multi-cloud Kubernetes clusters.
"},{"location":"enterprise/#critical-247-support","title":"Critical 24/7 Support","text":"Your business and workloads operate around the clock, and so do we. Whenever you have a problem, our experts are there to help. We\u2019ve got your back!
"},{"location":"enterprise/install-enterprise-airgap/","title":"Install Enterprise in Air-gapped Environments ENTERPRISE","text":"From Wikipedia
An air gap, air wall, air gapping or disconnected network is a network security measure employed on one or more computers to ensure that a secure computer network is physically isolated from unsecured networks, such as the public Internet or an unsecured local area network...
This document guides on how to install Weave GitOps Enterprise (WGE) in a restricted environment.
"},{"location":"enterprise/install-enterprise-airgap/#before-you-start","title":"Before You Start","text":"There are multiple restrictions that could happen within an air-gapped environment. This guide assumes that you have egress network restrictions. In order to install WGE, the required artifacts must be loaded from a private registry. This guide helps you with the task of identifying the Helm charts and container images required to install WGE and to load them into your private registry.
It also assumes that you could prepare the installation from a proxy host. A proxy host is defined here as a computer that is able to access both the public and private network. It could take different shapes, for example, it could be a bastion host, a corp laptop, etc.
Access to both public and private network is required during the airgap installation but not simultaneously. It is expected to have an online stage to gather the artifacts first, and an offline stage later, to load the artifacts in the private network.
Finally, we aim to provide an end-to-end example to use as guidance more than a recipe. Feel free to adapt the details that do not fit within your context.
"},{"location":"enterprise/install-enterprise-airgap/#install-wge","title":"Install WGE","text":"There are different variations of the following stages and conditions. We consider that installing WGE in an air-gapped environment could follow the following stages.
The main goal of this stage is to recreate a local WGE within your context, to collect the container images and Helm charts, that will be required in your private registry for the offline installation.
A three-step setup is followed.
There are many possible configurations for this host. This guide will assume that the host has installed the following:
Create a kind cluster with registry following this guide
"},{"location":"enterprise/install-enterprise-airgap/#install-flux","title":"Install Flux","text":"You could just use flux install
to install Flux into your kind cluster
We are going to install ChartMuseum via Flux.
Remember to also install helm plugin cm-push.
Expand to see installation yaml---\napiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: HelmRepository\nmetadata:\nname: chartmuseum\nnamespace: flux-system\nspec:\ninterval: 10m\nurl: https://chartmuseum.github.io/charts\n---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: chartmuseum\nnamespace: flux-system\nspec:\nchart:\nspec:\nchart: chartmuseum\nsourceRef:\nkind: HelmRepository\nname: chartmuseum\nnamespace: flux-system\ninterval: 10m0s\ntimeout: 10m0s\nreleaseName: helm-repo\ninstall:\ncrds: CreateReplace\nremediation:\nretries: 3\nvalues:\nenv:\nopen:\nDISABLE_API: \"false\"\nAUTH_ANONYMOUS_GET: \"true\"\n
Set up access from your host.
#expose kubernetes svc\nkubectl -n flux-system port-forward svc/helm-repo-chartmuseum 8080:8080 &\n\n#add hostname\nsudo -- sh -c \"echo 127.0.0.1 helm-repo-chartmuseum >> /etc/hosts\"\n
Test that you could reach it. #add repo to helm\nhelm repo add private http://helm-repo-chartmuseum:8080\n\n#test that works\nhelm repo update private\n
At this stage you have already a private registry for container images and helm charts.
"},{"location":"enterprise/install-enterprise-airgap/#install-wge_1","title":"Install WGE","text":"This step is to gather the artifacts and images in your local environment to push to the private registry.
"},{"location":"enterprise/install-enterprise-airgap/#cluster-api","title":"Cluster API","text":"This would vary depending on the provider, given that we target an offline environment, most likely we are in a private cloud environment, so we will be using liquidmetal.
Export these environment variables to configure your CAPI experience. Adjust them to your context.
export CAPI_BASE_PATH=/tmp/capi\nexport CERT_MANAGER_VERSION=v1.9.1\nexport CAPI_VERSION=v1.3.0\nexport CAPMVM_VERSION=v0.7.0\nexport EXP_CLUSTER_RESOURCE_SET=true\nexport CONTROL_PLANE_MACHINE_COUNT=1\nexport WORKER_MACHINE_COUNT=1\nexport CONTROL_PLANE_VIP=\"192.168.100.9\"\nexport HOST_ENDPOINT=\"192.168.1.130:9090\"\n
Execute the following script to generate clusterctl
config file.
cat << EOF > clusterctl.yaml\ncert-manager:\n url: \"$CAPI_BASE_PATH/cert-manager/$CERT_MANAGER_VERSION/cert-manager.yaml\"\n\nproviders:\n - name: \"microvm\"\n url: \"$CAPI_BASE_PATH/infrastructure-microvm/$CAPMVM_VERSION/infrastructure-components.yaml\"\n type: \"InfrastructureProvider\"\n - name: \"cluster-api\"\n url: \"$CAPI_BASE_PATH/cluster-api/$CAPI_VERSION/core-components.yaml\"\n type: \"CoreProvider\"\n - name: \"kubeadm\"\n url: \"$CAPI_BASE_PATH/bootstrap-kubeadm/$CAPI_VERSION/bootstrap-components.yaml\"\n type: \"BootstrapProvider\"\n - name: \"kubeadm\"\n url: \"$CAPI_BASE_PATH/control-plane-kubeadm/$CAPI_VERSION/control-plane-components.yaml\"\n type: \"ControlPlaneProvider\"\nEOF\n
Execute make
using the following makefile to intialise CAPI in your cluster: Expand to see Makefile contents .PHONY := capi\n\ncapi: capi-init capi-cluster\n\ncapi-init: cert-manager cluster-api bootstrap-kubeadm control-plane-kubeadm microvm clusterctl-init\n\ncert-manager:\nmkdir -p $(CAPI_BASE_PATH)/cert-manager/$(CERT_MANAGER_VERSION)\ncurl -L https://github.com/cert-manager/cert-manager/releases/download/$(CERT_MANAGER_VERSION)/cert-manager.yaml --output $(CAPI_BASE_PATH)/cert-manager/$(CERT_MANAGER_VERSION)/cert-manager.yaml\n\ncluster-api:\nmkdir -p $(CAPI_BASE_PATH)/cluster-api/$(CAPI_VERSION)\ncurl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/core-components.yaml --output $(CAPI_BASE_PATH)/cluster-api/$(CAPI_VERSION)/core-components.yaml\n curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/metadata.yaml --output $(CAPI_BASE_PATH)/cluster-api/$(CAPI_VERSION)/metadata.yaml\n\nbootstrap-kubeadm:\nmkdir -p $(CAPI_BASE_PATH)/bootstrap-kubeadm/$(CAPI_VERSION)\ncurl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/bootstrap-components.yaml --output $(CAPI_BASE_PATH)/bootstrap-kubeadm/$(CAPI_VERSION)/bootstrap-components.yaml\n curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/metadata.yaml --output $(CAPI_BASE_PATH)/bootstrap-kubeadm/$(CAPI_VERSION)/metadata.yaml\n\ncontrol-plane-kubeadm:\nmkdir -p $(CAPI_BASE_PATH)/control-plane-kubeadm/$(CAPI_VERSION)\ncurl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/control-plane-components.yaml --output $(CAPI_BASE_PATH)/control-plane-kubeadm/$(CAPI_VERSION)/control-plane-components.yaml\n curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/$(CAPI_VERSION)/metadata.yaml --output $(CAPI_BASE_PATH)/control-plane-kubeadm/$(CAPI_VERSION)/metadata.yaml\n\nmicrovm:\nmkdir -p 
$(CAPI_BASE_PATH)/infrastructure-microvm/$(CAPMVM_VERSION)\ncurl -L https://github.com/weaveworks-liquidmetal/cluster-api-provider-microvm/releases/download/$(CAPMVM_VERSION)/infrastructure-components.yaml --output $(CAPI_BASE_PATH)/infrastructure-microvm/$(CAPMVM_VERSION)/infrastructure-components.yaml\n curl -L https://github.com/weaveworks-liquidmetal/cluster-api-provider-microvm/releases/download/$(CAPMVM_VERSION)/cluster-template-cilium.yaml --output $(CAPI_BASE_PATH)/infrastructure-microvm/$(CAPMVM_VERSION)/cluster-template-cilium.yaml\n curl -L https://github.com/weaveworks-liquidmetal/cluster-api-provider-microvm/releases/download/$(CAPMVM_VERSION)/metadata.yaml --output $(CAPI_BASE_PATH)/infrastructure-microvm/$(CAPMVM_VERSION)/metadata.yaml\n\nclusterctl-init:\nclusterctl init --wait-providers -v 4 --config clusterctl.yaml --infrastructure microvm\n\ncapi-cluster:\nclusterctl generate cluster --config clusterctl.yaml -i microvm:$(CAPMVM_VERSION) -f cilium lm-demo | kubectl apply -f -\n
"},{"location":"enterprise/install-enterprise-airgap/#deploying-the-terraform-controller","title":"Deploying the Terraform Controller","text":"Apply the following example manifest to deploy the Terraform Controller:
Expand to see file contentsapiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: HelmRepository\nmetadata:\nname: tf-controller\nnamespace: flux-system\nspec:\ninterval: 10m\nurl: https://weaveworks.github.io/tf-controller/\n---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: tf-controller\nnamespace: flux-system\nspec:\nchart:\nspec:\nchart: tf-controller\nversion: \"0.9.2\"\nsourceRef:\nkind: HelmRepository\nname: tf-controller\nnamespace: flux-system\ninterval: 10m0s\ninstall:\ncrds: CreateReplace\nremediation:\nretries: 3\nupgrade:\ncrds: CreateReplace\n
"},{"location":"enterprise/install-enterprise-airgap/#wge","title":"WGE","text":"Update the following manifest to your context.
Expand to see file contents---\napiVersion: v1\ndata:\ndeploy-key: <changeme>\nentitlement: <changeme>\npassword: <changeme>\nusername: <changeme>\nkind: Secret\nmetadata:\nlabels:\nkustomize.toolkit.fluxcd.io/name: shared-secrets\nkustomize.toolkit.fluxcd.io/namespace: flux-system\nname: weave-gitops-enterprise-credentials\nnamespace: flux-system\ntype: Opaque\n---\napiVersion: v1\ndata:\npassword: <changeme>\nusername: <changeme>\nkind: Secret\nmetadata:\nlabels:\nkustomize.toolkit.fluxcd.io/name: enterprise\nkustomize.toolkit.fluxcd.io/namespace: flux-system\nname: cluster-user-auth\nnamespace: flux-system\ntype: Opaque\n---\napiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: HelmRepository\nmetadata:\nname: weave-gitops-enterprise-charts\nnamespace: flux-system\nspec:\ninterval: 10m\nsecretRef:\nname: weave-gitops-enterprise-credentials\nurl: https://charts.dev.wkp.weave.works/releases/charts-v3\n---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: weave-gitops-enterprise\nnamespace: flux-system\nspec:\nchart:\nspec:\nchart: mccp\nversion: \"0.10.2\"\nsourceRef:\nkind: HelmRepository\nname: weave-gitops-enterprise-charts\nnamespace: flux-system\ninterval: 10m0s\ninstall:\ncrds: CreateReplace\nremediation:\nretries: 3\nupgrade:\ncrds: CreateReplace\nvalues:\nglobal:\ncapiEnabled: true\nenablePipelines: true\nenableTerraformUI: true\nclusterBootstrapController:\nenabled: true\ncluster-controller:\ncontrollerManager:\nkubeRbacProxy:\nimage:\nrepository: gcr.io/kubebuilder/kube-rbac-proxy\ntag: v0.8.0\nmanager:\nimage:\nrepository: docker.io/weaveworks/cluster-controller\ntag: v1.4.1\npolicy-agent:\nenabled: true\nimage: weaveworks/policy-agent\npipeline-controller:\ncontroller:\nmanager:\nimage:\nrepository: ghcr.io/weaveworks/pipeline-controller\nimages:\nclustersService: docker.io/weaveworks/weave-gitops-enterprise-clusters-service:v0.10.2\nuiServer: 
docker.io/weaveworks/weave-gitops-enterprise-ui-server:v0.10.2\nclusterBootstrapController: weaveworks/cluster-bootstrap-controller:v0.4.0\n
At this stage you should have a local management cluster with Weave GitOps Enterprise installed.
\u279c kubectl get pods -A\nNAMESPACE NAME READY STATUS RESTARTS AGE\n...\nflux-system weave-gitops-enterprise-cluster-controller-6f8c69dc8-tq994 2/2 Running 5 (12h ago) 13h\nflux-system weave-gitops-enterprise-mccp-cluster-bootstrap-controller-cxd9c 2/2 Running 0 13h\nflux-system weave-gitops-enterprise-mccp-cluster-service-8485f5f956-pdtxw 1/1 Running 0 12h\nflux-system weave-gitops-enterprise-pipeline-controller-85b76d95bd-2sw7v 1/1 Running 0 13h\n...\n
You can observe the installed Helm Charts with kubectl
:
kubectl get helmcharts.source.toolkit.fluxcd.io\nNAME CHART VERSION SOURCE KIND SOURCE NAME AGE READY STATUS\nflux-system-cert-manager cert-manager 0.0.7 HelmRepository weaveworks-charts 13h True pulled 'cert-manager' chart with version '0.0.7'\nflux-system-tf-controller tf-controller 0.9.2 HelmRepository tf-controller 13h True pulled 'tf-controller' chart with version '0.9.2'\nflux-system-weave-gitops-enterprise mccp v0.10.2 HelmRepository weave-gitops-enterprise-charts 13h True pulled 'mccp' chart with version '0.10.2'\n
As well as the container images:
kubectl get pods --all-namespaces -o jsonpath=\"{.items[*].spec['containers','initContainers'][*].image}\" |tr -s '[[:space:]]' '\\n' \\\n| sort | uniq | grep -vE 'kindest|etcd|coredns'\n\ndocker.io/prom/prometheus:v2.34.0\ndocker.io/weaveworks/cluster-controller:v1.4.1\ndocker.io/weaveworks/weave-gitops-enterprise-clusters-service:v0.10.2\ndocker.io/weaveworks/weave-gitops-enterprise-ui-server:v0.10.2\nghcr.io/fluxcd/flagger-loadtester:0.22.0\nghcr.io/fluxcd/flagger:1.21.0\nghcr.io/fluxcd/helm-controller:v0.23.1\nghcr.io/fluxcd/kustomize-controller:v0.27.1\nghcr.io/fluxcd/notification-controller:v0.25.2\n...\n
"},{"location":"enterprise/install-enterprise-airgap/#collect-and-publish-artifacts","title":"Collect and Publish Artifacts","text":"This section guides you to push installed artifacts to your private registry. Here's a Makefile to help you with each stage:
Expand to see Makefile contents .PHONY := all\n\n #set these variable with your custom configuration\nPRIVATE_HELM_REPO_NAME=private\n REGISTRY=localhost:5001\n WGE_VERSION=0.10.2\n\n WGE=mccp-$(WGE_VERSION)\nWGE_CHART=$(WGE).tgz\n\n all: images charts\n\n charts: pull-charts push-charts\n\n images:\n kubectl get pods --all-namespaces -o jsonpath=\"{.items[*].spec['containers','initContainers'][*].image}\" \\\n|tr -s '[[:space:]]' '\\n' | sort | uniq | grep -vE 'kindest|kube-(.*)|etcd|coredns' | xargs -L 1 -I {} ./image-sync.sh {} $(REGISTRY)\nkubectl get microvmmachinetemplates --all-namespaces -o jsonpath=\"{.items[*].spec.template.spec.kernel.image}\"|tr -s '[[:space:]]' '\\n' \\\n| sort | uniq | xargs -L 1 -I {} ./image-sync.sh {} $(REGISTRY)\n\npull-charts:\n curl -L https://s3.us-east-1.amazonaws.com/weaveworks-wkp/releases/charts-v3/$(WGE_CHART) --output $(WGE_CHART)\n\npush-charts:\n helm cm-push -f $(WGE_CHART) $(PRIVATE_HELM_REPO_NAME)\n
The image-sync.sh
referenced in the images
target of the above Makefile is similar to:
skopeo copy docker://$1 docker://$2/$1 --preserve-digests --multi-arch=all\n
Skopeo allows you to configure a range of security features to meet your requirements. For example, configuring trust policies before pulling or signing containers before making them available in your private network. Feel free to adapt the previous script to meet your security needs.
make
to automatically sync Helm charts and container images.\u279c resources git:(docs-airgap-install) \u2717 make\nkubectl get microvmmachinetemplates --all-namespaces -o jsonpath=\"{.items[*].spec.template.spec.kernel.image}\"|tr -s '[[:space:]]' '\\n' \\\n| sort | uniq | xargs -L 1 -I {} ./image-pull-push.sh {} docker-registry:5000\n\n5.10.77: Pulling from weaveworks-liquidmetal/flintlock-kernel\nDigest: sha256:5ef5f3f5b42a75fdb69cdd8d65f5929430f086621e61f00694f53fe351b5d466\nStatus: Image is up to date for ghcr.io/weaveworks-liquidmetal/flintlock-kernel:5.10.77\nghcr.io/weaveworks-liquidmetal/flintlock-kernel:5.10.77\n...5.10.77: digest: sha256:5ef5f3f5b42a75fdb69cdd8d65f5929430f086621e61f00694f53fe351b5d466 size: 739\n
"},{"location":"enterprise/install-enterprise-airgap/#airgap-install","title":"Airgap Install","text":""},{"location":"enterprise/install-enterprise-airgap/#weave-gitops-enterprise","title":"Weave GitOps Enterprise","text":"At this stage you have in your private registry both the Helm charts and container images required to install Weave GitOps Enterprise. Now you are ready to install WGE from your private registry.
Follow the instructions to install WGE with the following considerations:
spec.chart.spec.sourceRef
to tell Flux to pull Helm charts from your Helm repo.spec.values
to use the container images from your private registry.An example of how it would look for Weave GitOps Enterprise is shown below.
Expand to view example WGE manifest weave-gitops-enterprise.yaml---\napiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: HelmRepository\nmetadata:\nname: weave-gitops-enterprise-charts\nnamespace: flux-system\nspec:\ninterval: 1m\nurl: http://helm-repo-chartmuseum:8080\n---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: weave-gitops-enterprise\nnamespace: flux-system\nspec:\nchart:\nspec:\nchart: mccp\nversion: \"0.10.2\"\nsourceRef:\nkind: HelmRepository\nname: weave-gitops-enterprise-charts\nnamespace: flux-system\ninterval: 1m0s\ninstall:\ncrds: CreateReplace\nremediation:\nretries: 3\nupgrade:\ncrds: CreateReplace\nvalues:\nglobal:\ncapiEnabled: true\nenablePipelines: true\nenableTerraformUI: true\nclusterBootstrapController:\nenabled: true\n#images changed\ncluster-controller:\ncontrollerManager:\nkubeRbacProxy:\nimage:\nrepository: localhost:5001/gcr.io/kubebuilder/kube-rbac-proxy\ntag: v0.8.0\nmanager:\nimage:\nrepository: localhost:5001/docker.io/weaveworks/cluster-controller\ntag: v1.4.1\npolicy-agent:\nenabled: true\nimage: localhost:5001/weaveworks/policy-agent\npipeline-controller:\ncontroller:\nmanager:\nimage:\nrepository: localhost:5001/ghcr.io/weaveworks/pipeline-controller\nimages:\nclustersService: localhost:5001/docker.io/weaveworks/weave-gitops-enterprise-clusters-service:v0.10.2\nuiServer: localhost:5001/docker.io/weaveworks/weave-gitops-enterprise-ui-server:v0.10.2\nclusterBootstrapController: localhost:5001/weaveworks/cluster-bootstrap-controller:v0.4.0\n
"},{"location":"enterprise/install-enterprise-airgap/#cluster-api_1","title":"Cluster API","text":"Indicate in the Cluster API configuration file clusterctl.yaml
that you want to use images from the private repo by leveraging image overrides.
images:\nall:\nrepository: localhost:5001/registry.k8s.io/cluster-api\ninfrastructure-microvm:\nrepository: localhost:5001/ghcr.io/weaveworks-liquidmetal\n
Then execute make clusterctl-init
to init capi using your private registry."},{"location":"enterprise/install-enterprise-azure/","title":"Azure and Weave GitOps Enterprise Installation ENTERPRISE","text":"Once you successfully create your Kubernetes cluster in Azure Marketplace, follow these steps to Install Weave GitOps Enterprise. These instructions apply to both Azure AKS and Azure ARC clusters\u2014they'll behave in the same way.
Tip
If you have already installed Flux, then Azure Flux will refuse to install.
"},{"location":"enterprise/install-enterprise-azure/#1-choose-the-gitops-option-in-the-marketplace","title":"1. Choose the \u201cGitOps\u201d Option in the Marketplace","text":"Search for Weave GitOps Enterprise in the \"Extensions + Applications\" of the Azure Marketplace. Click the \"GitOps\" option. This will take you to a screen that presents a first-class item called Type: Flux v2
.
Click GitOps => Create.
Add the config name, namespace (default), scope: cluster, type (Flux v2), and continuous reconciliation option. Your entries should look like this:
All of the displayed properties for the Flux objects screen are the same as what you'd supply to Flux bootstrap.
"},{"location":"enterprise/install-enterprise-azure/#optional-install-capz-the-capi-provider","title":"Optional: Install CAPZ, the CAPI Provider","text":"If you are planning to manage or connect CAPI clusters to the WE service make sure you first install the CAPI provider. Then during the WE installation process be sure to select the \"Enable CAPI support\" checkbox.
"},{"location":"enterprise/install-enterprise-azure/#2-apply-the-entitlements-secret","title":"2. Apply the Entitlements Secret","text":"Contact sales@weave.works for a valid entitlements secret. This will come in the form of a file \u201centitlements.yaml\u201d. Apply it to the cluster:
kubectl apply -f entitlements.yaml\n
"},{"location":"enterprise/install-enterprise-azure/#3-configure-access-for-writing-to-git-from-the-ui","title":"3. Configure Access for Writing to Git from the UI","text":"(This section is the same as what you'll find in the main WGE install documentation.)
Here we provide guidance for GitHub, GitLab, BitBucket Server, and Azure DevOps.
GitHub requires no additional configuration for OAuth Git access
Create a GitLab OAuth application that will request api
permissions to create pull requests on your behalf.
Follow the GitLab docs.
The application should have at least these scopes:
api
openid
email
profile
Add callback URLs to the application for each address the UI will be exposed on, e.g.:
https://localhost:8000/oauth/gitlab
for port-forwarding and testinghttps://git.example.com/oauth/gitlab
for production useSave your application, taking note of the Client ID and Client Secret. Save them into the git-provider-credentials
secret, along with:
GIT_HOST_TYPES
to tell WGE that the host is gitlabGITLAB_HOSTNAME
where the OAuth app is hostedReplace values in this snippet and run:
kubectl create secret generic git-provider-credentials --namespace=flux-system \\\n--from-literal=\"GITLAB_CLIENT_ID=13457\" \\\n--from-literal=\"GITLAB_CLIENT_SECRET=24680\" \\\n--from-literal=\"GITLAB_HOSTNAME=git.example.com\" \\\n--from-literal=\"GIT_HOST_TYPES=git.example.com=gitlab\"\n
Create a new incoming application link from the BitBucket administration dashboard. You will be asked to enter a unique name and the redirect URL for the external application. The redirect URL should be set to <WGE dashboard URL>/oauth/bitbucketserver
. You will also need to select permissions for the application. The minimum set of permissions needed for WGE to create pull requests on behalf of users is Repositories - Write
. An example of configuring these settings is shown below.
Configuring a new incoming application link
Save your application and take note of the Client ID and Client Secret. Save them into the git-provider-credentials
secret, along with:
GIT_HOST_TYPES
to tell WGE that the host is bitbucket-serverBITBUCKET_SERVER_HOSTNAME
where the OAuth app is hostedReplace values in this snippet and run:
kubectl create secret generic git-provider-credentials --namespace=flux-system \\\n--from-literal=\"BITBUCKET_SERVER_CLIENT_ID=13457\" \\\n--from-literal=\"BITBUCKET_SERVER_CLIENT_SECRET=24680\" \\\n--from-literal=\"BITBUCKET_SERVER_HOSTNAME=git.example.com\" \\\n--from-literal=\"GIT_HOST_TYPES=git.example.com=bitbucket-server\"\n
If the secret is already present, use the following command to update it using your default editor:
kubectl edit secret generic git-provider-credentials --namespace=flux-system\n
Info
If BitBucket Server is running on the default port (7990), make sure you include the port number in the values of the secret. For example: GIT_HOST_TYPES=git.example.com:7990=bitbucket-server
Navigate to VisualStudio and register a new application, as explained in the docs. Set the authorization callback URL and select which scopes to grant. Set the callback URL to <WGE dashboard URL>/oauth/azuredevops
.
Select the Code (read and write)
scope from the list. This is necessary so that WGE can create pull requests on behalf of users. An example of configuring these settings is shown below.
Creating a new application
After creating your application, you will be presented with the application settings. Take note of the App ID
and Client Secret
values\u2014you will use them to configure WGE.
Application settings
In your cluster, create a secret named git-provider-credentials
that contains the App ID
and Client Secret
values from the newly created application.
Replace values in this snippet and run:
kubectl create secret generic git-provider-credentials --namespace=flux-system \\\n--from-literal=\"AZURE_DEVOPS_CLIENT_ID=<App ID value>\" \\\n--from-literal=\"AZURE_DEVOPS_CLIENT_SECRET=<Client Secret value>\"\n
WGE is now configured to ask users for authorization the next time a pull request must be created as part of using a template. Note that each user can view and manage which applications they have authorized by navigating to https://app.vsaex.visualstudio.com/me.
"},{"location":"enterprise/install-enterprise-azure/#4-configure-your-password","title":"4. Configure Your Password","text":"
First, install the Weave GitOps Enterprise CLI tool. To do this, you can use either brew or curl.
brew install weaveworks/tap/gitops-ee\n
curl --silent --location \"https://artifacts.wge.dev.weave.works/releases/bin/0.27.0/gitops-$(uname)-$(uname -m).tar.gz\" | tar xz -C /tmp\nsudo mv /tmp/gitops /usr/local/bin\ngitops version\n
Now, to login to the WGE UI, generate a bcrypt hash for your chosen password and store it as a secret in the Kubernetes cluster. There are several different ways to generate a bcrypt hash. Here, we'll use gitops get bcrypt-hash
from our GitOps CLI.
PASSWORD=\"<Make up and insert a brand-new password here>\"\necho -n $PASSWORD | gitops get bcrypt-hash | kubectl create secret generic cluster-user-auth -n flux-system --from-literal=username=wego-admin --from-file=password=/dev/stdin\n
A validation to know it\u2019s working:
kubectl get secret -n flux-system cluster-user-auth\n
"},{"location":"enterprise/install-enterprise-azure/#5-install-weave-gitops-enterprise-to-your-cluster","title":"5. Install Weave GitOps Enterprise to Your Cluster","text":"First, you'll get taken to the Weaveworks portal on the Azure platform, which provides your subscription details.
Search for Weave GitOps. Pick \"View private products\" and choose WGE. Fill out the forms, selecting your cluster, then choose \"Review and Create\".
"},{"location":"enterprise/install-enterprise-azure/#6-apply-extra-configuration","title":"6. Apply Extra Configuration","text":"Additional configuration is done through an optional ConfigMap:
apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: cluster-service-extra-config\n namespace: flux-system\ndata:\n # disable TLS\nNO_TLS: \"true\"\n
Apply the configuration with:
kubectl apply -f cluster-service-extra-config.yaml\n\n# restart the clusters-service for changes to take effect\nkubectl -n flux-system rollout restart deploy/weave-gitops-enterprise-mccp-cluster-service\n
"},{"location":"enterprise/install-enterprise-azure/#available-configuration-options","title":"Available Configuration Options","text":"value default description NO_TLS
\"false\"
disable TLS CLUSTER_NAME
\"management\"
name of the management cluster AUTH_METHODS
\"token-passthrough,user-account\"
Which auth methods to use, valid values are 'oidc', 'token-pass-through' and 'user-account' OIDC_ISSUER_URL
\"token-passthrough,user-account\"
The URL of the OpenID Connect issuer OIDC_CLIENT_ID
\"token-passthrough,user-account\"
The client ID for the OpenID Connect client OIDC_CLIENT_SECRET
\"token-passthrough,user-account\"
The client secret to use with OpenID Connect issuer OIDC_REDIRECT_URL
\"token-passthrough,user-account\"
The OAuth2 redirect URL OIDC_TOKEN_DURATION
\"1h\"
The duration of the ID token. It should be set in the format: number + time unit (s,m,h) e.g., 20m OIDC_CLAIM_USERNAME
\"email\"
JWT claim to use as the user name. By default email, which is expected to be a unique identifier of the end user. Admins can choose other claims, such as sub or name, depending on their provider OIDC_CLAIM_GROUPS
\"groups\"
JWT claim to use as the user's group. If the claim is present it must be an array of strings CUSTOM_OIDC_SCOPES
\"groups, openid, email, profile\"
Customise the requested scopes for the OIDC authentication flow - openid will always be requested"},{"location":"enterprise/install-enterprise-azure/#7-check-that-it-works","title":"7. Check That It Works","text":"Go to the \"services and ingresses\" tab in the Azure portal and look for signs that the UI installed.
"},{"location":"enterprise/install-enterprise-azure/#troubleshooting","title":"Troubleshooting","text":"WGE will try and automatically install Flux on a new cluster. If this fails for some reason, or if you need a custom Flux installation, you can manually install it before installing WGE.
Click \"Next\" and add:
And under the \"Authentication\" section:
Click \"Next\". You'll see an option to create a Kustomisation, which is optional. To create one:
Click \"Save\". Then clicking \"Next\", which will give you a summary so you can review your input. Then click \"Create\". It will take about five minutes to deploy.
You'll get to a new screen, which at the top-right shows \"Notifications\" and will display creation of the Flux configuration. When your deployment succeeds, go to the resource and pin to your dashboard. Then go to your terminal to see if it works in kubectl. In the terminal you'll get the GitRepository and Kustomizations. You should then get a green \"succeeded\" checkmark.
The Kustomisations screen does not provide an option to inspect the path/target namespace\u2014you have to supply the target Namespace in the Kustomization object.
"},{"location":"enterprise/install-enterprise-azure/#next-steps","title":"Next Steps","text":"From this point, you can follow our generalized WGE installation instructions to configure TLS and log into the UI. Installing the Azure Marketplace product installs the Helm chart.
"},{"location":"enterprise/install-enterprise-cli/","title":"Install Weave GitOps Enterprise via CLI","text":"Warning
This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments.
You could install Weave GitOps Enterprise via gitops-ee bootstrap
CLI command which is suitable for two main scenarios:
Each scenario is supported by an operation mode:
For those seeking other scenarios or fine-grained customisation, the Weave GitOps Enterprise manual install would be the recommended approach.
"},{"location":"enterprise/install-enterprise-cli/#getting-started","title":"Getting Started","text":""},{"location":"enterprise/install-enterprise-cli/#prerequisites","title":"Prerequisites","text":"Before you start make sure the following requirements are met:
gitops-ee
CLI (> v0.35)","text":"Weave GitOps Enterprise Bootstrap functionality is available on Weave GitOps Enterprise CLI starting from version v0.35. If you haven't already, please install the latest gitops-ee
CLI using this command.
brew install weaveworks/tap/gitops-ee\n
"},{"location":"enterprise/install-enterprise-cli/#bootstrap-weave-gitops-enterprise","title":"Bootstrap Weave GitOps Enterprise","text":"Please use the following command to start the installation wizard of Weave GitOps Enterprise.
InteractiveNon-Interactivegitops bootstrap\n
The bootstrap wizard will take you step-by-step into configuring Weave GitOps Enterprise. To understand more about the CLI configurations experience, check the below sections here. You could run the bootstrap command in non-interactive mode by providing the required configurations as flags. The following gives you an example to get started that you could adapt to your own context
gitops bootstrap \\\n--kubeconfig=$HOME/.kube/config \\\n--private-key=$HOME/.ssh/id_rsa --private-key-password=\"\" \\\n--version=\"0.35.0\" \\\n--domain-type=\"localhost\" \\\n--password=\"admin123\"\n
For more information about the CLI configurations, check the below sections here
"},{"location":"enterprise/install-enterprise-cli/#appendix","title":"Appendix","text":""},{"location":"enterprise/install-enterprise-cli/#understanding-gitops-ee-bootstrap","title":"Understandinggitops-ee bootstrap
","text":"gitops-ee bootstrap
is a workflow that will take you through the following stages:
Weave GitOps Enterprise runs on top of flux, the bootstrap CLI will check if flux is installed on the management cluster, and it will verify that it has the right version with valid git repository setup, and it is able to reconcile flux components properly. If flux is installed, but doesn't have a valid installation, the bootstrap CLI will terminate pending the fix or uninstall of current flux installation.
"},{"location":"enterprise/install-enterprise-cli/#verify-entitlement","title":"Verify Entitlement","text":"Weave GitOps Enterprise Entitlement is your obtained license to use our product. The Entitlements file is a Kubernetes secret that contains your licence. Bootstrapping
checks that the secret exists on the management cluster and that it is valid: it will verify that it has valid content and that the entitlement is not expired. To get the entitlement secret please contact sales@weave.works, then apply it on your management cluster with the name weave-gitops-enterprise-credentials
under flux-system
namespace.
In order for gitops-ee bootstrap
to push WGE resources to the management cluster's git repository, you will be prompted to provide the private key used to access your repo via ssh. If the private key is encrypted, you will also be asked to provide the private key password.
Info
Disclaimer: The bootstrap CLI will ONLY use the private key to push WGE resources to your repo, and won't use it in any other way that can compromise your repo or cluster's security.
"},{"location":"enterprise/install-enterprise-cli/#select-wge-version","title":"Select WGE version","text":"The bootstrap CLI will prompt you to choose from the latest 3 versions of Weave GitOps Enterprise.
"},{"location":"enterprise/install-enterprise-cli/#create-cluster-user","title":"Create Cluster User","text":"You will be prompted to provide an admin username and password, which will be used to access the dashboard. This will create an admin secret with the credentials. If you already have previous admin credentials on your cluster, the installation will ask whether you want to continue with the old credentials or exit, revoke them, and re-run the installation.
"},{"location":"enterprise/install-enterprise-cli/#configure-dashboard-access","title":"Configure Dashboard Access","text":"To access Weave GitOps Enterprise dashboard, you have the two following options available:
localhost
in the cli and the dashboard will be available through a ClusterIP Service.externaldns
the dashboard will be available through an Ingress with the following considerations:public-nginx
.After installation is successful. The CLI will print out the URL where you can access the dashboard.
"},{"location":"enterprise/install-enterprise-cli/#optional-configure-oidc","title":"(Optional) Configure OIDC","text":"OIDC configuration will enable you to log in with an OIDC provider alongside, or instead of, the admin credentials. After the installation is complete, you will be asked whether you want to configure OIDC access. If you don't want to set it up right away, you can do it later by running gitops-ee bootstrap auth --type=oidc
command.
To configure OIDC access, you will be asked to provide the following values: DiscoveryUrl
this will verify that OIDC is accessible and get the issuerUrl from the OIDC settings. clientID
& clientSecret
that you have configured on your OIDC static-clients.
Note
Please don't forget to add a new static-client on your OIDC provider settings with the redirectURI your-domain/oauth2/callback
for example http://localhost:3000/oauth2/callback
--kube-config
: allows to choose the Kubeconfig for your cluster, default would be ~/.kube/config-d
, --domain externaldns
: indicate the domain to use in case of using externaldns-t
, --domain-type
: dashboard domain type: could be 'localhost' or 'externaldns'-h
, --help
: help for bootstrap-p
, --password
: Dashboard admin password-k
, --private-key
: Private key path. This key will be used to push the Weave GitOps Enterprise's resources to the default cluster repository-c
, --private-key-password
: Private key password. If the private key is encrypted using password-u
, --username
: Dashboard admin username-v
, --version
: Weave GitOps Enterprise version to installInfo
To purchase an entitlement to Weave GitOps Enterprise, please contact sales@weave.works. There is no need to install the open source version of Weave GitOps before installing Weave GitOps Enterprise.
"},{"location":"enterprise/install-enterprise/#prerequisites","title":"Prerequisites","text":"To get up and running with Weave GitOps Enterprise: - create a Kubernetes cluster - add your cluster to kubeconfig\u2014which you'll get from Kubernetes\u2014so that the kubeconfig correctly points to the management cluster - create a Git repository; in the instructions below, we refer to a fleet-infra
repository - configure your Git client properly (if using GitHub, for example, then review their docs on setting your username and your email address) - obtain a valid entitlement secret from Weaveworks and apply it to your cluster - install a compatible version of Flux onto your cluster; see below for how-to guidance
To do this, you can use either brew or curl.
HomebrewCurlbrew install weaveworks/tap/gitops-ee\n
export VERSION=<VERSION>\ncurl --silent --location \"https://artifacts.wge.dev.weave.works/releases/bin/${VERSION}/gitops-$(uname)-$(uname -m).tar.gz\" | tar xz -C /tmp\nsudo mv /tmp/gitops /usr/local/bin\ngitops version\n
"},{"location":"enterprise/install-enterprise/#install-flux-onto-your-cluster-with-the-flux-bootstrap-command","title":"Install Flux Onto Your Cluster with the flux bootstrap
Command","text":"The flux bootstrap
command enables you to deploy Flux on a cluster the GitOps way. Go here for more information about the command.
flux bootstrap github \\\n--owner=<github username> \\\n--repository=fleet-infra \\\n--branch=main \\\n--path=./clusters/management \\\n--personal \\\n--components-extra image-reflector-controller,image-automation-controller\n
flux bootstrap gitlab \\\n--owner=<gitlab username> \\\n--repository=fleet-infra \\\n--branch=main \\\n--path=./clusters/management \\\n--personal \\\n--components-extra image-reflector-controller,image-automation-controller\n
Your private Git repo should have a clusters/management folder that includes the manifests Flux needs to operate, and that also generates a key value pair for Flux to access the repo.
At this point your Flux management cluster should be running. Take a look at the repository you created earlier.
"},{"location":"enterprise/install-enterprise/#apply-your-entitlements-secret-to-your-cluster","title":"Apply Your Entitlements Secret to Your Cluster","text":"As noted above, you receive your entitlements secret by contacting sales@weave.works. Use this command to apply it to the cluster:
kubectl apply -f entitlements.yaml\n
"},{"location":"enterprise/install-enterprise/#set-up-authentication-and-rbac","title":"Set up Authentication and RBAC","text":""},{"location":"enterprise/install-enterprise/#securing-access-to-the-dashboard","title":"Securing Access to the Dashboard","text":"There are two supported methods for logging in to the dashboard, that work with standard Kubernetes RBAC: - Login via an OIDC provider: recommended, as this will allow you to control permissions for existing users and groups that have already been configured to use OIDC. OIDC decouples the need to manage user lists from the application, allowing it to be managed via a central system designed for that purpose (i.e. the OIDC provider). OIDC also enables the creation of groups\u2014either via your provider's own systems or by using a connector like Dex. - Login via a cluster user account: which is insecure, and which we only recommend for local and development environments or if you need to activate emergency access to a damaged cluster. However, it is an option if an OIDC provider is not available.
You may decide to give your engineering teams access to the WGE dashboard so they can view and manage their workloads. In this case, you will want to secure dashboard access and restrict who can interact with it. Weave GitOps Enterprise integrates with your OIDC provider and uses standard Kubernetes RBAC to give you fine-grained control of the dashboard users' permissions.
OIDC extends the OAuth2 authorization protocol by including an additional field (ID Token) that contains information (claims) about a user's identity. After a user successfully authenticates with the OIDC provider, Weave GitOps Enterprise uses this information to impersonate the user in any calls to the Kubernetes API. This allows cluster administrators to use RBAC rules to control access to the cluster and the dashboard.
Login via an OIDC providerConfiguring OIDC with Dex and GitHubLogin via a cluster user accountTo login via your OIDC provider, create a Kubernetes secret to store the OIDC configuration. This configuration consists of the following parameters:
Parameter Description DefaultissuerURL
The URL of the issuer; typically, the discovery URL without a path clientID
The client ID set up for Weave GitOps in the issuer clientSecret
The client secret set up for Weave GitOps in the issuer redirectURL
The redirect URL set up for Weave GitOps in the issuer\u2014typically the dashboard URL, followed by /oauth2/callback
tokenDuration
The time duration that the ID Token will remain valid after successful authentication \"1h0m0s\" oidcUsernamePrefix
The prefix added to users when impersonating API calls to the Kubernetes API, equivalent to --oidc-username-prefix oidcGroupsPrefix
The prefix added to groups when impersonating API calls to the Kubernetes API, equivalent to --oidc-groups-prefix Ensure that your OIDC provider has been set up with a client ID/secret and the dashboard's redirect URL.
Create a secret named oidc-auth
in the flux-system
namespace with these parameters set:
kubectl create secret generic oidc-auth \\\n--namespace flux-system \\\n--from-literal=issuerURL=<oidc-issuer-url> \\\n--from-literal=clientID=<client-id> \\\n--from-literal=clientSecret=<client-secret> \\\n--from-literal=redirectURL=<redirect-url> \\\n--from-literal=tokenDuration=<token-duration>\n
Once the HTTP server starts, unauthenticated users will have to click 'Login With OIDC Provider' to log in or use the cluster account (if configured). Upon successful authentication, the users' identities will be impersonated in any calls made to the Kubernetes API, as part of any action they take in the dashboard. By default the Helm chart will configure RBAC correctly, but we recommend reading the service account and user permissions pages to understand which actions are needed for Weave GitOps to function correctly.
Important
This is an insecure method of securing your dashboard which we only recommend for local and development environments, or if you need to activate emergency access to a damaged cluster.
Note also that this mechanism only exists for a single user. You will not be able to create multiple users. Weave GitOps does not provide its own authentication mechanism. For secure and fully-featured authentication we strongly recommend using an OIDC provider, as described in the other tab.
"},{"location":"enterprise/install-enterprise/#customization","title":"Customization","text":"For some OIDC configurations, you may need to customise the requested scopes or claims.
The oidcUsernamePrefix
and oidcGroupsPrefix
work in the same way as the Kubernetes kube-apiserver command-line options, if you need them for Kubernetes, you will likely need them here.
By default, the following scopes are requested: \"openid\",\"offline_access\",\"email\",\"groups\".
The \"openid\" scope is mandatory for OpenID auth and will be added if not provided. The \"email\" and \"groups\" scopes are commonly used as unique identifiers in organisations.
\"offline_access\" allows us to refresh OIDC tokens to keep login sessions alive for as long as a refresh token is valid. You can, however, change the defaults.
kubectl create secret generic oidc-auth \\\n--namespace flux-system \\\n--from-literal=issuerURL=<oidc-issuer-url> \\\n--from-literal=clientID=<client-id> \\\n--from-literal=clientSecret=<client-secret> \\\n--from-literal=redirectURL=<redirect-url> \\\n--from-literal=tokenDuration=<token-duration> \\\n--from-literal=customScopes=custom,scopes\n
The format for the customScopes
key is a comma-separated list of scopes to request. In this case, \"custom\", \"scopes\", and \"openid\" would be requested."},{"location":"enterprise/install-enterprise/#claims","title":"Claims","text":"By default, the following claims are parsed from the OpenID ID Token \"email\" and \"groups\". These are presented as the user
and groups
when WGE communicates with your Kubernetes API server.
This is equivalent to configuring your kube-apiserver
with --oidc-username-claim=email --oidc-groups-claim=groups
.
Again, you can configure these from the oidc-auth
Secret
.
kubectl create secret generic oidc-auth \\\n--namespace flux-system \\\n--from-literal=issuerURL=<oidc-issuer-url> \\\n--from-literal=clientID=<client-id> \\\n--from-literal=clientSecret=<client-secret> \\\n--from-literal=redirectURL=<redirect-url> \\\n--from-literal=tokenDuration=<token-duration> \\\n--from-literal=claimUsername=sub \\\n--from-literal=claimGroups=groups\n
There are two separate configuration keys. You can override them separately. They should match your kube-apiserver
configuration."},{"location":"enterprise/install-enterprise/#configuring-oidc-with-dex-and-github","title":"Configuring OIDC with Dex and GitHub","text":"This example uses Dex and its GitHub connector to show you how to log in to the Weave GitOps dashboard by authenticating with your GitHub account. It assumes you have already installed Weave GitOps on a Kubernetes cluster, per the instructions above, and have also enabled TLS.
Dex is an identity service that uses OpenID Connect to drive authentication for other apps. There are other solutions for identity and access management, such as Keycloak.
Create a namespace where you will install Dex:
---\napiVersion: v1\nkind: Namespace\nmetadata:\nname: dex\n
Get a GitHub ClientID and Client secret by creating a new OAuth application.
kubectl create secret generic github-client \\\n--namespace=dex \\\n--from-literal=client-id=${GITHUB_CLIENT_ID} \\\n--from-literal=client-secret=${GITHUB_CLIENT_SECRET}\n
"},{"location":"enterprise/install-enterprise/#deploy-dex","title":"Deploy Dex","text":"Use HelmRepository
and HelmRelease
objects to let Flux deploy everything.
---\napiVersion: source.toolkit.fluxcd.io/v1beta1\nkind: HelmRepository\nmetadata:\nname: dex\nnamespace: dex\nspec:\ninterval: 1m\nurl: https://charts.dexidp.io\n---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: dex\nnamespace: dex\nspec:\ninterval: 5m\nchart:\nspec:\nchart: dex\nversion: 0.15.3\nsourceRef:\nkind: HelmRepository\nname: dex\nnamespace: dex\ninterval: 1m\nvalues:\nenvVars:\n- name: GITHUB_CLIENT_ID\nvalueFrom:\nsecretKeyRef:\nname: github-client\nkey: client-id\n- name: GITHUB_CLIENT_SECRET\nvalueFrom:\nsecretKeyRef:\nname: github-client\nkey: client-secret\nconfig:\n# Set it to a valid URL\nissuer: https://dex.dev.example.tld\n\n# See https://dexidp.io/docs/storage/ for more options\nstorage:\ntype: memory\n\nstaticClients:\n- name: 'Weave GitOps'\nid: weave-gitops\nsecret: AiAImuXKhoI5ApvKWF988txjZ+6rG3S7o6X5En\nredirectURIs:\n- 'https://localhost:9001/oauth2/callback'\n- 'https://0.0.0.0:9001/oauth2/callback'\n- 'http://0.0.0.0:9001/oauth2/callback'\n- 'http://localhost:4567/oauth2/callback'\n- 'https://localhost:4567/oauth2/callback'\n- 'http://localhost:3000/oauth2/callback'\n\nconnectors:\n- type: github\nid: github\nname: GitHub\nconfig:\nclientID: $GITHUB_CLIENT_ID\nclientSecret: $GITHUB_CLIENT_SECRET\nredirectURI: https://dex.dev.example.tld/callback\norgs:\n- name: weaveworks\nteams:\n- team-a\n- team-b\n- QA\n- name: ww-test-org\ningress:\nenabled: true\nclassName: nginx\nannotations:\ncert-manager.io/cluster-issuer: letsencrypt-prod\nhosts:\n- host: dex.dev.example.tld\npaths:\n- path: /\npathType: ImplementationSpecific\ntls:\n- hosts:\n- dex.dev.example.tld\nsecretName: dex-dev-example-tld\n
An important part of the configuration is the orgs
field on the GitHub connector, which allows you to define groups within a GitHub organisation:
orgs:\n- name: weaveworks\nteams:\n- team-a\n- team-b\n- QA\n
In this example, the GitHub organisation is weaveworks
and all members of the team-a
, team-b
, and QA
teams can authenticate. Group membership is added to the user.
Based on these groups, we can bind roles to groups:
Expand to see group role bindings---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\nname: wego-test-user-read-resources\nnamespace: flux-system\nsubjects:\n- kind: Group\nname: weaveworks:QA\nnamespace: flux-system\nroleRef:\nkind: Role\nname: wego-admin-role\napiGroup: rbac.authorization.k8s.io\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\nname: wego-admin-role\nnamespace: flux-system\nrules:\n- apiGroups: [\"\"]\nresources: [\"secrets\", \"pods\" ]\nverbs: [ \"get\", \"list\" ]\n- apiGroups: [\"apps\"]\nresources: [ \"deployments\", \"replicasets\"]\nverbs: [ \"get\", \"list\" ]\n- apiGroups: [\"kustomize.toolkit.fluxcd.io\"]\nresources: [ \"kustomizations\" ]\nverbs: [ \"get\", \"list\", \"patch\" ]\n- apiGroups: [\"helm.toolkit.fluxcd.io\"]\nresources: [ \"helmreleases\" ]\nverbs: [ \"get\", \"list\", \"patch\" ]\n- apiGroups: [\"source.toolkit.fluxcd.io\"]\nresources: [\"buckets\", \"helmcharts\", \"gitrepositories\", \"helmrepositories\", \"ocirepositories\"]\nverbs: [\"get\", \"list\", \"patch\"]\n- apiGroups: [\"\"]\nresources: [\"events\"]\nverbs: [\"get\", \"watch\", \"list\"]\n
In the same way, we can bind cluster roles to a group:
Expand to see group cluster role bindings---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\nname: weaveworks:team-a\nsubjects:\n- kind: Group\nname: weaveworks:team-a\napiGroup: rbac.authorization.k8s.io\nroleRef:\nkind: ClusterRole\nname: cluster-admin\napiGroup: rbac.authorization.k8s.io\n
"},{"location":"enterprise/install-enterprise/#set-up-a-static-user","title":"Set up a Static User","text":"For a static user, add staticPasswords
to the config
:
spec:\nvalues:\nconfig:\nstaticPasswords:\n- email: \"admin@example.tld\"\nhash: \"$2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W\"\nusername: \"admin\"\nuserID: \"08a8684b-db88-4b73-90a9-3cd1661f5466\"\n
Generate a static user password via the gitops
CLI:
PASSWORD=\"<your password>\"\necho -n $PASSWORD | gitops get bcrypt-hash\n$2a$10$OS5NJmPNEb13UgTOSKnMxOWlmS7mlxX77hv4yAiISvZ71Dc7IuN3q\n
"},{"location":"enterprise/install-enterprise/#oidc-login","title":"OIDC Login","text":"Using the \"Login with OIDC Provider\" button:
We have to authorize the GitHub OAuth application:
After that, grant access to Dex:
Now we are logged in with our GitHub user and can see all of the resources we have access to:
"},{"location":"enterprise/install-enterprise/#configuring-the-emergency-user","title":"Configuring the Emergency User","text":"Before you log in via the emergency user account, you need to generate a bcrypt hash for your chosen password and store it as a secret in Kubernetes. There are several different ways to generate a bcrypt hash. This guide uses gitops get bcrypt-hash
from our CLI.
Generate the password by running:
PASSWORD=\"<your password>\"\necho -n $PASSWORD | gitops get bcrypt-hash\n$2a$10$OS5NJmPNEb13UgTOSKnMxOWlmS7mlxX77hv4yAiISvZ71Dc7IuN3q\n
Now create a Kubernetes secret to store your chosen username and the password hash:
kubectl create secret generic cluster-user-auth \\\n--namespace flux-system \\\n--from-literal=username=wego-admin \\\n--from-literal=password='$2a$10$OS5NJmPNEb13UgTOSKnMxOWlmS7mlxX77hv4yAiISvZ71Dc7IuN3q'\n
You should now be able to login via the cluster user account using your chosen username and password.
"},{"location":"enterprise/install-enterprise/#updating-the-emergency-user","title":"Updating the Emergency User","text":"To change either the username or the password, recreate the cluster-user-auth
with the new details.
Only one emergency user can be created this way. To add more users, enable an OIDC provider.
"},{"location":"enterprise/install-enterprise/#user-permissions","title":"User Permissions","text":"By default, both a ClusterRole and Role are generated for the emergency user. Both have the same permissions, with the former being optional and the latter being bound to the flux-system
namespace (where Flux stores its resources by default). The default set of rules are configured like this:
rules:\n# Flux Resources\n- apiGroups: [\"source.toolkit.fluxcd.io\"]\nresources: [ \"buckets\", \"helmcharts\", \"gitrepositories\", \"helmrepositories\", \"ocirepositories\" ]\nverbs: [ \"get\", \"list\", \"watch\", \"patch\" ]\n\n- apiGroups: [\"kustomize.toolkit.fluxcd.io\"]\nresources: [ \"kustomizations\" ]\nverbs: [ \"get\", \"list\", \"watch\", \"patch\" ]\n\n- apiGroups: [\"helm.toolkit.fluxcd.io\"]\nresources: [ \"helmreleases\" ]\nverbs: [ \"get\", \"list\", \"watch\", \"patch\" ]\n\n- apiGroups: [ \"notification.toolkit.fluxcd.io\" ]\nresources: [ \"providers\", \"alerts\" ]\nverbs: [ \"get\", \"list\", \"watch\", \"patch\" ]\n\n- apiGroups: [\"infra.contrib.fluxcd.io\"]\nresources: [\"terraforms\"]\nverbs: [ \"get\", \"list\", \"watch\", \"patch\" ]\n\n# Read access for all other Kubernetes objects\n- apiGroups: [\"*\"]\nresources: [\"*\"]\nverbs: [ \"get\", \"list\", \"watch\" ]\n
These permissions give the emergency user Administrator-level powers. We do not advise leaving it active on production systems.
If required, the permissions can be expanded with the rbac.additionalRules
field in the Helm Chart. Follow the instructions in the next section in order to configure RBAC correctly.
To remove the emergency user as a login method, set the following values in the Helm Chart:
#\nadminUser:\ncreate: false\n#\nadditionalArgs:\n- --auth-methods=oidc\n#\n
If you are disabling an already existing emergency user, you will need to manually delete the Kubernetes Secret and any User Roles that were created on the cluster.
"},{"location":"enterprise/install-enterprise/#gitops-dashboard-service-account-permissions","title":"GitOps Dashboard Service Account Permissions","text":"This section covers the service account permissions for the Weave GitOps application, which the WGE UI requires to work. The default permissions will generate a cluster role that includes the permissions:
rules:\n- apiGroups: [\"\"]\nresources: [\"users\", \"groups\"] verbs: [ \"impersonate\" ]\n- apiGroups: [\"\"]\nresources: [ \"secrets\" ]\nverbs: [ \"get\", \"list\" ]\n- apiGroups: [ \"\" ]\nresources: [ \"namespaces\" ]\nverbs: [ \"get\", \"list\" ]\n
These allow the pod to do three things: - Impersonate the user and operate in the cluster as them - Read the available namespaces; this is required to understand users' permissions - Read the cluster-user-auth
and oidc-auth
secrets, the default secrets to store the emergency cluster user account and OIDC configuration (see securing access to the dashboard)
The primary way Weave GitOps queries the Kube API is via impersonation
. The permissions granted to users and groups that Weave GitOps can impersonate will determine the scope of actions that WGE can take within your cluster.
The application, not the cluster, authenticates the user, either via the emergency cluster user credentials or OIDC. Then it makes Kube API calls on the user's behalf. This is equivalent to making a kubectl call like:
$ kubectl get deployments --as aisha@example.com\n
Assuming the user aisha@example.com
has permissions to get deployments within the cluster, this will return those deployments. The same occurs within the application, so properly configuring application permissions is very important. Without proper restrictions the application can impersonate very powerful users
or groups
. For example, the system:masters
is a group generally bound to the cluster-admin
role, which can do anything.
The application itself uses get namespace permissions to pre-cache the list of available namespaces. As the user accesses resources, their permissions within various namespaces are also cached to speed up future operations.
"},{"location":"enterprise/install-enterprise/#reading-the-cluster-user-auth-and-oidc-auth-secrets","title":"Reading the cluster-user-auth
and oidc-auth
Secrets","text":"The cluster-user-auth
and oidc-auth
secrets provide information for authenticating to the application. The former holds the username and bcrypt-hashed password for the emergency user, and the latter holds OIDC configuration.
The application needs to be able to access these secrets in order to authenticate users.
"},{"location":"enterprise/install-enterprise/#user-permissions_1","title":"User Permissions","text":"This section discusses the Kubernetes permissions needed by Weave GitOps application users and groups. At a minimum, a User should be bound to a Role in the flux-system
namespace\u2014which is where Flux stores its resources by default\u2014with the following permissions:
rules:\n# Flux Resources\n- apiGroups: [\"source.toolkit.fluxcd.io\"]\nresources: [ \"buckets\", \"helmcharts\", \"gitrepositories\", \"helmrepositories\", \"ocirepositories\" ]\nverbs: [ \"get\", \"list\", \"watch\", \"patch\" ]\n\n- apiGroups: [\"kustomize.toolkit.fluxcd.io\"]\nresources: [ \"kustomizations\" ]\nverbs: [ \"get\", \"list\", \"watch\", \"patch\" ]\n\n- apiGroups: [\"helm.toolkit.fluxcd.io\"]\nresources: [ \"helmreleases\" ]\nverbs: [ \"get\", \"list\", \"watch\", \"patch\" ]\n\n- apiGroups: [ \"notification.toolkit.fluxcd.io\" ]\nresources: [ \"providers\", \"alerts\" ]\nverbs: [ \"get\", \"list\", \"watch\", \"patch\" ]\n\n- apiGroups: [\"infra.contrib.fluxcd.io\"]\nresources: [\"terraforms\"]\nverbs: [ \"get\", \"list\", \"watch\", \"patch\" ]\n\n# Read access for all other Kubernetes objects\n- apiGroups: [\"*\"]\nresources: [\"*\"]\nverbs: [ \"get\", \"list\", \"watch\" ]\n
For a wider scope, the User can be bound to a ClusterRole with the same set.
On top of this you can add other permissions to view WGE resources like GitOpsSets
and Templates
.
The following table lists resources that Flux works with directly.
API Group Resources Permissions kustomize.toolkit.fluxcd.io kustomizations get, list, patch helm.toolkit.fluxcd.io Helm Releases get, list, patch source.toolkit.fluxcd.io buckets, Helm charts, Git repositories, Helm repositories, OCI repositories get, list, patch notification.toolkit.fluxcd.io providers, alerts get, list infra.contrib.fluxcd.io Terraform get, list, patchWeave GitOps needs to be able to query the CRDs that Flux uses before it can accurately display Flux state. The get
and list
permissions facilitate this.
The patch
permissions are used for two features: to suspend and resume reconciliation of a resource by modifying the 'spec' of a resource, and to force reconciliation of a resource by modifying resource annotations. These features work in the same way that flux suspend
, flux resume
, and flux reconcile
does on the CLI.
Weave GitOps reads basic resources so that it can monitor the effect that Flux has on what's running.
Reading secrets
enables Weave GitOps to monitor the state of Helm releases as that's where it stores the state by default. For clarity, these are the Helm release objects, not the Flux HelmRelease resources (which are dealt with by the earlier section).
Flux communicates the status of itself primarily via events. These events will show when reconciliations start and stop, whether they're successful, and information as to why they're not.
"},{"location":"enterprise/install-enterprise/#login-ui","title":"Login UI","text":"The label of the OIDC button on the login screen is configurable via a feature flag environment variable. This can give your users a more familiar experience when logging in.
Adjust the configuration in the Helm values.yaml
file or the spec.values
section of the Weave GitOps HelmRelease
resource:
extraEnvVars:\n- name: WEAVE_GITOPS_FEATURE_OIDC_BUTTON_LABEL\nvalue: \"Login with ACME\"\n
"},{"location":"enterprise/install-enterprise/#recommended-rbac-configuration","title":"Recommended RBAC Configuration","text":"This section is purposefully vague as we intend to give a broad idea of how to implement such a system. The specifics will depend on your circumstances and goals.
Our general recommendation is to use OIDC and a small number of groups that Weave GitOps can impersonate.
Configuring Weave GitOps to impersonate Kubernetes groups rather than users has the following benefits: - A user's permissions for impersonation by Weave GitOps can be separate from any other permissions that they may or may not have within the cluster. - Users do not have to be individually managed within the cluster and can have their permissions managed together.
"},{"location":"enterprise/install-enterprise/#example-setup","title":"Example Setup","text":"Assume that your company has the following people in OIDC: - Aisha, a cluster admin, who should have full admin access to Weave GitOps - Brian, lead of Team-A, who should have admin permissions to their team's namespace in Weave GitOps and read-only otherwise - June and Jo, developers in Team-A who should have read-only access to Weave GitOps
You can then create three groups:
wego-admin
ClusterRole
, created by Helm, wego-admin-cluster-role
wego-team-a-admin
Role
, using the same permissions as wego-admin-role
, created in Team-A's namespacewego-readonly
ClusterRole
that matches wego-admin-cluster-role
but with no patch
permissions.Using OIDC for cluster and Weave GitOps Authentication
If the same OIDC provider is used to authenticate a user with the cluster itself (e.g. for use with kubectl
) and to Weave GitOps then, depending on OIDC configuration, they may end up with the super-set of their permissions from Weave GitOps and any other permissions granted to them.
This can lead to unintended consequences, like viewing secrets
. To avoid this, OIDC providers will often let you configure which groups are returned to which clients. The Weave GitOps groups should not be returned to the cluster client (and vice versa).
The yaml to configure these permissions would look roughly like:
Expand to see example RBAC # Admin cluster role\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\nname: wego-admin-cluster-role\nrules:\n- apiGroups: [\"\"]\nresources: [\"secrets\", \"pods\" ]\nverbs: [ \"get\", \"list\" ]\n- apiGroups: [\"apps\"]\nresources: [ \"deployments\", \"replicasets\"]\nverbs: [ \"get\", \"list\" ]\n- apiGroups: [\"kustomize.toolkit.fluxcd.io\"]\nresources: [ \"kustomizations\" ]\nverbs: [ \"get\", \"list\", \"patch\" ]\n- apiGroups: [\"helm.toolkit.fluxcd.io\"]\nresources: [ \"helmreleases\" ]\nverbs: [ \"get\", \"list\", \"patch\" ]\n- apiGroups: [\"source.toolkit.fluxcd.io\"]\nresources: [ \"buckets\", \"helmcharts\", \"gitrepositories\", \"helmrepositories\", \"ocirepositories\" ]\nverbs: [ \"get\", \"list\", \"patch\" ]\n- apiGroups: [\"\"]\nresources: [\"events\"]\nverbs: [\"get\", \"watch\", \"list\"]\n---\n# Read-only cluster role\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\nname: wego-readonly-role\nrules:\n# All the 'patch' permissions have been removed\n- apiGroups: [\"\"]\nresources: [\"secrets\", \"pods\" ]\nverbs: [ \"get\", \"list\" ]\n- apiGroups: [\"apps\"]\nresources: [ \"deployments\", \"replicasets\"]\nverbs: [ \"get\", \"list\" ]\n- apiGroups: [\"kustomize.toolkit.fluxcd.io\"]\nresources: [ \"kustomizations\" ]\nverbs: [ \"get\", \"list\" ]\n- apiGroups: [\"helm.toolkit.fluxcd.io\"]\nresources: [ \"helmreleases\" ]\nverbs: [ \"get\", \"list\" ]\n- apiGroups: [\"source.toolkit.fluxcd.io\"]\nresources: [ \"buckets\", \"helmcharts\", \"gitrepositories\", \"helmrepositories\", \"ocirepositories\" ]\nverbs: [ \"get\", \"list\" ]\n- apiGroups: [\"\"]\nresources: [\"events\"]\nverbs: [\"get\", \"watch\", \"list\"]\n---\n# Bind the cluster admin role to the wego-admin group\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\nname: wego-cluster-admin\nsubjects:\n- kind: Group\nname: wego-admin # only Aisha is a member\napiGroup: 
rbac.authorization.k8s.io\nroleRef:\nkind: ClusterRole\nname: wego-admin-cluster-role\napiGroup: rbac.authorization.k8s.io\n---\n# Bind the admin role in the team-a namespace for the wego-team-a-admin group\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\nname: wego-team-a-admin-role\nnamespace: team-a\nsubjects:\n- kind: Group\nname: wego-team-a-admin # Aisha & Brian are members\napiGroup: rbac.authorization.k8s.io\nroleRef:\n# Use the cluster role to set rules, just bind them in the team-a namespace\nkind: ClusterRole\nname: wego-admin-role\napiGroup: rbac.authorization.k8s.io\n---\n# Bind the read-only role to the read-only group\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\nname: wego-readonly-role\nsubjects:\n- kind: Group\nname: wego-readonly # Everyone is a member\napiGroup: rbac.authorization.k8s.io\nroleRef:\nkind: ClusterRole\nname: wego-readonly-role\napiGroup: rbac.authorization.k8s.io\n---\n
"},{"location":"enterprise/install-enterprise/#configure-access-for-writing-to-git-from-the-weave-gitops-enterprise-ui","title":"Configure Access for Writing to Git from the Weave GitOps Enterprise UI","text":"Here we provide guidance for GitHub, GitLab, BitBucket Server, and Azure DevOps.
GitHubBitBucket ServerAzure DevOpsGitHub requires no additional configuration for OAuth git access
Create a GitLab OAuth application that will request api
permissions to create pull requests on your behalf.
Follow the GitLab docs.
The application should have at least these scopes:
api
openid
email
profile
Add callback URLs to the application for each address the UI will be exposed on, e.g.:
https://localhost:8000/oauth/gitlab
for port-forwarding and testinghttps://git.example.com/oauth/gitlab
for production useSave your application, taking note of the Client ID and Client Secret. Save them into the git-provider-credentials
secret, along with:
GIT_HOST_TYPES
to tell WGE that the host is gitlabGITLAB_HOSTNAME
where the OAuth app is hostedReplace values in this snippet and run:
kubectl create secret generic git-provider-credentials --namespace=flux-system \\\n--from-literal=\"GITLAB_CLIENT_ID=13457\" \\\n--from-literal=\"GITLAB_CLIENT_SECRET=24680\" \\\n--from-literal=\"GITLAB_HOSTNAME=git.example.com\" \\\n--from-literal=\"GIT_HOST_TYPES=git.example.com=gitlab\"\n
Create a new incoming application link from the BitBucket administration dashboard. You will be asked to enter a unique name and the redirect URL for the external application. The redirect URL should be set to <WGE dashboard URL>/oauth/bitbucketserver
. You will also need to select permissions for the application. The minimum set of permissions needed for WGE to create pull requests on behalf of users is Repositories - Write
. An example of configuring these settings is shown below.
Configuring a new incoming application link
Save your application and take note of the Client ID and Client Secret. Save them into the git-provider-credentials
secret, along with:
GIT_HOST_TYPES
to tell WGE that the host is bitbucket-serverBITBUCKET_SERVER_HOSTNAME
where the OAuth app is hostedReplace values in this snippet and run:
kubectl create secret generic git-provider-credentials --namespace=flux-system \\\n--from-literal=\"BITBUCKET_SERVER_CLIENT_ID=13457\" \\\n--from-literal=\"BITBUCKET_SERVER_CLIENT_SECRET=24680\" \\\n--from-literal=\"BITBUCKET_SERVER_HOSTNAME=git.example.com\" \\\n--from-literal=\"GIT_HOST_TYPES=git.example.com=bitbucket-server\"\n
If the secret is already present, use the following command to update it using your default editor:
kubectl edit secret generic git-provider-credentials --namespace=flux-system\n
Info
If BitBucket Server is running on the default port (7990), make sure you include the port number in the values of the secret. For example: GIT_HOST_TYPES=git.example.com:7990=bitbucket-server
Navigate to VisualStudio and register a new application, as explained in the docs. Set the authorization callback URL and select which scopes to grant. Set the callback URL to <WGE dashboard URL>/oauth/azuredevops
.
Select the Code (read and write)
scope from the list. This is necessary so that WGE can create pull requests on behalf of users. An example of configuring these settings is shown below.
Creating a new application
After creating your application, you will be presented with the application settings. Take note of the App ID
and Client Secret
values\u2014you will use them to configure WGE.
Application settings
In your cluster, create a secret named git-provider-credentials
that contains the App ID
and Client Secret
values from the newly created application.
Replace values in this snippet and run:
kubectl create secret generic git-provider-credentials --namespace=flux-system \\\n--from-literal=\"AZURE_DEVOPS_CLIENT_ID=<App ID value>\" \\\n--from-literal=\"AZURE_DEVOPS_CLIENT_SECRET=<Client Secret value>\"\n
WGE is now configured to ask users for authorization the next time a pull request must be created as part of using a template. Note that each user can view and manage which applications they have authorized by navigating to https://app.vsaex.visualstudio.com/me.
"},{"location":"enterprise/install-enterprise/#tls-configuration","title":"TLS Configuration","text":"By default, the WGE UI pod will listen on port 8000
with TLS enabled. WGE will generate and use a self-signed certificate for this purpose.
It can then be accessed via port-forwarding:
kubectl port-forward --namespace flux-system svc/clusters-service 8000:8000
If you're using an ingress controller to terminate TLS you can disable it in the Helm release:
values:\ntls:\nenabled: false\n
Other ingress configuration changes can be made via the ingress configuration
values:\ningress:\nenabled: true\n... other parameters specific to the ingress type ...\n
"},{"location":"enterprise/install-enterprise/#configure-helm-chart-and-commit","title":"Configure Helm Chart and Commit","text":"We deploy WGE via a Helm chart. We'll save and adapt the below template before committing it in Git to a Flux-reconciled path.
Clone the newly created repo locally. We're gonna add some things!
git clone git@<provider>:<username>/fleet-infra\ncd fleet-infra\n
Download the helm-release to clusters/management/weave-gitops-enterprise.yaml
.
import ExampleWGE from \"../assets/example-enterprise-helm.yaml\"; import ExampleWGEContent from \"!!raw-loader!../assets/example-enterprise-helm.yaml\";
Expand to see file contentsOnce you have copied the above file, open and adjust the following configuration options:
"},{"location":"enterprise/install-enterprise/#valuesconfigcapirepositoryurl","title":"values.config.capi.repositoryURL
","text":"Ensure this has been set to your repository URL.
"},{"location":"enterprise/install-enterprise/#valuesconfigcapirepositorypath","title":"values.config.capi.repositoryPath
","text":"By default, WGE will create new clusters in the clusters/management/clusters
path. You can configure it with values.config.capi.repositoryPath
. You might want to change it to clusters/my-cluster/cluster
if you configured Flux to reconcile ./clusters/my-cluster
instead.
values.config.capi.repositoryClustersPath
","text":"The other important path to configure is where you'll store applications and workloads run on the new cluster. By default this is ./clusters
. When a new cluster is specified, any selected profiles will be written to ./clusters/{.namespace}/{.clusterName}/profiles.yaml
. When the new cluster is bootstrapped, Flux will sync the ./clusters/{.namespace}/{.clusterName}
path.
To login to the WGE UI, generate a bcrypt hash for your chosen password and store it as a secret in the Kubernetes cluster. There are several different ways to generate a bcrypt hash. Here, we'll use gitops get bcrypt-hash
from our CLI.
PASSWORD=\"<Make up and insert a brand-new password here>\"\necho -n $PASSWORD | gitops get bcrypt-hash | kubectl create secret generic cluster-user-auth -n flux-system --from-literal=username=wego-admin --from-file=password=/dev/stdin\n
A validation to know it\u2019s working:
kubectl get secret -n flux-system cluster-user-auth\n
"},{"location":"enterprise/install-enterprise/#optional-install-policy-agent","title":"(Optional) Install Policy Agent","text":"Policy agent comes packaged with the WGE chart. To install it, set the following values:
values.policy-agent.enabled
: set to true to install the agent with WGEvalues.policy-agent.config.accountId
: organization name, used as identifiervalues.policy-agent.config.clusterId
: unique identifier for the clusterCommit and push all the files
git add clusters/management/weave-gitops-enterprise.yaml\ngit commit -m \"Deploy Weave GitOps Enterprise\"\ngit push\n
Flux will reconcile the helm-release and WGE will be deployed into the cluster. You can check the flux-system
namespace to verify all pods are running.
Here are a couple of options for you to take your next steps with WGE. Explore one option or all of them, in no particular order.
See also our guide to installing Weave GitOps Enterprise on Azure: - An Azure cluster deployed with either the Azure Portal or Azure CLI tools. - Azure Flux add-on deployed by adding a GitOps configuration, either via the Azure Portal or the CLI tool.
Note that this documentation applies to both Azure AKS and Azure ARC clusters.
"},{"location":"enterprise/join-cluster-azure-flux/#initial-status","title":"Initial Status","text":"The Azure cluster already has the Azure Flux add-on installed. This differs from CNCF Flux in that there are two additional controllers: - fluxconfig-agent - fluxconfig-controller
These controllers have CRDs that define the version of Flux and any Flux Kustomizations that are managed via the Azure CLI.
The CRDs are all apiVersion: clusterconfig.azure.com/v1beta1.
The Kinds are: - FluxConfig - FluxConfigSyncStatus
The FluxConfig Kind configures Flux itself and creates any Kustomizations that refer to a single-source GitRepository. This guide assumes that this process is already completed and that a top-level Kustomization has been configured for the fleet repo cluster directory already set up at clusters/default/CLUSTER_NAME/manifests
.
The CRDs that this FluxConfig generates are Flux CRDs, as follows: - GitRepositories - Kustomizations
These generated resources are viewable through Weave GitOps Enterprise.
Weave GitOps itself is deployed by Flux using a HelmRelease that pulls the Helm Chart. It doesn\u2019t need to install Flux, as it is assumed that Flux is already deployed. Therefore it can use the Azure Flux add-on, which poses no conflicts with WGE itself.
Incompatibilities exist between the Azure Flux add-on and CNCF Flux. They should not be run at the same time, on the same cluster, due to conflicts in the CRD management. If the Flux bootstrapping process IS run on a cluster with Azure Flux add-on, it will override the Azure Flux add-on with the Flux version used in the bootstrap. Also, it would add Flux manifests to the source Git repository. This would be undesirable.
Azure Flux add-on-enabled clusters keep the Azure Flux add-on in place.
"},{"location":"enterprise/join-cluster-azure-flux/#joining-a-cluster-to-wge","title":"Joining a Cluster to WGE","text":""},{"location":"enterprise/join-cluster-azure-flux/#setting-up-a-service-account","title":"Setting up a Service Account","text":"To join a cluster, you'll set up a service account with permissions and create a kubeconfig for the service account. This service account does not need cluster admin permissions unless you are bootstrapping Flux into the cluster. The bootstrapping process will either be A) carried out before joining the cluster to WGE; or B) configured specifically for Flux to be bootstrapped into the cluster from WGE.
If you already have Flux running, you can create the service account in your fleet repo:
apiVersion: v1\nkind: ServiceAccount\nmetadata:\nname: wgesa\nnamespace: default\n---\napiVersion: v1\nkind: Secret\ntype: kubernetes.io/service-account-token\nmetadata:\nname: wgesa-secret\nnamespace: default\nannotations:\nkubernetes.io/service-account.name: \"wgesa\"\n
apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\nname: impersonate-user-groups\nsubjects:\n- kind: ServiceAccount\nname: wgesa\nnamespace: default\nroleRef:\nkind: ClusterRole\nname: user-groups-impersonator\napiGroup: rbac.authorization.k8s.io\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\nname: user-groups-impersonator\nrules:\n- apiGroups: [\"\"]\nresources: [\"users\", \"groups\"]\nverbs: [\"impersonate\"]\n- apiGroups: [\"\"]\nresources: [\"namespaces\"]\nverbs: [\"get\", \"list\"]\n
Commit to your fleet repo to sync.
Create a secret to store the kubeconfig, and a GitopsCluster object in the WGE management cluster that points to the kubeconfig secret. This allows you to connect to the target cluster and read various Kubernetes objects\u2014including the Flux objects, such as:
Kubernetes 1.24+ will not create secrets for Service Accounts for you, so you have to add it yourself.
Add a new secret for the service account by adding to the service account yaml file in step 1.
Create a kubeconfig secret. We'll use a helper script to generate the kubeconfig, and then save it into static-kubeconfig.sh
:
#!/bin/bash\n\nif [[ -z \"$CLUSTER_NAME\" ]]; then\necho \"Ensure CLUSTER_NAME has been set\"\nexit 1\nfi\n\nif [[ -z \"$CA_CERTIFICATE\" ]]; then\necho \"Ensure CA_CERTIFICATE has been set to the path of the CA certificate\"\nexit 1\nfi\n\nif [[ -z \"$ENDPOINT\" ]]; then\necho \"Ensure ENDPOINT has been set\"\nexit 1\nfi\n\nif [[ -z \"$TOKEN\" ]]; then\necho \"Ensure TOKEN has been set\"\nexit 1\nfi\n\nexport CLUSTER_CA_CERTIFICATE=$(cat \"$CA_CERTIFICATE\" | base64)\n\nenvsubst <<EOF\napiVersion: v1\nkind: Config\nclusters:\n- name: $CLUSTER_NAME\n cluster:\n server: https://$ENDPOINT\n certificate-authority-data: $CLUSTER_CA_CERTIFICATE\nusers:\n- name: $CLUSTER_NAME\n user:\n token: $TOKEN\ncontexts:\n- name: $CLUSTER_NAME\n context:\n cluster: $CLUSTER_NAME\n user: $CLUSTER_NAME\ncurrent-context: $CLUSTER_NAME\n\nEOF\n
Create a secret for the generated kubeconfig in the WGE management cluster:
kubectl create secret generic demo-01-kubeconfig \\\n--from-file=value=./demo-01-kubeconfig\n
You can also take care of this step in WGE's Secrets UI, setting up a secret in SOPS or ESO.
Flux CRDs are compatible with the Azure Flux Configuration CRDs. This means that there are no compatibility issues between WGE and Azure Flux.
Create a GitopsCluster object. It must NOT be bootstrapped. Remove the annotation for bootstrap so it will not deploy Flux.
Commit to your fleet repo and sync.
Log in to your WGE management cluster to see if the cluster has appeared.
MSFT maintains CAPZ, the Azure CAPI provider. Currently there is no support for Azure Flux. A CAPI-based cluster will continue to run the Flux bootstrap process on cluster creation when managed by WGE, because there is no Azure Flux option.
"},{"location":"enterprise/join-cluster-azure-flux/#with-terraform-provider","title":"With Terraform Provider","text":"WGE uses TF-controller to deploy Terraform resources. For WGE to use the cluster as a target requires A) a resource created in the management cluster and B) a kubeconfig that maps to a service account in the target cluster. The Terraform cluster build typically creates this service account and then outputs to a secret store or local secret so that WGE can use it as a cluster. The Flux bootstrap process can be initiated directly with the Flux Terraform module, which deploys CNCF Flux to the target cluster.
Alternatively, you can apply an Azure Policy to provide the Azure Flux add-on. This is an example of how you can use the policy controls. This means you could come across clusters that are deployed with Terraform with the Azure Flux add-on already installed and would not run the Flux bootstrap process.
Either way, it is typical that Terraform-deployed clusters do not run the Flux bootstrap process at all, because it is usually already installed.
"},{"location":"enterprise/join-cluster-azure-flux/#with-crossplane","title":"With Crossplane","text":"The Azure Flux add-on is supported under Crossplane-deployed Azure clusters. Any clusters deployed with Crossplane that have the Azure Flux add-on enabled would also be added to WGE without running the bootstrap process.
"},{"location":"enterprise/releases-enterprise/","title":"Releases ENTERPRISE","text":"Info
This page details the changes for Weave GitOps Enterprise and its associated components. For Weave GitOps OSS, please see the release notes on GitHub.
"},{"location":"enterprise/releases-enterprise/#v0310","title":"v0.31.0","text":"2023-08-31
"},{"location":"enterprise/releases-enterprise/#highlights","title":"Highlights","text":"gitops connect cluster
.2023-08-17
"},{"location":"enterprise/releases-enterprise/#highlights_1","title":"Highlights","text":""},{"location":"enterprise/releases-enterprise/#ui","title":"UI","text":"GitRepository
or OCIRepository
has no artifact, stop generating with an error.2023-08-04
Warning
This release builds upon Weave GitOps v0.29.0 that has breaking changes from Flux v2.0.0. Please make sure that you read these release notes.
"},{"location":"enterprise/releases-enterprise/#dependency-versions_2","title":"Dependency versions","text":"2023-08-03
Danger
"},{"location":"enterprise/releases-enterprise/#breaking-changes","title":"\u26a0\ufe0f Breaking changes","text":"We introduced a breaking change in this release by upgrading to Flux v2 APIs, notably GitRepository
v1, Kustomization
v1, and Receiver
v1. This means that this version of Weave GitOps Enterprise is not compatible with previous versions of Flux v2, such as v0.41.x and earlier.
Follow Flux or Weave GitOps to upgrade to Flux v2 GA before upgrading Weave GitOps Enterprise.
"},{"location":"enterprise/releases-enterprise/#highlights_2","title":"Highlights","text":""},{"location":"enterprise/releases-enterprise/#flux","title":"Flux","text":"GitRepository
v1, Kustomization
v1, and Receiver
v1 resources. See Breaking Changes.2023-07-20
"},{"location":"enterprise/releases-enterprise/#highlights_3","title":"Highlights","text":"v1alpha1
of the CAPITemplate
and GitopsTemplate
CRDs. Please migrate to v1alpha2
of these CRDs. See the migration guide2023-07-07
"},{"location":"enterprise/releases-enterprise/#highlights_4","title":"Highlights","text":""},{"location":"enterprise/releases-enterprise/#explorer_2","title":"Explorer","text":"2023-06-22
"},{"location":"enterprise/releases-enterprise/#highlights_5","title":"Highlights","text":"2023-06-08
Bug fixes
"},{"location":"enterprise/releases-enterprise/#dependency-versions_7","title":"Dependency versions","text":"2023-05-25
"},{"location":"enterprise/releases-enterprise/#highlights_6","title":"Highlights","text":""},{"location":"enterprise/releases-enterprise/#gitopssets_2","title":"GitOpsSets","text":"ImagePolicy
. This allows you to include the latest version of an image in your templates, for example to keep a Deployment
up to date.\"weave.works/helm-version-filter\": \"> 0.0.0\"
to filter out rc releases\"weave.works/helm-version-filter\": \"> 1.0.0\"
to filter any pre 1.0 releases\"weave.works/helm-version-filter\": \"> 3.0.0-0\"
to filter any pre 3.0 releases but include rc releases(none)
"},{"location":"enterprise/releases-enterprise/#known-issues","title":"Known issues","text":""},{"location":"enterprise/releases-enterprise/#explorer_4","title":"Explorer","text":"2023-05-12
"},{"location":"enterprise/releases-enterprise/#highlights_7","title":"Highlights","text":""},{"location":"enterprise/releases-enterprise/#application-details","title":"Application Details","text":"metadata.name
field.2023-04-27
"},{"location":"enterprise/releases-enterprise/#highlights_8","title":"Highlights","text":""},{"location":"enterprise/releases-enterprise/#explorer_7","title":"Explorer","text":"2023-04-13
"},{"location":"enterprise/releases-enterprise/#highlights_9","title":"Highlights","text":"2023-03-30
"},{"location":"enterprise/releases-enterprise/#dependency-versions_12","title":"Dependency versions","text":"2023-03-16
"},{"location":"enterprise/releases-enterprise/#highlights_10","title":"Highlights","text":""},{"location":"enterprise/releases-enterprise/#ui_4","title":"UI","text":"2023-03-02
"},{"location":"enterprise/releases-enterprise/#highlights_11","title":"Highlights","text":""},{"location":"enterprise/releases-enterprise/#ui_5","title":"UI","text":"cluster
generator allows you to interact with the Weave GitOps Cluster inventory. GitOps Clusters that are added and removed to the inventory are reflected by the generator. That can be used to target for example to manage applications across a fleet of clusters.gitRepository
generator can now scan directories and paths with the new directory
option, which enables you to create for example dynamically Flux Kustomizations , based on your repository.apiClient
generator allows you to query an endpoint, and provide data for your template./metrics
endpoint ready to be collected2023-02-16
"},{"location":"enterprise/releases-enterprise/#highlights_12","title":"Highlights","text":"This release contains dependency upgrades and bug fixes. For a larger list of updates, check out the Weave GitOps v0.17.0 release.
"},{"location":"enterprise/releases-enterprise/#v0160","title":"v0.16.0","text":"2023-02-02
"},{"location":"enterprise/releases-enterprise/#highlights_13","title":"Highlights","text":""},{"location":"enterprise/releases-enterprise/#create-external-secrets-via-wge-ui","title":"Create External Secrets via WGE UI","text":"No breaking changes
"},{"location":"enterprise/releases-enterprise/#v0151","title":"v0.15.1","text":"2023-01-19
"},{"location":"enterprise/releases-enterprise/#highlights_14","title":"Highlights","text":""},{"location":"enterprise/releases-enterprise/#multi-repository-support-weave-gitops-enterprise-adapts-and-scales-to-your-repository-structure","title":"Multi Repository support. Weave GitOps Enterprise adapts and scales to your repository structure","text":"gitops create template
supporting --config
allows you to read command line flags from a config file and --output-dir
allows you to write files out to a directory instead of just stdoutNo breaking changes
"},{"location":"enterprise/releases-enterprise/#v0141","title":"v0.14.1","text":"2023-01-05
"},{"location":"enterprise/releases-enterprise/#highlights_15","title":"Highlights","text":""},{"location":"enterprise/releases-enterprise/#secrets-management","title":"Secrets management","text":"[UI] \"Tenant\" is renamed to \"Workspace\" on details page.
[UI] Use time.RFC3339 format for all timestamps of the workspaces tabs.
"},{"location":"enterprise/releases-enterprise/#other","title":"Other","text":"[UI] Error notification boundary does not allow user to navigate away from the page.
[Gitops run] GitOps Run doesn't ask to install dashboard twice
"},{"location":"enterprise/releases-enterprise/#dependency-versions_17","title":"Dependency versions","text":"No breaking changes
"},{"location":"enterprise/releases-enterprise/#v0130","title":"v0.13.0","text":"2022-12-22
"},{"location":"enterprise/releases-enterprise/#highlights_16","title":"Highlights","text":""},{"location":"enterprise/releases-enterprise/#gitops-templates-path-feature","title":"GitOps Templates Path feature","text":"spec:\nresourcetemplates:\n- path: ./clusters/${CLUSTER_NAME}/definition/cluster.yaml\ncontent:\n- apiVersion: cluster.x-k8s.io/v1alpha4\nkind: Cluster\nmetadata:\nname: ${CLUSTER_NAME}\n...\n- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4\nkind: AWSCluster\nmetadata:\nname: ${CLUSTER_NAME}\n...\n- path: ./clusters/${CLUSTER_NAME}/workloads/helmreleases.yaml\ncontent:\n- apiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: ${CLUSTER_NAME}-nginx\n...\n- apiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: ${CLUSTER_NAME}-cert-manager\n...\n
"},{"location":"enterprise/releases-enterprise/#workspace-ui","title":"Workspace UI","text":"github.com/mattn/go-tty
package (other options required pressing Enter after a keypress, this catches just a single numeric keypress) and opening URLs with the github.com/pkg/browser
package.[UI] Notifications Fixed provider page showing a 404.
"},{"location":"enterprise/releases-enterprise/#dependency-versions_18","title":"Dependency versions","text":"No breaking changes
"},{"location":"enterprise/releases-enterprise/#v0120","title":"v0.12.0","text":"2022-12-09
"},{"location":"enterprise/releases-enterprise/#highlights_17","title":"Highlights","text":"We highly recommend users of v0.11.0 upgrade to this version as it includes fixes for a number of UI issues.
"},{"location":"enterprise/releases-enterprise/#gitops-templates_1","title":"GitOps Templates","text":"spec:\ncharts:\nitems:\n- chart: cert-manager\nversion: v1.5.3\neditable: false\nrequired: true\nvalues:\ninstallCRDs: ${CERT_MANAGER_INSTALL_CRDS}\ntargetNamespace: cert-manager\nlayer: layer-1\ntemplate:\ncontent:\nmetadata:\nlabels:\napp.kubernetes.io/name: cert-manager\nspec:\nretries: ${CERT_MANAGER_RETRY_COUNT}\n
Supporting custom OIDC groups claims for azure/okta integration Support for OIDC custom username and group claims:
config\noidc:\nclaimUsername: \"\"\nclaimGroups: \"\"\n
"},{"location":"enterprise/releases-enterprise/#policy-commit-time-agent","title":"Policy commit-time agent","text":"Terraform CRD Error Users of the Terraform Controller will be pleased to know we\u2019ve addressed the issue where an error would be displayed if it had not been installed on all connected clusters.
Management cluster renaming If the name of the cluster where Weave GitOps Enterprise is installed, was changed from the default of management through the config.cluster.name parameter, certain workflows could fail such as fetching profiles, this has now been resolved.
"},{"location":"enterprise/releases-enterprise/#dependency-versions_19","title":"Dependency versions\u200b","text":"weave-gitops v0.12.0 cluster-controller v1.4.1 cluster-bootstrap-controller v0.3.0 (optional) pipeline-controller v0.0.11 (optional) policy-agent 2.1.1
"},{"location":"enterprise/releases-enterprise/#known-issues_2","title":"Known issues","text":"2022-11-25
"},{"location":"enterprise/releases-enterprise/#highlights_18","title":"Highlights","text":""},{"location":"enterprise/releases-enterprise/#gitopstemplates","title":"GitOpsTemplates","text":"enablePipelines
flag.weave.works/template-type=pipeline
label.This release incorporates anonymous aggregate user behavior analytics to help us continuously improve the product. As an Enterprise customer, this is enabled by default. You can learn more about this here.
"},{"location":"enterprise/releases-enterprise/#dependency-versions_20","title":"Dependency versions","text":"We are making these changes to provide a unified and intuitive self-service experience within Weave GitOps Enterprise, removing misleading and potentially confusing terminology born from when only Clusters were backed by Templates.
New API Group for the GitOpsTemplate CRD - old: clustertemplates.weave.works
- new: templates.weave.works
After upgrading Weave GitOps Enterprise which includes the updated CRD: 1. Update all your GitOpsTemplates in Git changing all occurrences of apiVersion: clustertemplates.weave.works/v1alpha1
to apiVersion: templates.weave.works/v1alpha1
. 2. Commit, push and reconcile. They should now be viewable in the Templates view again. 3. Clean up the old CRD. As it stands: - kubectl get gitopstemplate -A
will be empty as it is pointing to the old clustertemplates.weave.works
CRD. - kubectl get gitopstemplate.templates.weave.works -A
will work To fix the former of the commands, remove the old CRD (helm does not do this automatically for safety reasons): - kubectl delete crd gitopstemplates.clustertemplates.weave.works
- You may have to wait up to 5 minutes for your local kubectl CRD cache to invalidate, then kubectl get gitopstemplate -A
should be working as usual
Template Profiles / Applications / Credentials sections are hidden by default
For both CAPITemplates
and GitopsTemplates
the default visibility for all sections in a template has been set to \"false\"
. To re-enable profiles or applications on a template you can tweak the annotations
annotations:\ntemplates.weave.works/profiles-enabled: \"true\" # enable profiles\ntemplates.weave.works/kustomizations-enabled: \"true\" # enable applications\ntemplates.weave.works/credentials-enabled: \"true\" # enable CAPI credentials\n
The default values for a profile are not fetched and included in a pull-request
Prior to this release WGE would fetch the default values.yaml for every profile installed and include them in the HelmReleases
in the Pull Request when rendering out the profiles of a template.
This was an expensive operation and occasionally led to timeouts.
The new behaviour is to omit the values and fall back to the defaults included in the helm-chart. This sacrifices some UX (being able to see all the defaults in the PR and tweak them) to improve performance. There should not be any final behaviour changes to the installed charts.
You can still view and tweak the values.yaml
when selecting profiles to include on the \"Create resource (cluster)\" page. If changes are made here the updated values.yaml will be included.
2022-11-15
"},{"location":"enterprise/releases-enterprise/#highlights_19","title":"Highlights","text":"2022-11-10
"},{"location":"enterprise/releases-enterprise/#highlights_20","title":"Highlights","text":"Adds support for showing policy modes and policy configs in the UI
Show suspended status on pipelines detail
Align and link logo
Actually remove the watcher from the helm-watcher-cache
UI 1817 disable create target name space if name space is flux system
Adding edit capi cluster resource acceptance test
2022-10-17
"},{"location":"enterprise/releases-enterprise/#highlights_21","title":"Highlights","text":"2022-09-22
"},{"location":"enterprise/releases-enterprise/#highlights_22","title":"Highlights","text":"gitops create tenant
now supports --prune
to remove old resources from the cluster if you're not using --export
with GitOps.deploymentRBAC
section in tenancy.yaml
allows you to specify the permissions given to the flux Kustomizations
that will apply the resources from git to your tenants' namespaces in the cluster.OCIRepository
sources when restricting/allowing the sources that can be applied into tenants' namespaces.{{ .params.CLUSTER_NAME | upper }}
namespace
can be specified in the template profile annotation that will be provided as the HelmRelease
's targetNamespace
by default.phase=\"Provisioned\"
, rather than ControlPlaneReady=True
status.If using the policy-agent included in the weave-gitops-enterprise helm chart, the configuration should now be placed under the config
key.
old
policy-agent:\nenabled: true\naccountId: \"my-account\"\nclusterId: \"my-cluster\"\n
new
policy-agent:\nenabled: true\nconfig:\naccountId: \"my-account\"\nclusterId: \"my-cluster\"\n
"},{"location":"explorer/","title":"Explorer ENTERPRISE","text":"Warning
This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments.
As platform engineer or as developer, your applications and platform services will likely span multiple kubernetes clusters or infrastructure components. In order to manage and operate them you require a platform capability that allows you to discover the resources from a single place.
Explorer is that capability that allows any platform user to discover platform resources from a single place across all your kubernetes clusters.
"},{"location":"explorer/#faq","title":"FAQ","text":""},{"location":"explorer/#which-journeys-would-be-able-to-use-explorer-for","title":"Which journeys would be able to use explorer for?","text":"Explorer is better suited for journeys matching the discovery of resources across the platform resources inventory.
"},{"location":"explorer/#which-journeys-would-be-better-using-other-weave-gitops-capabilities-for","title":"Which journeys would be better using other weave gitops capabilities for?","text":"If you have a particular resources you want to manage, weave gitops offers single resource experience for almost every resource.
"},{"location":"explorer/#which-kinds-does-explorer-support","title":"Which Kinds does explorer support?","text":"Explorer support all Flux Applications and Sources CRDs
See Supported Kinds for more details.
"},{"location":"explorer/#next-steps","title":"Next Steps","text":"Now that you know what Explorer is, follow getting started to quickly have a feeling of what Explorer can do for you.
"},{"location":"explorer/configuration/","title":"Configuration ENTERPRISE","text":"Warning
This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments.
This page helps you to understand the options available to configure Explorer
"},{"location":"explorer/configuration/#prerequisites","title":"Prerequisites","text":"Before using Explorer, please ensure that: - You have Weave Gitops Enterprise v0.23.0
"},{"location":"explorer/configuration/#setup","title":"Setup","text":"The following configuration options are available for you to setup Explorer.
.spec.values.enableExplorer
: feature flag to control whether Explorer is enabled..spec.values.useQueryServiceBackend
: feature flag to control whether you want to leverage Explorer backend capabilities for other UI experiences like Applications or Sources.spec.values.explorer.collector.serviceAccount
: ServiceAccount name
and namespace
that explorer collector will use to impersonate in leaf clusters. Make sure you read authz for collector before setting it. Default values are name: collector
, namespace: flux-system
.You should specify them in your HelmRelease values:
---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: weave-gitops-enterprise\nnamespace: flux-system\nspec:\n# ... other spec components\nvalues:\nenableExplorer: true # feature flag to enable explorer\nuseQueryServiceBackend: true # uses explorer query backend in collection UIs\nexplorer:\ncollector:\nserviceAccount: # service account that collector will impersonate in leaf clusters\nname: collector\nnamespace: flux-system\n
"},{"location":"explorer/configuration/#configuration","title":"Configuration","text":""},{"location":"explorer/configuration/#clusters","title":"Clusters","text":"Explorer watches the GitopsClusters that you have connected to Weave Gitops Enterprise, as well as your Management cluster.
"},{"location":"explorer/configuration/#kinds","title":"Kinds","text":"Explorer watches for the following kind resources out of the box:
Flux GitOps Toolkit
Weave Gitops - GitopsSets - Templates - Policy Audit Violations
"},{"location":"explorer/configuration/#data-layer","title":"Data Layer","text":"Explorer take a simple approach to manage resource views. It leverages a Data Store for caching the views and query them. The storage lifecycle is bounded to Weave Gitops Enterprise app and does not provide persistence guarantees. Instead, it requests data as required to the leaf clusters. In its simplest form, the data store used is SQLite.
"},{"location":"explorer/configuration/#authentication-and-authorization","title":"Authentication and Authorization","text":"There are two main paths to consider within Explorer in the context of authentication and authorization (authN/authZ):
We look into them separately.
"},{"location":"explorer/configuration/#authentication-and-authorization-for-querying","title":"Authentication and Authorization for querying","text":"Explorer leverages existing authentication and authorization built-in the application. It identifies for a user logged in the application: its identity and the access permissions via Kuberentes RBAC. Query results are filtered honouring the access determined via RBAC.
"},{"location":"explorer/configuration/#authentication-and-authorization-for-collecting","title":"Authentication and Authorization for collecting","text":"GitopsClusters define the connection and security context that Explorer leverages to collect data from leaf clusters. Given that you have followed the indications in setup RBAC, the GitopsCluster service account is able to impersonate any user or group.
Tip
Collector RBAC resources are part of your leaf clusters common RBAC configuration. It is commonly located in your clusters/bases
folder, as described in Getting started.
To configure collection, you would need to extend this configuration with the following:
.spec.values.explorer.collector.serviceAccount
.apiVersion: v1\nkind: ServiceAccount\nmetadata:\nname: collector # should match .spec.values.explorer.collector.serviceAccount.name\nnamespace: flux-system # should match .spec.values.explorer.collector.serviceAccount.namespace\n
apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\nname: collector # could be .spec.values.explorer.collector.serviceAccount.name\nrules:\n- apiGroups: [ \"rbac.authorization.k8s.io\" ]\nresources: [ \"roles\", \"clusterroles\", \"rolebindings\", \"clusterrolebindings\" ]\nverbs: [ \"list\", \"watch\" ]\n- apiGroups: [ \"kustomize.toolkit.fluxcd.io\" ]\nresources: [ \"kustomizations\" ]\nverbs: [ \"list\", \"watch\" ]\n- apiGroups: [ \"helm.toolkit.fluxcd.io\" ]\nresources: [ \"helmreleases\" ]\nverbs: [ \"list\", \"watch\" ]\n- apiGroups: [ \"source.toolkit.fluxcd.io\" ]\nresources: [ \"buckets\", \"helmcharts\", \"gitrepositories\", \"helmrepositories\", \"ocirepositories\" ]\nverbs: [ \"list\", \"watch\" ]\n
ServiceAccount
.apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\nname: collector # could be .spec.values.explorer.collector.serviceAccount.name\nsubjects:\n- kind: ServiceAccount\nname: collector # should match .spec.values.explorer.collector.serviceAccount.name\nnamespace: flux-system # should match .spec.values.explorer.collector.serviceAccount.namespace\nroleRef:\nkind: ClusterRole\nname: collector # name of the cluster role created earlier\napiGroup: rbac.authorization.k8s.io\n
If you want the collector to watch a particular namespace use a RoleBinding instead.
collector
apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\nname: clusters-service-impersonator-role\nrules:\n- apiGroups: [\"\"]\nresources: [\"users\", \"groups\"]\nverbs: [\"impersonate\"]\n- apiGroups: [ \"\" ]\nresources: [ \"serviceaccounts\" ]\nverbs: [ \"impersonate\" ]\nresourceNames:\n- \"collector\" # should match .spec.values.explorer.collector.serviceAccount.name\n
"},{"location":"explorer/configuration/#next-steps","title":"Next Steps","text":"Warning
This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments.
This guide shows you the basic steps to start using Explorer.
"},{"location":"explorer/getting-started/#pre-requisites","title":"Pre-requisites","text":"Before using Explorer, please ensure that:
Explorer is enabled via configuration through the feature flag explorer.enabled
that you could configure in your Weave Gitops Enterprise HelmRelease values:
---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: weave-gitops-enterprise\nnamespace: flux-system\nspec:\n# ... other spec components\nvalues:\nexplorer:\nenabled: true # global enable/disable flag\ncollector:\n# ServiceAccount that explorer will use to watch clusters for resources\nserviceAccount:\nname: \"collector\"\nnamespace: \"flux-system\"\ncleaner:\ndisabled: false\nenabledFor: # controls which parts of the UI utilize the Explorer UI/Server components\n- applications\n- sources\n- gitopssets\n- templates\n
The enabledFor
field will control which parts of the UI utilize the Explorer backend for performant queries. Note that this does not control the collection of these objects, only the presentation of the objects in the UI.
For a complete overview on the configuration you could see configuration.
"},{"location":"explorer/getting-started/#explorer-ui","title":"Explorer UI","text":"Login to Weave Gitops and Explorer will be shown in the navigation menu Explorer
.
Explorer UI looks as follows:
It has two main components:
For a more detailed view on the UI you could see querying.
"},{"location":"explorer/operations/","title":"Operations ENTERPRISE","text":"Warning
This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments.
As a platform engineer, you may need a finer understanding of the underlying logic for Explorer. The following options are available to you to operate and troubleshoot it.
"},{"location":"explorer/operations/#debug-access-rules","title":"Debug Access Rules","text":"It is a debugging tool to make visible explorer authorization logic. You could find it as tab Access Rules
alongside the Query
tab.
You could discover by Cluster
and Subject
the Kinds
it is allowed to read. These are the rules that will be the source of truth doing authorization when a user does a query.
Explorer provides the following telemetry to use for operations.
"},{"location":"explorer/operations/#metrics","title":"Metrics","text":"Explorer exports Prometheus metrics. See setup to get started.
"},{"location":"explorer/operations/#querying","title":"Querying","text":"Explorer querying path is composed of three components exporting metrics:
Based on go-http-metrics, the following metrics are generated.
Request Duration: histogram with the latency of the HTTP requests.
http_request_duration_seconds_bucket{handler=\"/v1/query\",method=\"POST\",le=\"0.05\"} 0\nhttp_request_duration_seconds_sum{handler=\"/v1/query\",method=\"POST\"} 10.088081923\nhttp_request_duration_seconds_count{handler=\"/v1/query\",method=\"POST\"} 51\n
Response Size: histogram with the size of the HTTP responses in bytes
http_response_size_bytes_bucket{handler=\"/v1/query\",method=\"POST\",le=\"0.05\"} 10\nhttp_response_size_bytes_sum{handler=\"/v1/query\",method=\"POST\"} 120\nhttp_response_size_bytes_count{handler=\"/v1/query\",method=\"POST\"} 10\n
Requests In Flight: gauge with the number of inflight requests being handled at the same time.
http_requests_inflight{handler=\"/v1/query\"} 0\n
"},{"location":"explorer/operations/#datastore-reads","title":"Datastore Reads","text":"Request Latency: histogram with the latency of the datastore read requests.
action
is the datastore read operation that could be either GetObjects
, GetAccessRules
, GetObjectByID
, GetRoles
or GetRoleBindings
.status
is the result of the operation. It could be either success
or error
.datastore_latency_seconds_bucket{action=\"GetObjectByID\", le=\"+Inf\", status=\"success\"} 1175\ndatastore_latency_seconds_bucket{action=\"GetObjectByID\", le=\"0.01\", status=\"success\"} 1174\n
datastore_latency_seconds_count{action=\"GetObjectByID\", status=\"success\"} 1175\ndatastore_latency_seconds_count{action=\"GetRoleBindings\", status=\"success\"} 47\ndatastore_latency_seconds_count{action=\"GetRoles\", status=\"success\"} 47\n
datastore_latency_seconds_sum{action=\"GetObjectByID\", status=\"success\"} 0.6924557999999995\ndatastore_latency_seconds_sum{action=\"GetRoleBindings\", status=\"success\"} 1.329158916\ndatastore_latency_seconds_sum{action=\"GetRoles\", status=\"success\"} 3.942473879999999\n
Requests In Flight: gauge with the number of inflight requests being handled at the same time.
action
is the datastore read operation that could be either GetObjects
, GetAccessRules
, GetObjectByID
, GetRoles
or GetRoleBindings
datastore_inflight_requests{action=\"GetObjectByID\"} 0\ndatastore_inflight_requests{action=\"GetRoleBindings\"} 0\ndatastore_inflight_requests{action=\"GetRoles\"} 0\n
"},{"location":"explorer/operations/#indexer-reads","title":"Indexer Reads","text":"Request Latency: histogram with the latency of the indexer read requests.
action
is the index read operation that could be either ListFacets
or Search
status
is the result of the operation. It could be either success
or error
indexer_latency_seconds_bucket{action=\"ListFacets\", le=\"+Inf\", status=\"success\"} 1\nindexer_latency_seconds_bucket{action=\"Search\", le=\"+Inf\", status=\"success\"} 47\n
indexer_latency_seconds_sum{action=\"ListFacets\", status=\"success\"} 0.008928666\nindexer_latency_seconds_sum{action=\"Search\", status=\"success\"} 0.06231312599999999\n
indexer_latency_seconds_count{action=\"ListFacets\", status=\"success\"} 1\nindexer_latency_seconds_count{action=\"Search\", status=\"success\"} 47\n
Requests In Flight: gauge with the number of inflight requests being handled at the same time.
action
is the index read operation that could be either ListFacets
or Search
indexer_inflight_requests{action=\"ListFacets\"} 0\nindexer_inflight_requests{action=\"Search\"} 0\n
"},{"location":"explorer/operations/#collecting","title":"Collecting","text":"Explorer collecting path is composed of three components exporting metrics:
The following metrics are available to monitor its health.
"},{"location":"explorer/operations/#cluster-watcher","title":"Cluster Watcher","text":"The metric collector_cluster_watcher
provides the number of the cluster watchers in the following status
: - Starting: a cluster watcher is starting at the back of detecting that a new cluster has been registered. - Started: cluster watcher has been started and collecting events from the remote cluster. This is the stable state. - Stopping: a cluster has been deregistered so its cluster watcher is no longer required. In the process of stopping it. - Failed: a cluster watcher has failed during the creation or starting process and cannot collect events from the remote clusters. This is the unstable state.
Where collector
is the type of collector, it could be - rbac: for collecting RBAC resources (ie roles) - objects: for collecting non-rbac resources (ie kustomizations)
collector_cluster_watcher{collector=\"objects\", status=\"started\"} 1\ncollector_cluster_watcher{collector=\"objects\", status=\"starting\"} 0\ncollector_cluster_watcher{collector=\"rbac\", status=\"started\"} 1\ncollector_cluster_watcher{collector=\"rbac\", status=\"starting\"} 0\n
A sum on collector_cluster_watcher
gives the total number of cluster watchers that should be equal to the number of clusters
Request Latency: histogram with the latency of the datastore write requests.
action
is the datastore write operation that could be either StoreRoles
, StoreRoleBindings
, StoreObjects
, DeleteObjects
, DeleteAllObjects
, DeleteRoles
, DeleteAllRoles
, DeleteRoleBindings
, DeleteAllRoleBindings
status
is the result of the operation. It could be either success
or error
datastore_latency_seconds_bucket{action=\"StoreRoles\", le=\"+Inf\", status=\"success\"} 1175\ndatastore_latency_seconds_bucket{action=\"StoreRoles\", le=\"0.01\", status=\"success\"} 1174\n
datastore_latency_seconds_count{action=\"StoreRoles\", status=\"success\"} 1175\ndatastore_latency_seconds_count{action=\"DeleteRoles\", status=\"success\"} 47\ndatastore_latency_seconds_count{action=\"DeleteAllRoleBindings\", status=\"success\"} 47\n
datastore_latency_seconds_sum{action=\"StoreRoles\", status=\"success\"} 0.6924557999999995\ndatastore_latency_seconds_sum{action=\"DeleteRoles\", status=\"success\"} 1.329158916\ndatastore_latency_seconds_sum{action=\"DeleteAllRoleBindings\", status=\"success\"} 3.942473879999999\n
Requests In Flight: gauge with the number of inflight write requests being handled at the same time.
action
is the datastore write operation that could be either StoreRoles
, StoreRoleBindings
, StoreObjects
, DeleteObjects
, DeleteAllObjects
, DeleteRoles
, DeleteAllRoles
, DeleteRoleBindings
, DeleteAllRoleBindings
datastore_inflight_requests{action=\"StoreRoles\"} 0\ndatastore_inflight_requests{action=\"StoreRoleBindings\"} 0\ndatastore_inflight_requests{action=\"DeleteAllRoleBindings\"} 0\n
"},{"location":"explorer/operations/#indexer-writes","title":"Indexer Writes","text":"Request Latency: histogram with the latency of the indexer write requests.
action
is the index write operation that could be either Add
, Remove
or RemoveByQuery
status
is the result of the operation. It could be either success
or error
indexer_latency_seconds_bucket{action=\"Add\",status=\"success\",le=\"+Inf\"} 109\nindexer_latency_seconds_bucket{action=\"Remove\",status=\"success\",le=\"+Inf\"} 3\n
indexer_latency_seconds_sum{action=\"Add\",status=\"success\"} 8.393912168\nindexer_latency_seconds_sum{action=\"Remove\",status=\"success\"} 0.012298476\n
indexer_latency_seconds_count{action=\"Add\",status=\"success\"} 109\nindexer_latency_seconds_count{action=\"Remove\",status=\"success\"} 3\n
Requests In Flight: gauge with the number of inflight requests being handled at the same time.
action
is the index write operation that could be either Add
, Remove
or RemoveByQuery
indexer_inflight_requests{action=\"Add\"} 0\nindexer_inflight_requests{action=\"Remove\"} 0\n
"},{"location":"explorer/operations/#dashboard","title":"Dashboard","text":"Use Explorer dashboard to monitor its golden signals
Explorer dashboard is part of Weave GitOps Dashboards
"},{"location":"explorer/querying/","title":"Querying ENTERPRISE","text":"Warning
This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments.
Explorer recommended way to discover resources is via its search dialog. This guide provides the background to understand it and set how to use it.
"},{"location":"explorer/querying/#schema","title":"Schema","text":"Every resource is normalised to the following common schema:
Key Description Cluster Name of cluster where the resource exists. As gitops cluster<GitopsClusterNamespace,GitopsClusterName>
Namespace Namespace name where the resource exists. Kind Resource kubernetes type or kind Name Resource name as specified in its manifest. Status Resource health status. Indicates the status of its reconciliation. Message Resource health status message. It extends status field with information about the status. For a podinfo
helm release from a cluster default/progress-delivery-demo2-32
like this:
apiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: podinfo\nnamespace: flux-system\nspec:\nchart:\nspec:\nchart: podinfo\ninterval: 1m\nreconcileStrategy: ChartVersion\nsourceRef:\nkind: HelmRepository\nname: podinfo\nversion: 6.0.0\ninterval: 1m\nstatus:\nconditions:\n- message: Release reconciliation succeeded\nreason: ReconciliationSucceeded\nstatus: \"True\"\ntype: Ready\n
The schema looks like
Cluster Namespace Kind Name Status Messagedefault/progress-delivery-demo2-32
flux-system
HelmRelease
podinfo
Success
Release reconciliation succeeded
You can open the query filter settings by clicking on the filter button:
"},{"location":"explorer/querying/#filtering-and-searching","title":"Filtering and Searching","text":"The Search
field allows for free-form text entry to query objects across all fields. For example, if we enter the term \"podinfo\", we will get matches for not only object names, but also strings from the Message
field:
To filter the results by cluster, kind, namespace, enable the checkbox filters:
Note that the free-form terms only apply to the filtered results from the kind filter. In this case, we only match the \"podinfo\" string on results that are Kustomizations
.
We can also \"OR\" filters together. Note that filters within a category are OR'd together, but terms are AND'd across categories. For example, selecting the Kind=Kustomization
and Kind=HelmRelease
filters will show both Kustomizations
and HelmReleases
:
A GitOpsTemplate
enables application developers to self-service components and services easily through the Weave GitOps Dashboard. It's a simple YAML file that you can enrich with parameters, variables, metadata, and conditions.
Use a GitOpsTemplate
to template any resource that can be expressed in YAML (basic Kubernetes resources, Flux primitives, Terraform controller, Crossplane, Cluster API, etc.) into a standardised definition.
Application developers can use a template through our GUI. The rendered template is added to their GitOps repository via a pull request. When merged and reconciled, the resources in the template are created. A resource can be a MachinePool
for CAPI objects, a Flux Kustomization, or a Terraform Controller resource, to name a few examples.
Tip
A GitOpsTemplate
must be valid yaml
. Beyond this, a rendered template can create any resource you need .
Info
GitOpsTemplate or CAPITemplate?
The only difference between CAPITemplate
and GitOpsTemplate
is the default value of these two annotations:
CAPITemplate
default value for GitOpsTemplate
templates.weave.works/add-common-bases
\"true\"
\"false\"
templates.weave.works/inject-prune-annotations
\"true\"
\"false\"
"},{"location":"gitops-templates/annotations/","title":"Annotations ENTERPRISE","text":""},{"location":"gitops-templates/annotations/#the-add-common-bases-annotation","title":"The add-common-bases
annotation","text":"The templates.weave.works/add-common-bases: \"true\"
annotation can be used to enable and disable the addition of a \"common bases\" Kustomization
to the list of rendered files. This kustomization will sync a path that is common to all clusters (clusters/bases
).
An example usecase would be to ensure that certain RBAC or policies are applied to all clusters using this template.
"},{"location":"gitops-templates/annotations/#the-inject-prune-annotation-annotation","title":"The inject-prune-annotation
annotation","text":"The templates.weave.works/inject-prune-annotation: \"true\"
annotation can be used to enable and disable the injection of Flux's prune
annotation into certain resources.
When enabled, GitOps automatically injects a kustomize.toolkit.fluxcd.io/prune: disabled
annotation into every resource in the spec.resourcetemplates
that is not a cluster.x-k8s.io.Cluster
and not a gitops.weave.works.GitopsCluster
.
The intention here is to stop Flux from explicitly deleting subresources of the Cluster
like AWSCluster
, KubeadmControlPlane
, AWSMachineTemplate
etc and let the CAPI controllers handle their removal.
This is the pattern recommended in the capi-quickstart guide https://cluster-api.sigs.k8s.io/user/quick-start.html#clean-up.
"},{"location":"gitops-templates/cli/","title":"Template CLI ENTERPRISE","text":"The Enterprise gitops
CLI tool provides a set of commands to help you manage your templates.
Here we're going to talk about the gitops create template
command that allows you to render templates locally and airgapped, without a full WGE installation in a Kubernetes cluster.
The gitops create template
command only works with GitOpsTemplate
objects. It does not work with CAPITemplate
objects. You should be able to migrate any CAPITemplate
objects to GitOpsTemplate
with some small tweaks.
Info
GitOpsTemplate or CAPITemplate?
The only difference between CAPITemplate
and GitOpsTemplate
is the default value of these two annotations:
CAPITemplate
default value for GitOpsTemplate
templates.weave.works/add-common-bases
\"true\"
\"false\"
templates.weave.works/inject-prune-annotations
\"true\"
\"false\"
"},{"location":"gitops-templates/cli/#installation","title":"Installation","text":"See the Weave Gitops Enterprise installation instructions for details on how to install the EE gitops
CLI tool.
Using a local GitOpsTemplate
manifest with required parameters exported in the environment, the command can render the template to one of the following: 1. The current kubecontext directly (default) 1. stdout with --export
1. The local file system with --output-dir
, this will use the spec.resourcestemplates[].path
fields in the template to determine where to write the rendered files. This is the recommended approach for GitOps as you can then commit the rendered files to your repository.
gitops create template \\\n--template-file capd-template.yaml \\\n--output-dir ./clusters/ \\\n--values CLUSTER_NAME=foo\n
"},{"location":"gitops-templates/cli/#profiles","title":"Profiles","text":"As in the UI you can add profiles to your template. However instead of reading the latest version of a profile and its layers from a HelmRepository
object in the cluster, we instead read from your local helm cache.
helm repo add weaveworks-charts https://raw.githubusercontent.com/weaveworks/weave-gitops-profile-examples/gh-pages\nhelm repo update\n
This particular helm repo provides a version of the cert-manager
repo and others.
You can supply a values.yaml
file to a profile using the values
parameter. For example we can supply cert-manager
's values.yaml
with:
gitops create template \\\n--template-file capd-template.yaml \\\n--output-dir ./out \\\n--values CLUSTER_NAME=foo \\\n--profiles \"name=cert-manager,namespace=foo,version=>0.1,values=cert-manager-values.yaml\"\n
"},{"location":"gitops-templates/cli/#using-a-config-file","title":"Using a config file","text":"Instead of specifying the parameters on the command line you can supply a config file. For example the above invocation can be replaced like so:
```yaml title=config.yaml template-file: capd-capi-template.yaml output-dir: ./out values: - CLUSTER_NAME=foo profiles: - name=cert-manager,namespace=foo,version=>0.1,values=cert-manager-values.yaml
and executed with:\n\n```bash\ngitops create template --config config.yaml\n
"},{"location":"gitops-templates/create-cluster-example/","title":"CAPI Cluster Template Example ENTERPRISE","text":"GitOps template objects need to be wrapped with the GitOpsTemplate
custom resource and then loaded into the management cluster.
apiVersion: templates.weave.works/v1alpha2\nkind: GitOpsTemplate\nmetadata:\nname: cluster-template-development\nlabels:\nweave.works/template-type: cluster\nspec:\ndescription: This is the std. CAPD template\nrenderType: templating\nparams:\n- name: CLUSTER_NAME\ndescription: This is used for the cluster naming.\nresourcetemplates:\n- apiVersion: cluster.x-k8s.io/v1alpha3\nkind: Cluster\nmetadata:\nname: \"{{ .params.CLUSTER_NAME }}\"\n
"},{"location":"gitops-templates/creating-templates/","title":"Creating GitOpsTemplates ENTERPRISE","text":"Tip
For complete examples of widely-used templates, see the Quickstart guide.
GitOps Templates were originally introduced to enable self-service operations for the cluster creation workflow.
We have since extended this capability to cover Terraform, Crossplane and general Kubernetes resources.
An example template could, upon merging to a GitOps repository and reconciling in a cluster, provide a running developer environment consisting of an EKS cluster, an RDS database, and a branch and revision of the current application through single template.
Templates can be loaded into the cluster by the Platform Operator by adding them to the Flux-managed GitOps repository for the target cluster. Alternatively, they can be applied directly to the cluster with kubectl
.
Info
Weave GitOps will search for templates in the default
namespace. This can be changed by configuring the config.capi.namespace
value in the Weave GitOps Enterprise Helm Chart.
Template types are used by Weave GitOps to group the templates nicely in the Dashboard UI.
There are 4 recommended template types:
application
- for application templatescluster
- for cluster templatesterraform
- for Terraform templatespipeline
- for Pipeline templatesDeclare this in the object manifest by using the weave.works/template-type
label and setting the value as the name of the type.
---\napiVersion: templates.weave.works/v1alpha2\nkind: GitOpsTemplate\nmetadata:\nname: example-template\nnamespace: default\nlabels:\nweave.works/template-type: pipeline\nspec:\n# ...\n
"},{"location":"gitops-templates/creating-templates/#template-components","title":"Template Components","text":"The rendering of certain component sections in a template can be enabled or disabled with annotations. The annotation keys are of the form templates.weave.works/COMPONENT-enabled
and have boolean
values.
Supported components:
profiles
kustomizations
credentials
Example:
annotations:\ntemplates.weave.works/profiles-enabled: \"true\"\ntemplates.weave.works/kustomizations-enabled: \"false\"\ntemplates.weave.works/credentials-enabled: \"true\"\n
"},{"location":"gitops-templates/creating-templates/#in-ui-template-editing","title":"In-UI Template Editing","text":"When rendering a template, a templates.weave.works/create-request
annotation is added by default to the first resource in the resourcetemplates
.
It can be added to any other resource by simply adding the annotation in empty form. This annotation holds information about which template generated the resource and the parameter values used as a json string.
If the resource type is one of the following and has this annotation, an Edit resource
button will appear in the GitOps UI which allows the editing of the resource by users, after which it will be re-rendered:
HelmRelease
Kustomization
HelmRepository
GitRepository
GitopsCluster
Example:
spec:\nresourcetemplates:\n- apiVersion: v1\nkind: ConfigMap\nmetadata:\nname: my-configmap\ndata:\nmy-key: my-value\n- apiVersion: source.toolkit.fluxcd.io/v1beta1\nkind: HelmRepository\nmetadata:\n# This annotation will add an `Edit resource` button in the UI for this resource\nannotations:\ntemplates.weave.works/create-request: ''\nname: nginx\nnamespace: default\n
"},{"location":"gitops-templates/params/","title":"Parameters ENTERPRISE","text":"When users have chosen a template, they will be presented with a form to complete.
This form will collect the specific resource configuration which they would like applied to their instance.
Resource variables, or parameters, are set by the template author in the template object manifest under spec.params
.
Some params are required for all resources as they will be used to generate paths for the eventually rendered resources.
These are: - CLUSTER_NAME
- RESOURCE_NAME
The following metadata fields can be added for each parameter under spec.params
. These will get rendered nicely in the UI form allowing users to understand what each field is for.
name
: The variable name within the resource templates.description
: Description of the parameter. This will be rendered in both the UI and CLI.options
: The list of possible values this parameter can be set to.required
- Whether the parameter must contain a non-empty value.default
- Default value of the parameter.Example:
spec:\nparams:\n- name: IP_ADDRESS\ndescription: 'The IP address of this service'\noptions: [1.2.3.4, 5.6.7.8]\ndefault: 1.2.3.4\n
"},{"location":"gitops-templates/profiles/","title":"Adding Profiles to Templates ENTERPRISE","text":"Profiles are enhanced Helm Charts which allow operators to make additional components either optional or required to developers using self-service templates.
Default and required profiles can be added via the template spec.charts
section.
spec:\ncharts:\nitems:\n- name: nginx\nversion: 1.0.0\ntargetNamespace: nginx\n- name: cert-manager\ntargetNamespace: cert-manager\n
A template with the above profiles would offer Application Developers the option to add nginx
and cert-manager
resources to their templated resources, ready for deployment to their cluster.
Keys available in the spec.charts
section and the template variables available to them.
helmRepositoryTemplate.path
Path the HelmRepository
will be written to params
items
list of charts to configure, see below Keys available in the spec.charts.items
entries and the template variables available to them.
template.content
Full or partial HelmRelease
CR template params
template.path
Path the HelmRelease will be written to params
chart
Shortcut to HelmRelease.spec.chart.spec.chart
version
Shortcut to HelmRelease.spec.chart.spec.version
targetNamespace
Shortcut to HelmRelease.spec.targetNamespace
values
Shortcut to HelmRelease.spec.values
params
layer
Layer to install as required
(default=false) Allow the user to de-select this profile editable
(default=false) Allow the user to edit the values.yaml of this profile Expand for a complete yaml example spec:\ncharts:\nhelmRepositoryTemplate:\npath: clusters/${CLUSTER_NAME}/helm-repositories.yaml\nitems:\n- chart: cert-manager\nversion: v1.5.3\neditable: false\nrequired: true\nvalues:\ninstallCRDs: ${CERT_MANAGER_INSTALL_CRDS}\ntargetNamespace: cert-manager\nlayer: layer-1\ntemplate:\npath: clusters/${CLUSTER_NAME}/cert-manager.yaml\ncontent:\nmetadata:\nlabels:\napp.kubernetes.io/name: cert-manager\nspec:\nretries: ${CERT_MANAGER_RETRY_COUNT}\n
Tip
template.content
will be merged over the top of a default HelmRelease
CR so it does not need to be complete.
Deprecated feature
Where possible please use the spec.charts
section as detailed above to declare profiles.
Profiles can also be included within templates by the capi.weave.works/profile-INDEX
annotation.
annotations:\ncapi.weave.works/profile-0: '{\"name\": \"NAME\", \"version\": \"VERSION\", \"editable\": EDITABLE, \"namespace\": \"NAMESPACE\"}'\n
Where:
name
- is the name of the profile in the default profiles repositoryversion
- (optional) will choose the default versionnamespace
- (optional) is the default target namespace for the profileeditable
- (optional, default=false
), allow the user to de-select this profile, making it a default instead of a requirement.Quickstart
templates are GitOpsTemplate
s that you could use when getting started with Weave Gitops Enterprise. It aims to provide a simplified, basic experience.
The templates exist as a Helm Chart in the weave-gitops-quickstart github repo.
To get started, add the following HelmRelease
object to your Weave GitOps Enterprise configuration repo for your management cluster.
---\napiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: GitRepository\nmetadata:\nname: weave-gitops-quickstart\nnamespace: flux-system\nspec:\ninterval: 10m0s\nref:\nbranch: main\nurl: https://github.com/weaveworks/weave-gitops-quickstart\n---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: quickstart-templates\nnamespace: flux-system\nspec:\nchart:\nspec:\nchart: \"quickstart-templates\"\nversion: \">=0.1.0\"\nsourceRef:\nkind: GitRepository\nname: weave-gitops-quickstart\nnamespace: flux-system\ninterval: 10m0s\n
Commit and merge the above file. Once the HelmRelease
has been successfully deployed to your cluster, navigate to your Weave GitOps UI Dashboard. You will see that the templates
Chart is now deployed to your cluster.
If you click on the Templates
tab in the sidebar, you will see the Quickstart templates are now available for use:
The following pipeline templates have been made available on your Weave GitOps Enterprise instance:
pipeline-view
: A template to create a sample pipeline to visualize a HelmRelease
application delivered to dev, test and prod environments.pipeline-promotion-resources
: A template to create the Flux Notification Controller resources required for promoting applications via pipelines.pipeline-view-promote-by-cluster
: A template to create pipelines for hard tenancy when applications are isolated by cluster.pipeline-view-promote-by-namespace
: A template to create pipelines for soft tenancy when applications are isolated by namespace.GitOpsTemplate
s as a Platform Engineer","text":"The above Quickstart templates are designed to provide a practical getting started experience. We encourage Platform Operators to start off with these templates within their team to ramp up on using Weave GitOps.
If the need arises later, operators can always expand on these templates to develop their own set of self-service capabilities.
"},{"location":"gitops-templates/quickstart-templates/#using-gitopstemplates-as-an-application-developer","title":"UsingGitOpsTemplate
s as an Application Developer","text":"As a developer using Weave GitOps Enterprise, use the templates to explore GitOps's capabilities. For example, to create a pipeline for your application: use the above template provided by your Operations team to create required resources. Once they have been added to your GitOps repository, you can adapt the rendered resources to meet your needs.
Want to contribute?
The Quickstart templates are maintained by the Weave Gitops team. If you would like to make alterations, suggest fixes, or even contribute a new template which you find cool, just head to the repo and open a new issue or PR!
"},{"location":"gitops-templates/repo-rendered-paths/","title":"Rendered Template Paths ENTERPRISE","text":"Template authors can configure the eventual location of the rendered template in the user's GitOps repository.
This allows for more control over where different resources in the template are rendered.
"},{"location":"gitops-templates/repo-rendered-paths/#configuring-paths","title":"Configuring Paths","text":"The path for rendered resources is configured via the spec.resourcetemplates[].path
field.
Important to note
spec:\nresourcetemplates:\n// highlight-next-line\n- path: clusters/${CLUSTER_NAME}/definition/cluster.yaml\ncontent:\n- apiVersion: cluster.x-k8s.io/v1alpha4\nkind: Cluster\nmetadata:\nname: ${CLUSTER_NAME}\n...\n- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha4\nkind: AWSCluster\nmetadata:\nname: ${CLUSTER_NAME}\n...\n// highlight-next-line\n- path: clusters/${CLUSTER_NAME}/workloads/helmreleases.yaml\ncontent:\n- apiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: ${CLUSTER_NAME}-nginx\n...\n- apiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: ${CLUSTER_NAME}-cert-manager\n...\n
"},{"location":"gitops-templates/repo-rendered-paths/#configuring-paths-for-charts","title":"Configuring paths for charts
","text":"The spec.charts.helmRepositoryTemplate.path
and spec.charts.items[].template.path
fields can be used to specify the paths of these resources:
Example
spec:\ncharts:\nhelmRepositoryTemplate:\n// highlight-next-line\npath: clusters/${CLUSTER_NAME}/workloads/helm-repo.yaml\nitems:\n- chart: cert-manager\nversion: 0.0.8\ntemplate:\n// highlight-next-line\npath: clusters/${CLUSTER_NAME}/workloads/cert-manager.yaml\n
"},{"location":"gitops-templates/repo-rendered-paths/#default-paths","title":"Default Paths","text":"If the spec.resourcetemplates[].path
is omitted, a default path for the rendered template is calculated.
In this case some of the submitted params are used. Users must provide one of the following parameters: - CLUSTER_NAME
- RESOURCE_NAME
To ensure users supply these values, set the parameters to required
in the template definition:
spec:\nparams:\n- name: RESOURCE_NAME\nrequired: true\n# or\n- name: CLUSTER_NAME\nrequired: true\n
Important
The kustomization feature and the add-common-bases
annotation feature always use a calculated default path. If you are using these features one of CLUSTER_NAME
or RESOURCE_NAME
must be provided, even if you specify a path
for all the other resources in the template.
The default path for a template has a few components: - From the params: CLUSTER_NAME
or RESOURCE_NAME
, required. - From the params: NAMESPACE
, default: default
- From values.yaml
for the Weave GitOps Enterprise mccp
chart: values.config.capi.repositoryPath
, default: clusters/management/clusters
These are composed to create the path: ${repositoryPath}/${NAMESPACE}/${CLUSTER_OR_RESOURCE_NAME}.yaml
Using the default values and supplying CLUSTER_NAME
as my-cluster
will result in the path: clusters/management/clusters/default/my-cluster.yaml
Resource templates are used to create Kubernetes resources. They are defined in the spec.resourcetemplates
section of the template.
content
key","text":"The content
key is used to define a list of resources:
spec:\nresourcetemplates:\n- content:\n- apiVersion: v1\nkind: Namespace\nmetadata:\nname: nginx\n- apiVersion: v1\nkind: Namespace\nmetadata:\nname: cert-manager\n
"},{"location":"gitops-templates/resource-templates/#the-raw-key","title":"The raw
key","text":"The raw
key is used to define a raw string that will be written to the specified path.
This can be useful to preserve comments or formatting in the rendered resource.
spec:\nresourcetemplates:\n- path: \"helm-release.yaml\"\nraw: |\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: podinfo\nnamespace: prod-github\nspec:\ninterval: 1m\nchart:\nspec:\nchart: podinfo\nversion: \"6.0.0\" # {\"$promotion\": \"flux-system:podinfo-github:prod\"}\nsourceRef:\nkind: HelmRepository\nname: podinfo\ninterval: 1m\n
Info
raw
key is not compatible with the content
key. Only one of the two can be used.raw
key data must still be a valid kubernetes unstructured object.The following templating languages are supported: - envsubst (default) - templating
Declare the templating language to be used to render the template by setting spec.renderType
.
envsubst
, which is short for 'environment substitution', uses envsubst for rendering. This templating format is used by clusterctl.
Variables can be set for rendering into the template in the ${VAR_NAME}
syntax.
${var}
Value of $var
${#var}
String length of $var
${var^}
Uppercase first character of $var
${var^^}
Uppercase all characters in $var
${var,}
Lowercase first character of $var
${var,,}
Lowercase all characters in $var
${var:n}
Offset $var
n
characters from start ${var:n:len}
Offset $var
n
characters with max length of len
${var#pattern}
Strip shortest pattern
match from start ${var##pattern}
Strip longest pattern
match from start ${var%pattern}
Strip shortest pattern
match from end ${var%%pattern}
Strip longest pattern
match from end ${var-default}
If $var
is not set, evaluate expression as $default
${var:-default}
If $var
is not set or is empty, evaluate expression as $default
${var=default}
If $var
is not set, evaluate expression as $default
${var:=default}
If $var
is not set or is empty, evaluate expression as $default
${var/pattern/replacement}
Replace as few pattern
matches as possible with replacement
${var//pattern/replacement}
Replace as many pattern
matches as possible with replacement
${var/#pattern/replacement}
Replace pattern
match with replacement
from $var
start ${var/%pattern/replacement}
Replace pattern
match with replacement
from $var
end"},{"location":"gitops-templates/supported-langs/#templating","title":"Templating","text":"Templating uses text/templating for rendering, using go-templating style syntax {{ .params.CLUSTER_NAME }}
where params are provided by the .params
variable. Template functions can also be used with the syntax {{ .params.CLUSTER_NAME | FUNCTION }}
.
As taken (from the Sprig library)
Function Type Functions String Functions trim, wrap, randAlpha, plural String List Functions splitList, sortAlpha Integer Math Functions add, max, mul Integer Slice Functions until, untilStep Float Math Functions addf, maxf, mulf Date Functions now, date Defaults Functions default, empty, coalesce, fromJson, toJson, toPrettyJson, toRawJson, ternary Encoding Functions b64enc, b64dec Lists and List Functions list, first, uniq Dictionaries and Dict Functions get, set, dict, hasKey, pluck, dig, deepCopy Type Conversion Functions atoi, int64, toString Flow Control Functions fail UUID Functions uuidv4 Version Comparison Functions semver, semverCompare Reflection typeOf, kindIs, typeIsLike"},{"location":"gitops-templates/supported-langs/#custom-delimiters","title":"Custom Delimiters","text":"The default delimiters for renderType: templating
are {{
and }}
. These can be changed by setting the templates.weave.works/delimiters
annotation on the template. For example:
templates.weave.works/delimiters: \"{{,}}\"
- defaulttemplates.weave.works/delimiters: \"${{,}}\"
${{
and }}
, for example \"${{ .params.CLUSTER_NAME }}\"
{{
in yaml is invalid syntax and needs to be quoted. If you need to provide an un-quoted number value like replicas: 3
you should use these delimiters. - replicas: {{ .params.REPLICAS }}
Invalid yaml - replicas: \"{{ .params.REPLICAS }}\"
Valid yaml, incorrect type. The type is a string
not a number
and will fail validation. - replicas: ${{ .params.REPLICAS }}
Valid yaml and correct number
type.templates.weave.works/delimiters: \"<<,>>\"
<<
and >>
, for example << .params.CLUSTER_NAME >>
There are now multiple published versions of the template CRD.
"},{"location":"gitops-templates/versions/#migration-notes","title":"Migration notes","text":""},{"location":"gitops-templates/versions/#v1alpha1-to-v1alpha2","title":"v1alpha1
to v1alpha2
","text":"When manually migrating a template from v1alpha1
to v1alpha2
(for example in git) you will need to: 1. Update the apiVersion
: 1. for GitopsTemplate
update the apiVersion to templates.weave.works/v1alpha2
1. for CAPITemplate
update the apiVersion to capi.weave.works/v1alpha2
1. Move the spec.resourcetemplates
field to spec.resourcetemplates[0].content
1. Either leave the spec.resourcetemplates[0].path
field empty or give it a sensible value.
If you experience issues with the path not being recognised when Flux reconciles the new template versions, try manually applying the new template to the cluster directly with: 1. Run kubectl apply -f capi-template.yaml
1. Run flux reconcile kustomization --with-source flux-system
twice.
As of Weave Gitops Enterprise 0.28.0 the conversion webhook has been removed.
This removed the need for cert-manager to be installed, but you will now have to convert any v1alpha1
templates to v1alpha2
manually in git.
v1alpha2
(default) notes","text":"This version changes the type of spec.resourcetemplates
from a list of objects to a list of files with a path
and content
:
Example:
spec:\nresourcetemplates:\n- path: \"clusters/{{ .params.CLUSTER_NAME }}.yaml\"\ncontent:\n- apiVersion: cluster.x-k8s.io/v1alpha3\nkind: Cluster\nmetadata:\nname: \"{{ .params.CLUSTER_NAME }}\"\npath: \"clusters/{{ .params.CLUSTER_NAME }}.yaml\"\n
"},{"location":"gitops-templates/versions/#v1alpha1-notes","title":"v1alpha1
notes","text":"The original version of the template. This version no longer works with Weave Gitops Enterprise 0.28.0 and above.
It uses spec.resourcetemplates
as a list of resources to render.
Example:
spec:\nresourcetemplates:\n- apiVersion: cluster.x-k8s.io/v1alpha3\nkind: Cluster\nmetadata:\nname: \"{{ .params.CLUSTER_NAME }}\"\n
"},{"location":"gitopssets/","title":"GitOpsSets ENTERPRISE","text":"Warning
This feature is in alpha and certain aspects will change
We're very excited for people to use this feature. However, please note that some changes will be made to the API and behavior, particularly to enhance security by implementing impersonation for more fine-grained control over how the generated resources are applied.
"},{"location":"gitopssets/#introduction","title":"Introduction","text":"GitOpsSets enable Platform Operators to have a single definition for an application for multiple environments and a fleet of clusters. A single definition can be used to generate the environment and cluster-specific configuration.
As an example, we can take an application that needs to be deployed to various environments (Dev, Test, Prod) built by a fleet of clusters. Each of those environments + clusters requires a specialized configuration powering the same Application. With GitOpsSets and the generators you just declare the template you want to use, the selector that will match the cluster of the inventory, and where to get the special configuration.
GitOpsSets will create out of the single resource all the objects and Flux primitives that are required to successfully deploy this application. An operation that required the editing of hundreds of files can now be done with a single command.
The initial generators that are coming with the preview release are:
The gitopssets-controller can be installed in two ways:
The standalone installation can be useful for leaf clusters that don't have Weave GitOps Enterprise installed.
"},{"location":"gitopssets/gitopssets-installation/#prerequisites","title":"Prerequisites","text":"Before installing the gitopssets-controller, ensure that you've installed Flux.
"},{"location":"gitopssets/gitopssets-installation/#installing-the-gitopssets-controller","title":"Installing the gitopssets-controller","text":"To install the gitopssets-controller using a Helm chart, use the following HelmRelease:
apiVersion: v1\nkind: Namespace\nmetadata:\nname: gitopssets-system\n---\napiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: HelmRepository\nmetadata:\nname: weaveworks-oci-charts\nnamespace: gitopssets-system\nspec:\ninterval: 1m\ntype: oci\nurl: oci://ghcr.io/weaveworks/charts\n---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: gitopssets-controller\nnamespace: gitopssets-system\nspec:\ninterval: 10m\nchart:\nspec:\nchart: gitopssets-controller\nsourceRef:\nkind: HelmRepository\nname: weaveworks-oci-charts\nnamespace: gitopssets-system\nversion: 0.15.3\ninstall:\ncrds: CreateReplace\nupgrade:\ncrds: CreateReplace\n
After adding the Namespace, HelmRepository and HelmRelease to a Git repository synced by Flux, commit the changes to complete the installation process.
"},{"location":"gitopssets/gitopssets-installation/#customising-the-generators","title":"Customising the Generators","text":"Not all generators are enabled by default, this is because not all CRDs are required by the generators.
You might want to enable or disable individual generators via the Helm Chart:
gitopssets-controller:\nenabled: true\ncontrollerManager:\nmanager:\nargs:\n- --health-probe-bind-address=:8081\n- --metrics-bind-address=127.0.0.1:8080\n- --leader-elect\n# enable the cluster generator which is not enabled by default\n- --enabled-generators=GitRepository,Cluster,PullRequests,List,APIClient,Matrix,Config\n
"},{"location":"gitopssets/gitopssets-releases/","title":"Gitopssets Controller Releases ENTERPRISE","text":""},{"location":"gitopssets/gitopssets-releases/#v0161","title":"v0.16.1","text":"2023-09-06
2023-09-05
2023-08-17
2023-08-17
2023-08-17
2023-08-10
2023-07-26
2023-07-14
2023-06-26
2023-06-21
2023-06-20
2023-05-24
2023-05-10
repeat
mechanism within maps not just arrays2023-04-28
2023-04-27
2023-04-13
2023-03-30
2023-03-20
Currently rendering templates operates in two phases:
Please read the security information below before using this.
"},{"location":"gitopssets/templating-from-generators/#general-behaviour","title":"General behaviour","text":"GitOpsSets can be suspended, by setting the spec.suspend
flag to be true.
When this is the case, updates will not be applied, no resources created or deleted.
In addition, a manual reconciliation can be requested by annotating a GitOpsSet with the reconcile.fluxcd.io/requestedAt
annotation.
The simplest generator is the List
generator.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: gitopsset-sample\nspec:\ngenerators:\n- list:\nelements:\n- env: dev\nteam: dev-team\n- env: production\nteam: ops-team\n- env: staging\nteam: ops-team\n
The elements in there are a set of JSON objects[^yaml], there are three in this example, and each of them has two keys, env
and team
.
Other generators provide different sets of keys and values.
The generators documentation below provides more information on what the other generators output.
"},{"location":"gitopssets/templating-from-generators/#rendering-templates","title":"Rendering templates","text":"Templates are Kubernetes resources in YAML format.
Each template is rendered for each element generated by the generators.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: gitopsset-sample\nspec:\ngenerators:\n- list:\nelements:\n- env: dev\nteam: dev-team\n- env: production\nteam: ops-team\n- env: staging\nteam: ops-team\ntemplates:\n- content:\nkind: Kustomization\napiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nmetadata:\nname: \"{{ .Element.env }}-demo\"\nlabels:\napp.kubernetes.io/name: go-demo\napp.kubernetes.io/instance: \"{{ .Element.env }}\"\ncom.example/team: \"{{ .Element.team }}\"\nspec:\ninterval: 5m\npath: \"./examples/kustomize/environments/{{ .Element.env }}\"\nprune: true\nsourceRef:\nkind: GitRepository\nname: go-demo-repo\n
The generated elements are provided to the template in the Element
scope, so .Element.dev
refers to the dev
field from the List element.
The output from all generators is exposed in the Element
scope, not just List generators.
In addition to the .Element
field, a .ElementIndex
is also available, this represents the zero-based index into the set of generated elements.
NOTE: It's not recommended that you use this to name resources where the ordering of the queries for generating the elements is not guaranteed to be ordered, otherwise you could generate churn in resources as we look for resources by name when updating them, so, .ElementIndex
1 may not be the same as .ElementIndex
1 was the previous time, and this could cause resources to be updated unnecessarily with undesirable effects.
The output from a generator is an array of JSON objects[^yaml], the keys of which can contain repeating elements, either further JSON objects, or scalar values.
It can be desirable to repeat a template for a repeated element in a generated value.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: repeated-gitopsset-sample\nspec:\ngenerators:\n- list:\nelements:\n- env: dev\nteam: dev-team\nteams:\n- name: \"team1\"\n- name: \"team2\"\n- name: \"team3\"\n- env: staging\nteam: staging-team\nteams:\n- name: \"team4\"\n- name: \"team5\"\n- name: \"team6\"\ntemplates:\n- repeat: \"{ .teams }\"\ncontent:\nkind: ConfigMap\napiVersion: v1\nmetadata:\nname: \"{{ .Repeat.name }}-demo\"\ndata:\nname: \"{{ .Repeat.name }}-demo\"\nteam: \"{{ .Element.team }}\"\n
The template repeat
field is a JSONPath expression that is applied to each element during the template rendering.
Templates that use repeat
will have two separate scopes for the template params, .Element
which is the top-level element generated by the generator, and the additional .Repeat
scope, which is the repeating element.
In this case, six different ConfigMaps
are generated, three for the \"dev-team\" and three for the \"staging-team\".
As with the .ElementIndex
, for repeated elements both .ElementIndex
and .RepeatIndex
are available.
The default delimiters for the template engine are {{
and }}
, which is the same as the Go template engine.
These can be changed by adding an annotation to the GitOpsSet
:
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: gitopsset-sample\nannotations:\ntemplates.weave.works/delimiters: \"${{,}}\"\n
Changing the delimiters can be useful for:
In yaml {{
is invalid syntax and needs to be quoted. If you need to provide an un-quoted number value like replicas: 3
you should use the ${{,}}
delimiters.
replicas: {{ .params.REPLICAS }}
Invalid yamlreplicas: \"{{ .params.REPLICAS }}\"
Valid yaml, incorrect type. The type is a string not a number and will fail validation.replicas: ${{ .params.REPLICAS }}
Valid yaml and correct number type.Unquoted values allow you to include objects in your templates too.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: gitopsset-sample\nannotations:\ntemplates.weave.works/delimiters: \"${{,}}\"\nspec:\ngenerators:\n- list:\nelements:\n- env: dev\nresources:\nlimits:\ncpu: 100m\nmemory: 128Mi\n- env: staging\nresources:\nlimits:\ncpu: 100m\nmemory: 128Mi\ntemplates:\n- content:\nkind: Deployment\napiVersion: apps/v1\nmetadata:\nname: go-demo\nspec:\ntemplate:\nspec:\ncontainers:\n- name: go-demo\nimage: weaveworks/go-demo:0.2.0\nresources: ${{ .Element.resources | toJson }}\n
With the default {{,}}
delimiters this would fail as the \"resources\" field would need to be quoted.
Again, if we quote them we would get a string value, not an object.
"},{"location":"gitopssets/templating-from-generators/#generators","title":"Generators","text":"We currently provide these generators: - list - pullRequests - gitRepository - ociRepository - matrix - apiClient - cluster - imagepolicy - config
"},{"location":"gitopssets/templating-from-generators/#list-generator","title":"List generator","text":"This is the simplest generator, which is a hard-coded array of JSON objects, described as YAML mappings.
"},{"location":"gitopssets/templating-from-generators/#gitrepository-generator","title":"GitRepository generator","text":"The GitRepository
generator operates on Flux GitRepositories.
When a GitRepository
is updated, this will trigger a regeneration of templates.
The generator operates in two different ways, you can parse files (YAML or JSON) into Elements, or you can scan directories for subdirectories.
"},{"location":"gitopssets/templating-from-generators/#generation-from-files","title":"Generation from files","text":"apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: repository-sample\nspec:\ngenerators:\n- gitRepository:\nrepositoryRef: go-demo-repo\nfiles:\n- path: examples/generation/dev.yaml\n- path: examples/generation/production.yaml\n- path: examples/generation/staging.yaml\ntemplates:\n- content:\nkind: Kustomization\napiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nmetadata:\nname: \"{{ .Element.env }}-demo\"\nlabels:\napp.kubernetes.io/name: go-demo\napp.kubernetes.io/instance: \"{{ .Element.env }}\"\ncom.example/team: \"{{ .Element.team }}\"\nspec:\ninterval: 5m\npath: \"./examples/kustomize/environments/{{ .Element.env }}\"\nprune: true\nsourceRef:\nkind: GitRepository\nname: go-demo-repo\n
In this example, a Flux GitRepository
called go-demo-repo
in the same namespace as the GitOpsSet
will be tracked, and Kustomization
resources will be generated from the three files listed.
These files can be JSON or YAML.
In this example we expect to find the following structure in the files:
env: dev\nteam: developers\n
Changes pushed to the GitRepository
will result in re-reconciliation of the templates into the cluster.
For security reasons, you need to explicitly list out the files that the generator should parse.
"},{"location":"gitopssets/templating-from-generators/#generation-from-directories","title":"Generation from directories","text":"apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nlabels:\napp.kubernetes.io/name: gitopsset\napp.kubernetes.io/instance: gitopsset-sample\napp.kubernetes.io/part-of: gitopssets-controller\napp.kubernetes.io/managed-by: kustomize\napp.kubernetes.io/created-by: gitopssets-controller\nname: repository-sample\nspec:\ngenerators:\n- gitRepository:\nrepositoryRef: go-demo-repo\ndirectories:\n- path: examples/kustomize/environments/*\ntemplates:\n- content:\nkind: Kustomization\napiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nmetadata:\nname: \"{{ .Element.Base }}-demo\"\nlabels:\napp.kubernetes.io/name: go-demo\napp.kubernetes.io/instance: \"{{ .Element.Base }}\"\ncom.example/team: \"{{ .Element.Base }}\"\nspec:\ninterval: 5m\npath: \"{{ .Element.Directory }}\"\nprune: true\nsourceRef:\nkind: GitRepository\nname: go-demo-repo\n
In this example, a Flux GitRepository
called go-demo-repo
in the same namespace as the GitOpsSet
will be tracked, and Kustomization
resources are generated from paths within the examples/kustomize/environments/*
directory within the repository.
Each generated element has two keys, .Element.Directory
which will be a repo-relative path and .Element.Base
which contains the last element of the path, for example, for a directory ./examples/kustomize/environments/production
this will be production
.
It is also possible to exclude paths from the generated list, for example, if you do not want to generate for a directory you can exclude it with:
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: repository-sample\nspec:\ngenerators:\n- gitRepository:\nrepositoryRef: go-demo-repo\ndirectories:\n- path: examples/kustomize/environments/*\n- path: examples/kustomize/environments/production\nexclude: true\ntemplates:\n- content:\n
In this case, all directories that are subdirectories of examples/kustomize/environments
will be generated, but not examples/kustomize/environments/production
.
Note: The directory tree detection is restricted to the same directory as the path, no recursion is done.
In fact the path is treated as a Glob.
"},{"location":"gitopssets/templating-from-generators/#ocirepository-generator","title":"OCIRepository generator","text":"The OCIRepository
generator operates on Flux OCIRepositories.
When an OCIRepository
is updated, this will trigger a regeneration of templates.
The OCIRepository
generator operates in exactly the same way as the GitRepository generator, except it operates on OCIRepositories.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: oci-repository-sample\nspec:\ngenerators:\n- ociRepository:\nrepositoryRef: go-demo-oci-repo\nfiles:\n- path: examples/generation/dev.yaml\n- path: examples/generation/production.yaml\n- path: examples/generation/staging.yaml\ntemplates:\n- content:\nkind: Kustomization\napiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nmetadata:\nname: \"{{ .Element.env }}-demo\"\nlabels:\napp.kubernetes.io/name: go-demo\napp.kubernetes.io/instance: \"{{ .Element.env }}\"\ncom.example/team: \"{{ .Element.team }}\"\nspec:\ninterval: 5m\npath: \"./examples/kustomize/environments/{{ .Element.env }}\"\nprune: true\nsourceRef:\nkind: GitRepository\nname: go-demo-repo\n
"},{"location":"gitopssets/templating-from-generators/#pullrequests-generator","title":"PullRequests generator","text":"This will require making authenticated requests to your Git hosting provider e.g. GitHub, GitLab, Bitbucket etc.
It only requires read-only access, but all API tokens should be guarded as carefully as possible; what is a \"read-only\" token today might become a token with higher privilege in the future.
There have been many security compromises using API access tokens, do not let this happen to you!
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: pull-requests-sample\nspec:\ngenerators:\n- pullRequests:\ninterval: 5m\ndriver: github\nrepo: bigkevmcd/go-demo\nsecretRef:\nname: github-secret\ntemplates:\n- content:\napiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: GitRepository\nmetadata:\nname: \"pr-{{ .Element.Number }}-gitrepository\"\nnamespace: default\nspec:\ninterval: 5m0s\nurl: \"{{ .Element.CloneURL }}\"\nref:\nbranch: \"{{ .Element.Branch }}\"\n- content:\napiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nkind: Kustomization\nmetadata:\nname: \"pr-{{ .Element.Number }}-demo\"\nnamespace: default\nspec:\ninterval: 5m\npath: \"./examples/kustomize/environments/dev\"\nprune: true\ntargetNamespace: \"{{ .Element.Branch }}-ns\"\nsourceRef:\nkind: GitRepository\nname: \"pr-{{ .Element.Number }}-gitrepository\"\n
This example will poll \"github.com/bigkevmcd/go-demo\" for open pull requests and trigger the deployment of these by creating a Flux GitRepository
and a Kustomization
to deploy.
As the generator only queries open pull requests, when a PR is closed, the generated resources will be removed.
For non-public installations, you can configure the serverURL
field and point it to your own installation.
The driver
field can be github
or gitlab
or bitbucketserver
, other options can be supported from go-scm.
The forks
flag field can be used to indicate whether to include forks in the target pull requests or not. If set to true
any pull request from a fork repository will be included, otherwise if false
or not indicated the pull requests from fork repositories are discarded.
Additionally labels can be provided for querying pull requests with matching labels e.g.
- pullRequests:\ninterval: 5m\ndriver: github\nrepo: bigkevmcd/go-demo\nsecretRef:\nname: github-secret\nforks: false\nlabels:\n- deploy\n
The fields emitted by the pull-request are as follows:
Number
this is generated as a string representationBranch
this is the source branchHeadSHA
this is the SHA of the commit in the merge branchCloneURL
this is the HTTPS clone URL for this repositoryCloneSSHURL
this is the SSH clone URL for this repositoryFork
this indicates whether the pull request is from a fork (true) or not (false)Create a read-only token that can list Pull Requests, and store it in a secret:
$ kubectl create secret generic github-secret \\\n--from-literal password=<insert access token here>\n
"},{"location":"gitopssets/templating-from-generators/#matrix-generator","title":"Matrix generator","text":"The matrix generator doesn't generate resources by itself. It combines the results of generation from other generators e.g.:
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: matrix-sample\nspec:\ngenerators:\n- matrix:\ngenerators:\n- gitRepository:\nrepositoryRef: go-demo-repo\nfiles:\n- path: examples/generation/dev.yaml\n- path: examples/generation/production.yaml\n- path: examples/generation/staging.yaml\n- list:\nelements:\n- cluster: dev-cluster\nversion: 1.0.0\n
Given the files mentioned all have the following structure:
env: dev\nteam: developers\n
This will result in three sets of generated parameters, which are a combination of the maps in the files in the gitRepository, and the elements in the list generator, this can result in a combinatorial explosion of resources being created in your cluster.
- env: dev\nteam: developers\ncluster: dev-cluster\nversion: 1.0.0\n- env: staging\nteam: staging-team\ncluster: dev-cluster\nversion: 1.0.0\n- env: production\nteam: production-team\ncluster: dev-cluster\nversion: 1.0.0\n
These can be referenced in the templates, note that all keys in the merged generators from the Matrix are contained in the Element
scope.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: matrix-sample\nspec:\ngenerators:\n- matrix:\ngenerators:\n- gitRepository:\nrepositoryRef: go-demo-repo\nfiles:\n- path: examples/generation/dev.yaml\n- path: examples/generation/production.yaml\n- path: examples/generation/staging.yaml\n- list:\nelements:\n- cluster: dev-cluster\nversion: 1.0.0\ntemplates:\n- content:\nkind: Kustomization\napiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nmetadata:\nname: \"{{ .Element.env }}-demo\"\nlabels:\napp.kubernetes.io/name: go-demo\napp.kubernetes.io/instance: \"{{ .Element.env }}\"\ncom.example/team: \"{{ .Element.team }}\"\ncom.example/cluster: \"{{ .Element.cluster }}\"\ncom.example/version: \"{{ .Element.version }}\"\nspec:\ninterval: 5m\npath: \"./examples/kustomize/environments/{{ .Element.env }}\"\nprune: true\nsourceRef:\nkind: GitRepository\nname: go-demo-repo\n
"},{"location":"gitopssets/templating-from-generators/#optional-name-for-matrix-elements","title":"Optional Name for Matrix elements","text":"If you want to use two generators in a Matrix that output the same fields, they will collide, for example, the ImagePolicy
generator outputs a latestImage
field, if you have two, they will collide.
You can provide a name for the generator in the Matrix:
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: matrix-sample\nspec:\ngenerators:\n- matrix:\ngenerators:\n- name: gen1\ngitRepository:\nrepositoryRef: go-demo-repo\nfiles:\n- path: examples/generation/dev.yaml\n- path: examples/generation/production.yaml\n- path: examples/generation/staging.yaml\n- name: gen2\nlist:\nelements:\n- cluster: dev-cluster\nversion: 1.0.0\ntemplates:\n- content:\nkind: Kustomization\napiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nmetadata:\nname: \"{{ .Element.gen1.env }}-demo\"\nlabels:\napp.kubernetes.io/name: go-demo\napp.kubernetes.io/instance: \"{{ .Element.gen1.env }}\"\ncom.example/team: \"{{ .Element.gen1.team }}\"\ncom.example/cluster: \"{{ .Element.gen2.cluster }}\"\ncom.example/version: \"{{ .Element.gen2.version }}\"\nspec:\ninterval: 5m\npath: \"./examples/kustomize/environments/{{ .Element.gen1.env }}\"\nprune: true\nsourceRef:\nkind: GitRepository\nname: go-demo-repo\n
The name value is used as a key in the generated results. The example above will yield:
- gen1:\nenv: dev\nteam: developers\ngen2:\ncluster: dev-cluster\nversion: 1.0.0\n- gen1:\nenv: staging\nteam: staging-team\ngen2:\ncluster: dev-cluster\nversion: 1.0.0\n- gen1:\nenv: production\nteam: production-team\ngen2:\ncluster: dev-cluster\nversion: 1.0.0\n
"},{"location":"gitopssets/templating-from-generators/#single-element-for-matrix","title":"Single Element for Matrix","text":"A matrix generator will normally generate a cartesian result, but you can also generate a single result.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: single-element-matrix-sample\nspec:\ngenerators:\n- matrix:\nsingleElement: true\ngenerators:\n- name: staging\ncluster:\nselector:\nmatchLabels:\nenv: staging\n- name: production\ncluster:\nselector:\nmatchLabels:\nenv: production\n
This would query for clusters matching the respective labels.
The resulting output would look like this (in YAML):
- production:\n- ClusterAnnotations: {}\nClusterLabels:\nenv: production\nClusterName: production-cluster1\nClusterNamespace: clusters\n- ClusterAnnotations: {}\nClusterLabels:\nenv: production\nClusterName: production-cluster2\nClusterNamespace: clusters\nstaging:\n- ClusterAnnotations: {}\nClusterLabels:\nenv: staging\nClusterName: staging-cluster1\nClusterNamespace: clusters\n- ClusterAnnotations: {}\nClusterLabels:\nenv: staging\nClusterName: staging-cluster2\nClusterNamespace: clusters\n
Compare this with the alternative without the singleElement
flag:
- production:\nClusterAnnotations: {}\nClusterLabels:\nenv: production\nClusterName: production-cluster1\nClusterNamespace: clusters\nstaging:\nClusterAnnotations: {}\nClusterLabels:\nenv: staging\nClusterName: staging-cluster1\nClusterNamespace: clusters\n- production:\nClusterAnnotations: {}\nClusterLabels:\nenv: production\nClusterName: production-cluster2\nClusterNamespace: clusters\nstaging:\nClusterAnnotations: {}\nClusterLabels:\nenv: staging\nClusterName: staging-cluster1\nClusterNamespace: clusters\n- production:\nClusterAnnotations: {}\nClusterLabels:\nenv: production\nClusterName: production-cluster1\nClusterNamespace: clusters\nstaging:\nClusterAnnotations: {}\nClusterLabels:\nenv: staging\nClusterName: staging-cluster2\nClusterNamespace: clusters\n- production:\nClusterAnnotations: {}\nClusterLabels:\nenv: production\nClusterName: production-cluster2\nClusterNamespace: clusters\nstaging:\nClusterAnnotations: # omitted\nClusterLabels:\nenv: staging\nClusterName: staging-cluster2\nClusterNamespace: clusters\n
In the singleElement
case, there is only one generated element, only one template will be rendered for each content item.
If the Matrix generators are unnamed, they will be grouped under a top-level .Matrix
name.
This generator is configured to poll an HTTP endpoint and parse the result as the generated values.
This will poll an endpoint on the configured interval. Instead of using the simpler PullRequest generator, you can access GitHub's API directly with the APIClient generator.
The PullRequest generator is simpler to use, and works across multiple different git-providers.
The GitHub documentation for the API endpoint shows:
curl \\\n-H \"Accept: application/vnd.github+json\" \\\n-H \"Authorization: Bearer <YOUR-TOKEN>\"\\\n-H \"X-GitHub-Api-Version: 2022-11-28\" \\\nhttps://api.github.com/repos/OWNER/REPO/pulls\n
This can be translated into...
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nlabels:\napp.kubernetes.io/name: gitopsset\napp.kubernetes.io/instance: gitopsset-sample\napp.kubernetes.io/part-of: gitopssets-controller\napp.kubernetes.io/managed-by: kustomize\napp.kubernetes.io/created-by: gitopssets-controller\nname: api-client-sample\nspec:\ngenerators:\n- apiClient:\ninterval: 5m\nendpoint: https://api.github.com/repos/bigkevmcd/go-demo/pulls\nheadersRef:\nname: github-secret\nkind: Secret\ntemplates:\n- content:\napiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: GitRepository\nmetadata:\nname: \"pr-{{ .Element.id | toJson}}-gitrepository\"\nnamespace: default\nspec:\ninterval: 5m0s\nurl: \"{{ .Element.head.repo.clone_url }}\"\nref:\nbranch: \"{{ .Element.head.ref }}\"\n- content:\napiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nkind: Kustomization\nmetadata:\nname: \"pr-{{ .Element.id | toJson }}-demo\"\nnamespace: default\nspec:\ninterval: 5m\npath: \"./examples/kustomize/environments/dev\"\nprune: true\ntargetNamespace: \"{{ .Element.head.ref }}-ns\"\nsourceRef:\nkind: GitRepository\nname: \"pr-{{ .Element.id | toJson }}-gitrepository\"\n
As with the Pull Request generator, this also requires a secret token to be able to access the API
We need to pass this as an HTTP header.
apiVersion: v1\nkind: Secret\nmetadata:\nname: github-secret\nnamespace: default\ntype: Opaque\nstringData:\nAccept: application/vnd.github+json\nAuthorization: Bearer ghp_<redacted>\nX-GitHub-Api-Version: \"2022-11-28\"\n
The keys in the secret match the command-line example using curl.
Unlike the Pull Request generator, you need to figure out the paths to the elements yourself.
"},{"location":"gitopssets/templating-from-generators/#apiclient-jsonpath","title":"APIClient JSONPath","text":"Not all APIs return an array of JSON objects, sometimes it's nested within a result type structure e.g.
{\n\"things\": [\n{\n\"env\": \"dev\",\n\"team\": \"dev-team\"\n},\n{\n\"env\": \"production\",\n\"team\": \"ops-team\"\n},\n{\n\"env\": \"staging\",\n\"team\": \"ops-team\"\n}\n]\n}\n
You can use JSONPath to extract the fields from this data...
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nlabels:\napp.kubernetes.io/name: gitopsset\napp.kubernetes.io/instance: gitopsset-sample\napp.kubernetes.io/part-of: gitopssets-controller\napp.kubernetes.io/managed-by: kustomize\napp.kubernetes.io/created-by: gitopssets-controller\nname: api-client-sample\nspec:\ngenerators:\n- apiClient:\ninterval: 5m\nendpoint: https://api.example.com/demo\njsonPath: \"{ $.things }\"\n
This will generate three maps for templates, with just the env and team keys.
"},{"location":"gitopssets/templating-from-generators/#apiclient-post-body","title":"APIClient POST body","text":"Another piece of functionality in the APIClient generator is the ability to POST JSON to the API.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nlabels:\napp.kubernetes.io/name: gitopsset\napp.kubernetes.io/instance: gitopsset-sample\napp.kubernetes.io/part-of: gitopssets-controller\napp.kubernetes.io/managed-by: kustomize\napp.kubernetes.io/created-by: gitopssets-controller\nname: api-client-sample\nspec:\ngenerators:\n- apiClient:\ninterval: 5m\nendpoint: https://api.example.com/demo\nbody:\nname: \"testing\"\nvalue: \"testing2\"\n
This will send a request body as JSON (Content-Type \"application/json\") to the server and interpret the result.
The JSON body sent will look like this:
{ \"name\": \"testing\", \"value\": \"testing2\" }\n
"},{"location":"gitopssets/templating-from-generators/#apiclient-simple-results","title":"APIClient simple results","text":"Instead of using the JSONPath to extract from a complex structure, you can configure the result to be a single element.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nlabels:\napp.kubernetes.io/name: gitopsset\napp.kubernetes.io/instance: gitopsset-sample\napp.kubernetes.io/part-of: gitopssets-controller\napp.kubernetes.io/created-by: gitopssets-controller\nname: api-client-sample\nspec:\ngenerators:\n- apiClient:\nsingleElement: true\ninterval: 5m\nendpoint: https://api.example.com/demo\n
Whatever result is parsed from the API endpoint will be returned as a map in a single element.
For generation, you might need to use the repeat
mechanism to generate repeating results.
If the API endpoint you are accessing requires a custom CA you can provide this via the secret field.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nlabels:\napp.kubernetes.io/name: gitopsset\napp.kubernetes.io/instance: gitopsset-sample\napp.kubernetes.io/part-of: gitopssets-controller\napp.kubernetes.io/created-by: gitopssets-controller\nname: api-client-sample\nspec:\ngenerators:\n- apiClient:\nsingleElement: true\ninterval: 5m\nendpoint: https://api.example.com/demo\nsecretRef:\nname: https-ca-credentials\n
This secret should look like this:
---\napiVersion: v1\nkind: Secret\nmetadata:\nname: https-ca-credentials\ntype: Opaque\ndata:\ncaFile: <BASE64>\n
The request will be made with the custom CA.
"},{"location":"gitopssets/templating-from-generators/#cluster-generator","title":"Cluster generator","text":"The cluster generator generates from in-cluster GitOpsCluster resources.
For example, this GitOpsSet
will generate a Kustomization
resource for each cluster matching the Label selector.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: cluster-sample\nspec:\ngenerators:\n- cluster:\nselector:\nmatchLabels:\nenv: dev\nteam: dev-team\ntemplates:\n- content:\nkind: Kustomization\napiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nmetadata:\nname: \"{{ .Element.ClusterName }}-demo\"\nlabels:\napp.kubernetes.io/name: go-demo\napp.kubernetes.io/instance: \"{{ .Element.ClusterName }}\"\ncom.example/team: \"{{ .Element.ClusterLabels.team }}\"\nspec:\ninterval: 5m\npath: \"./examples/kustomize/environments/{{ .Element.ClusterLabels.env }}\"\nprune: true\nsourceRef:\nkind: GitRepository\nname: go-demo-repo\n
The following fields are generated for each GitOpsCluster.
ClusterName
the name of the cluster. ClusterNamespace
the namespace that this cluster is from. ClusterLabels
the labels from the metadata field on the GitOpsCluster. ClusterAnnotations
the annotations from the metadata field on the GitOpsCluster. If the selector is not provided, all clusters from all namespaces will be returned:
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: cluster-sample\nspec:\ngenerators:\n- cluster: {}\n
Otherwise if the selector is empty, no clusters will be generated:
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: cluster-sample\nspec:\ngenerators:\n- cluster:\nselector: {}\n
"},{"location":"gitopssets/templating-from-generators/#imagepolicy-generator","title":"ImagePolicy generator","text":"The ImagePolicy
generator works with the Flux Image Automation.
When an ImagePolicy
is updated, this will trigger a regeneration of templates.
The generated elements have the following fields:
ImagePolicy
ImagePolicy
This can be used simply, to create a deployment with an image...or, combined with a Matrix generator, to manage multiple workloads with the same image.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: imagepolicy-example\nnamespace: default\nspec:\ngenerators:\n- imagePolicy:\npolicyRef: podinfo\ntemplates:\n- content:\nkind: ConfigMap\napiVersion: v1\nmetadata:\nname: \"demo-configmap\"\ndata:\nimage: \"{{ .Element.latestImage }}\"\n
In this example, a ConfigMap
is generated containing the latest image whenever an ImagePolicy
called podinfo
is updated.
Combined in a Matrix, like this, it will generate two ConfigMaps
with the values.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: imagepolicy-matrix-example\nnamespace: default\nspec:\ngenerators:\n- matrix:\ngenerators:\n- imagePolicy:\npolicyRef: podinfo\n- list:\nelements:\n- cluster: dev-cluster\nversion: 1.0.0\n- cluster: prod-cluster\nversion: 1.0.0\ntemplates:\n- content:\nkind: ConfigMap\napiVersion: v1\nmetadata:\nname: \"demo-configmap-{{ .Element.cluster }}\"\ndata:\nimage: \"{{ .Element.latestImage }}\"\ncluster: \"{{ .Element.cluster }}\"\nversion: \"{{ .Element.version }}\"\n
The resulting ConfigMaps look like this:
$ kubectl get configmaps\nNAME DATA AGE\ndemo-configmap-dev-cluster 3 3m19s\ndemo-configmap-prod-cluster 3 3m19s\n
With the templated fields like this:
apiVersion: v1\nkind: ConfigMap\nmetadata:\nname: demo-configmap-dev-cluster\nnamespace: default\ndata:\ncluster: dev-cluster\nimage: stefanprodan/podinfo:5.1.4\nversion: 1.0.0\n
apiVersion: v1\nkind: ConfigMap\nmetadata:\nname: demo-configmap-prod-cluster\nnamespace: default\ndata:\ncluster: prod-cluster\nimage: stefanprodan/podinfo:5.1.4\nversion: 1.0.0\n
"},{"location":"gitopssets/templating-from-generators/#config-generator","title":"Config generator","text":"The Config
generator works with Kubernetes ConfigMaps and Secrets.
When a ConfigMap
or Secret
is updated, this will trigger a regeneration of templates.
This can be used simply, to create a resource with a config variable...or, combined with a Matrix generator, to manage multiple workloads with the same values.
With the existing ConfigMap
apiVersion: v1\nkind: ConfigMap\nmetadata:\nname: test-cm\ndata:\nname: test-config\ndemo: test-value\n
And the GitOpsSet below apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: config-sample\nspec:\ngenerators:\n- config:\nkind: ConfigMap\nname: test-cm\ntemplates:\n- content:\nkind: ConfigMap\napiVersion: v1\nmetadata:\nname: \"{{ .Element.name }}-demo\"\nlabels:\napp.kubernetes.io/name: go-demo\napp.kubernetes.io/instance: \"{{ .Element.name }}\"\ndata:\ngeneratedValue: \"{{ .Element.demo }}\"\n
In this example, a new ConfigMap
is generated containing the value of the \"demo\" field from the existing ConfigMap
test-cm. As with the other generators, the Config
generator can be combined with other generators:
This will generate two ConfigMaps
with the values.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: imagepolicy-matrix-example\nnamespace: default\nspec:\ngenerators:\n- matrix:\ngenerators:\n- config:\nkind: ConfigMap\nname: test-cm\n- list:\nelements:\n- cluster: dev-cluster\nversion: 1.0.0\n- cluster: prod-cluster\nversion: 1.0.0\ntemplates:\n- content:\nkind: ConfigMap\napiVersion: v1\nmetadata:\nname: \"demo-configmap-{{ .Element.cluster }}\"\ndata:\ngeneratedValue: \"{{ .Element.demo }}\"\ncluster: \"{{ .Element.cluster }}\"\nversion: \"{{ .Element.version }}\"\n
The resulting ConfigMaps look like this:
$ kubectl get configmaps\nNAME DATA AGE\ndemo-configmap-dev-cluster 3 3m19s\ndemo-configmap-prod-cluster 3 3m19s\n
With the templated fields like this:
apiVersion: v1\nkind: ConfigMap\nmetadata:\nname: demo-configmap-dev-cluster\nnamespace: default\ndata:\ncluster: dev-cluster\ngeneratedValue: test-value\nversion: 1.0.0\n
apiVersion: v1\nkind: ConfigMap\nmetadata:\nname: demo-configmap-prod-cluster\nnamespace: default\ndata:\ncluster: prod-cluster\ngeneratedValue: test-value\nversion: 1.0.0\n
"},{"location":"gitopssets/templating-from-generators/#templating-functions","title":"Templating functions","text":"Currently, the Sprig functions are available in the templating, with some functions removed[^sprig] for security reasons.
In addition, we also provide two additional functions:
.Element
or defaults to another value. The examples below assume an element that looks like this:
{\n\"team\": \"engineering dev\"\n}\n
"},{"location":"gitopssets/templating-from-generators/#sanitize-template-function","title":"sanitize template function","text":"And a template that looks like this:
kind: Service\nmetadata:\nname: {{ sanitize .Element.team }}-demo\n
This would output:
kind: Service\nmetadata:\nname: engineeringdev-demo\n
"},{"location":"gitopssets/templating-from-generators/#getordefault","title":"getordefault","text":"For template that looks like this:
kind: Service\nmetadata:\nname: {{ getordefault .Element \"name\" \"defaulted\" }}-demo\n
This would output:
kind: Service\nmetadata:\nname: defaulted-demo\n
If the key to get does exist in the .Element
it will be inserted; the \"default\" is only inserted if it doesn't exist.
Warning
Generating resources and applying them directly into your cluster can be dangerous to the health of your cluster.
This is especially true for the GitRepository
generator, where it may not be obvious to the author of the files, or the author of the template the consequences of the template rendering.
The default ServiceAccount
that is used by the gitopssets-controller is extremely limited and cannot create resources; you will need to explicitly grant permissions to create any of the resources you declare in the template. Missing permissions will appear in the controller logs.
It is not recommended that you create a role with blanket permissions; under the right circumstances, someone could accidentally or maliciously overwrite the cluster control-plane, which could be very dangerous.
"},{"location":"gitopssets/templating-from-generators/#limiting-via-service-accounts","title":"Limiting via service-accounts","text":"You can configure the service-account that is used to create resources.
apiVersion: templates.weave.works/v1alpha1\nkind: GitOpsSet\nmetadata:\nname: matrix-sample\nspec:\n# the controller will impersonate this service account\nserviceAccountName: test-sa\ngenerators:\n- list:\nelements:\n- env: dev\nteam: dev-team\n- env: production\nteam: ops-team\n- env: staging\nteam: ops-team\ntemplates:\n- content:\nkind: Kustomization\napiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nmetadata:\nname: \"{{ .Element.env }}-demo\"\nlabels:\napp.kubernetes.io/name: go-demo\napp.kubernetes.io/instance: \"{{ .Element.env }}\"\ncom.example/team: \"{{ .Element.team }}\"\nspec:\ninterval: 5m\npath: \"./examples/kustomize/environments/{{ .Element.env }}\"\nprune: true\nsourceRef:\nkind: GitRepository\nname: go-demo-repo\n
"},{"location":"gitopssets/templating-from-generators/#gitopsset-controller-configuration","title":"gitopsset-controller configuration","text":"The enabled generators can be configured via the --enabled-generators
flag, which takes a comma separated list of generators to enable.
The default is to enable all generators.
For example to enable only the List
and GitRepository
generators:
--enabled-generators=List,GitRepository\n
When a GitOpsSet that uses disabled generators is created, the disabled generators will be silently ignored.
"},{"location":"gitopssets/templating-from-generators/#kubernetes-process-limits","title":"Kubernetes Process Limits","text":"GitOpsSets can be memory-hungry, for example, the Matrix generator will generate a cartesian result with multiple copies of data.
The OCI and GitRepository generators will extract tarballs, the API Generator queries upstream APIs and parses the JSON, and the Config generators will load Secret
and ConfigMap
resources, all these can lead to using significant amounts of memory.
Extracting tarballs can also prove to be CPU intensive, especially where there are lots of files, and you have a very frequent regeneration period.
To this end, you will need to monitor the controller metrics, and maybe increase the limits available to the controller.
For example, to increase the amount of memory available to the controller:
resources:\nlimits:\ncpu: 1000m\nmemory: 2Gi\nrequests:\ncpu: 100m\nmemory: 64Mi\n
"},{"location":"gitopssets/templating-from-generators/#notifications","title":"Notifications","text":"Events are enabled which will trigger Kubernetes events when successful reconciliation occurs with a Normal
event or when reconciliation fails with an Error
event. Fluxcd's Events package is used including the EventRecorder
to record these events.
To configure receiving the recorded events on a specific host, this can be provided via the --events-addr
flag in RUN_ARGS
when starting the controller. This can be any HTTP endpoint.
See fluxcd event for the struct of the event created.
[^yaml]: These are written as YAML mappings [^sprig]: The following functions are removed \"env\", \"expandenv\", \"getHostByName\", \"genPrivateKey\", \"derivePassword\", \"sha256sum\", \"base\", \"dir\", \"ext\", \"clean\", \"isAbs\", \"osBase\", \"osDir\", \"osExt\", \"osClean\", \"osIsAbs\"
"},{"location":"guides/anonymous-access/","title":"Anonymous Access","text":"Important
Alone, this is an insecure method of securing your dashboard.
It is designed to be used with other external authentication systems like auth proxies.
"},{"location":"guides/anonymous-access/#configuring-anonymous-access","title":"Configuring Anonymous access","text":"Set the following values in the Helm Chart:
#\nadditionalArgs:\n- --insecure-no-authentication-user=gitops-test-user\n#\n
The value of the --insecure-no-authentication-user
flag is the kubernetes User
to be impersonated to make requests into the cluster.
When this flag is set all other authentication methods (e.g. those specified via --auth-methods
) are disabled.
No login screen will be displayed when accessing the dashboard.
"},{"location":"guides/anonymous-access/#example-clusterrole","title":"Example ClusterRole","text":"You can bind the user provided to a ClusterRole with a ClusterRoleBinding.
---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\nname: minimum-weavegitops-role\nrules:\n- apiGroups: [\"\"]\nresources: [\"secrets\",\"pods\",\"events\"]\nverbs: [\"get\",\"list\"]\n- apiGroups: [\"apps\"]\nresources: [\"deployments\", \"replicasets\"]\nverbs: [\"get\",\"list\"]\n- apiGroups: [\"kustomize.toolkit.fluxcd.io\"]\nresources: [\"kustomizations\"]\nverbs: [\"get\",\"list\"]\n- apiGroups: [\"helm.toolkit.fluxcd.io\"]\nresources: [\"helmreleases\"]\nverbs: [\"get\",\"list\"]\n- apiGroups: [\"source.toolkit.fluxcd.io\"]\nresources: [\"*\"]\nverbs: [\"get\",\"list\"]\n- apiGroups: [\"\"]\nresources: [\"events\"]\nverbs: [\"get\",\"list\",\"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\nname: gitops-test-user-binding\nroleRef:\napiGroup: rbac.authorization.k8s.io\nkind: ClusterRole\nname: minimum-weavegitops-role\nsubjects:\n- kind: User\nname: gitops-test-user\n
This would allow access to any resource.
"},{"location":"guides/displaying-custom-metadata/","title":"Displaying Custom Metadata","text":"Weave GitOps lets you add annotations with custom metadata to your Flux automations and sources, and they will be displayed in the main UI.
For example, you might use this to add links to dashboards, issue systems, or documentation and comments that you wish to be directly visible in the GitOps UI.
We will use the podinfo
application that we installed in the getting started guide as an example. Open up the podinfo kustomization and add annotations to it so it looks like this:
---\napiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nkind: Kustomization\nmetadata:\nname: podinfo\nnamespace: flux-system\n// highlight-start\nannotations:\nmetadata.weave.works/description: |\nPodinfo is a tiny web application made with Go that showcases best practices of running microservices in Kubernetes.\nPodinfo is used by CNCF projects like Flux and Flagger for end-to-end testing and workshops.\nmetadata.weave.works/grafana-dashboard: https://grafana.my-org.example.com/d/podinfo-dashboard\n// highlight-end\nspec:\ninterval: 5m0s\npath: ./kustomize\nprune: true\nsourceRef:\nkind: GitRepository\nname: podinfo\ntargetNamespace: flux-system\n
Close the file and commit and push your changes.
Back in your GitOps dashboard, navigate to the 'Applications' tab and select the podinfo
kustomization. At the bottom of the 'Details' section you will see the new 'Metadata' entries:
Restrictions
metadata.weave.works
. Any other annotations will be ignored.metadata.weave.works/grafana-dashboard
was displayed as \"Grafana Dashboard\".-._
.We are very excited for the release of the Flux v2.0 GA!
This guide aims to answer some common questions before starting the upgrade, and provides step-by-step instructions.
"},{"location":"guides/fluxga-upgrade/#before-starting-the-upgrade","title":"Before Starting the Upgrade","text":"Useful terms used in this guide:
Flux Beta or Flux v0.x
as the latest Flux Beta Release.Flux GA
as the latest Flux GA Release CandidateWeave GitOps
as the latest Weave GitOps Enterprise releaseHere you can find the most common questions around upgrading.
"},{"location":"guides/fluxga-upgrade/#why-upgrade-to-flux-ga","title":"Why Upgrade to Flux GA","text":"Although Flux Beta APIs have been stable and used in production for quite some time, Flux GA is the main supported API version for new features and development. Features like horizontal scaling are only available in Flux GA. Also, beta APIs will be removed after six months.
"},{"location":"guides/fluxga-upgrade/#can-i-use-weave-gitops-with-flux-ga","title":"Can I Use Weave GitOps with Flux GA?","text":"Yes. This has been possible since Weave Gitops v0.22.0. Use the latest available release for the best experience.
"},{"location":"guides/fluxga-upgrade/#can-i-use-weave-gitops-enterprise-with-flux-ga","title":"Can I Use Weave GitOps Enterprise with Flux GA?","text":"Yes. This has been possible since Weave GitOps Enterprise v0.22.0. Use the latest available release for the best experience.
The following limitations are knowns by version:
"},{"location":"guides/fluxga-upgrade/#v0230-onwards","title":"v0.23.0 onwards","text":"No limitations
"},{"location":"guides/fluxga-upgrade/#v0220","title":"v0.22.0","text":"If you are using GitOpsSets, upgrade that component to v0.10.0 for Flux GA compatibility. Update the Weave GitOps Enterprise HelmRelease values to use the new version.
gitopssets-controller:\ncontrollerManager:\nmanager:\nimage:\ntag: v0.10.0\n
"},{"location":"guides/fluxga-upgrade/#can-i-use-weave-gitops-with-flux-v2-0x-pre-ga-versions","title":"Can I Use Weave GitOps with Flux v2 0.x (pre-GA versions)?","text":"As of Weave GitOps v0.29, only Flux v2.0 GA is supported. Please follow the Upgrade section to help you with the process.
Earlier versions of Weave GitOps work with both Flux v2 GA and Flux v2 0.x (the pre-GA ones), but it is encouraged that you upgrade to the latest version for the best experience.
"},{"location":"guides/fluxga-upgrade/#upgrade","title":"Upgrade","text":"Hosted flux?
If you are using a hosted Flux version, please check with your provider if they support Flux GA before upgrading following this guide. Known hosted Flux providers:
As of writing they do not yet support the new version, so please wait before upgrading to Flux GA.
Below, we'll take you through the multiple steps required to migrate your system to Flux GA. After each step the cluster will be in a working state, so you can take your time to complete the migration.
ClusterBootstrapConfig
s.Follow the upgrade instructions from the Flux v2.0.0 release notes.
At minimum, you'll need to rerun the flux bootstrap
command on your leaf clusters and management clusters.
You'll also need to bump API versions in your manifests to v1
as described in the Flux upgrade instructions:
Bumping the APIs version in manifests can be done gradually. It is advised to not delay this procedure as the beta versions will be removed after 6 months.
At this stage all clusters are running Flux GA.
"},{"location":"guides/fluxga-upgrade/#2-upgrade-to-flux-ga-in-clusterbootstrapconfigs","title":"2. Upgrade to Flux GA in ClusterBootstrapConfigs","text":"First, we ensure any new clusters are bootstrapped with Flux GA. Then we'll upgrade the existing clusters.
ClusterBootstrapConfig
will most often contain an invocation of flux bootstrap
. Make sure the image is using v2
.
diff --git a/tools/dev-resources/user-guide/cluster-bootstrap-config.yaml b/tools/dev-resources/user-guide/cluster-bootstrap-config.yaml\nindex bd41ec036..1b21df860 100644\n--- a/tools/dev-resources/user-guide/cluster-bootstrap-config.yaml\n+++ b/tools/dev-resources/user-guide/cluster-bootstrap-config.yaml\n@@ -1,34 +1,34 @@\napiVersion: capi.weave.works/v1alpha1\nkind: ClusterBootstrapConfig\nmetadata:\nname: capi-gitops\nnamespace: default\nspec:\nclusterSelector:\n matchLabels:\n weave.works/capi: bootstrap\njobTemplate:\n generateName: \"run-gitops-{{ .ObjectMeta.Name }}\"\n spec:\n containers:\n- - image: ghcr.io/fluxcd/flux-cli:v0.34.0\n+ - image: ghcr.io/fluxcd/flux-cli:v2.0.0\n name: flux-bootstrap\n ...\n
At this stage, your new bootstrapped clusters will run Flux GA.
"},{"location":"guides/fluxga-upgrade/#3-upgrade-to-latest-wge","title":"3. Upgrade to latest WGE","text":"Use your regular WGE upgrade procedure to bring it to the latest version
At this stage you have Weave GitOps running Flux GA.
"},{"location":"guides/fluxga-upgrade/#4-upgrade-gitopstemplates-gitopssets-and-clusterbootstrapconfigs","title":"4. Upgrade GitOpsTemplates, GitOpsSets, and ClusterBootstrapConfigs","text":"Bumping the APIs version in manifests can be done gradually. We advise against delaying this procedure as the Beta versions will be removed after six months.
"},{"location":"guides/fluxga-upgrade/#gitopstemplate-and-capitemplate","title":"GitOpsTemplate
and CAPITemplate
","text":"Update GitRepository
and Kustomization
CRs in the spec.resourcetemplates
to v1
as described in the flux upgrade instructions.
GitOpsSets
","text":"Update GitRepository
and Kustomization
CRs in the spec.template
of your GitOpsSet
resources to v1
as described in the Flux upgrade instructions.
If you haven't done it yet, plan to update your Kustomization
, GitRepository
and Receiver
resources to v1
, you can also upgrade to the future release of Flux that will drop support for < v1
APIs.
If you find any issues, please let us know via support.
"},{"location":"open-source/aws-marketplace/","title":"AWS Marketplace","text":""},{"location":"open-source/aws-marketplace/#aws-marketplace","title":"AWS Marketplace","text":"Weave GitOps is also available via the AWS Marketplace.
The following steps will allow you to deploy the Weave GitOps product to an EKS cluster via a Helm Chart.
These instructions presume you already have installed kubectl
, eksctl
, helm
and the Helm S3 Plugin.
To deploy the managed Weave GitOps solution, first subscribe to the product on AWS Marketplace.
Note: it may take ~20 minutes for your Subscription to become live and deployable.
"},{"location":"open-source/aws-marketplace/#step-2-configure-an-eks-cluster","title":"Step 2: Configure an EKS Cluster","text":"Create a new EKS ClusterUse an existing EKS ClusterIf you do not have a cluster on EKS, you can use eksctl
to create one.
Copy the contents of the sample file below into cluster-config.yaml
and replace the placeholder values with your settings. See the eksctl
documentation for more configuration options.
---\napiVersion: eksctl.io/v1alpha5\nkind: ClusterConfig\nmetadata:\nname: CLUSTER_NAME # Change this\nregion: REGION # Change this\n\n# This section is required\niam:\nwithOIDC: true\nserviceAccounts:\n- metadata:\nname: wego-service-account # Altering this will require a corresponding change in a later command\nnamespace: flux-system\nroleOnly: true\nattachPolicy:\nVersion: \"2012-10-17\"\nStatement:\n- Effect: Allow\nAction:\n- \"aws-marketplace:RegisterUsage\"\nResource: '*'\n\n# This section will create a single Managed nodegroup with one node.\n# Edit or remove as desired.\nmanagedNodeGroups:\n- name: ng1\ninstanceType: m5.large\ndesiredCapacity: 1\n
Create the cluster:
eksctl create cluster -f cluster-config.yaml\n
In order to use the Weave GitOps container product, your cluster must be configured to run containers with the correct IAM Policies.
The recommended way to do this is via IRSA.
Use this eksctl
configuration below (replacing the placeholder values) to: - Associate an OIDC provider - Create the required service account ARN
Save the example below as oidc-config.yaml
---\napiVersion: eksctl.io/v1alpha5\nkind: ClusterConfig\nmetadata:\nname: CLUSTER_NAME # Change this\nregion: REGION # Change this\n\n# This section is required\niam:\nwithOIDC: true\nserviceAccounts:\n- metadata:\nname: wego-service-account # Altering this will require a corresponding change in a later command\nnamespace: flux-system\nroleOnly: true\nattachPolicy:\nVersion: \"2012-10-17\"\nStatement:\n- Effect: Allow\nAction:\n- \"aws-marketplace:RegisterUsage\"\nResource: '*'\n
eksctl utils associate-iam-oidc-provider -f oidc-config.yaml --approve\neksctl create iamserviceaccount -f oidc-config.yaml --approve\n
"},{"location":"open-source/aws-marketplace/#step-3-fetch-the-service-account-role-arn","title":"Step 3: Fetch the Service Account Role ARN","text":"First retrieve the ARN of the IAM role which you created for the wego-service-account
:
# replace the placeholder values with your configuration\n# if you changed the service account name from wego-service-account, update that in the command\nexport SA_ARN=$(eksctl get iamserviceaccount --cluster <cluster-name> --region <region> | awk '/wego-service-account/ {print $3}')\n\necho $SA_ARN\n# should return\n# arn:aws:iam::<account-id>:role/eksctl-<cluster-name>-addon-iamserviceaccount-xxx-Role1-1N41MLVQEWUOF\n
This value will also be discoverable in your IAM console, and in the Outputs of the Cloud Formation template which created it.
"},{"location":"open-source/aws-marketplace/#step-4-install-weave-gitops","title":"Step 4: Install Weave GitOps","text":"Copy the Chart URL from the Usage Instructions in AWS Marketplace, or download the file from the Deployment template to your workstation.
To be able to log in to your new installation, you need to set up authentication. Create a new file values.yaml
where you set your username, and a bcrypt hash of your desired password, like so:
gitops:\nadminUser:\ncreate: true\nusername: <UPDATE>\npasswordHash: <UPDATE>\n
Then install it:
Using the default Service Account NameUsing a configured Service Account Namehelm install wego <URL/PATH> \\\n--namespace=flux-system \\\n--create-namespace \\\n--set serviceAccountRole=\"$SA_ARN\" \\\n--values ./values.yaml\n
helm install wego <URL/PATH> \\\n--namespace=flux-system \\\n--create-namespace \\\n--set serviceAccountName='<name>' \\\n--set serviceAccountRole=\"$SA_ARN\" \\\n--values ./values.yaml\n
"},{"location":"open-source/aws-marketplace/#step-5-check-your-installation","title":"Step 5: Check your installation","text":"Run the following from your workstation:
kubectl get pods -n flux-system\n# you should see something like the following returned\nflux-system helm-controller-5b96d94c7f-tds9n 1/1 Running 0 53s\nflux-system kustomize-controller-8467b8b884-x2cpd 1/1 Running 0 53s\nflux-system notification-controller-55f94bc746-ggmwc 1/1 Running 0 53s\nflux-system source-controller-78bfb8576-stnr5 1/1 Running 0 53s\nflux-system wego-metering-f7jqp 1/1 Running 0 53s\nflux-system ww-gitops-weave-gitops-5bdc9f7744-vkh65 1/1 Running 0 53s\n
Your Weave GitOps installation is now ready!
"},{"location":"open-source/deploy-oss/","title":"Step 3: Deploy an Application","text":"Now that you have a feel for how to navigate the dashboard, let's deploy a new application. In this section we will use podinfo as our sample web application.
"},{"location":"open-source/deploy-oss/#deploying-podinfo","title":"Deploying podinfo","text":"git clone https://github.com/$GITHUB_USER/fleet-infra\ncd fleet-infra\n
GitRepository
Source for podinfo. This will allow you to use different authentication methods for different repositories.flux create source git podinfo \\\n --url=https://github.com/stefanprodan/podinfo \\\n --branch=master \\\n --interval=30s \\\n --export > ./clusters/management/podinfo-source.yaml\n
More information about GitRepository
is available here.
If you get stuck here, try the ls
command to list your files and directories. If that doesn\u2019t work, try ls -l ./clusters
.
podinfo-source
to your fleet-infra
repositorygit add -A && git commit -m \"Add podinfo source\"\ngit push\n
kustomization
to build and apply the podinfo manifestflux create kustomization podinfo \\\n --target-namespace=flux-system \\\n --source=podinfo \\\n --path=\"./kustomize\" \\\n --prune=true \\\n --interval=5m \\\n --export > ./clusters/management/podinfo-kustomization.yaml\n
podinfo-kustomization
to your fleet-infra
repositorygit add -A && git commit -m \"Add podinfo kustomization\"\ngit push\n
"},{"location":"open-source/deploy-oss/#view-the-application-in-weave-gitops","title":"View the Application in Weave GitOps","text":"Flux will detect the updated fleet-infra
and add podinfo. Navigate back to the dashboard to make sure that the podinfo application appears.
Click on podinfo to find details about the deployment. There should be two pods available.
Info
Podinfo comes with a HorizontalPodAutoscaler, which uses the metrics-server
. We don't use the metrics-server
in this tutorial, but note that it's the reason why HorizontalPodAutoscaler will report as Not ready
in your dashboard. We recommend ignoring the warning.
To customize a deployment from a repository you don\u2019t control, you can use Flux in-line patches. The following example shows how to use in-line patches to change the podinfo deployment.
patches
section as shown below to the field spec of your podinfo-kustomization.yaml
file so it looks like this:---\napiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nkind: Kustomization\nmetadata:\nname: podinfo\nnamespace: flux-system\nspec:\ninterval: 60m0s\npath: ./kustomize\nprune: true\nsourceRef:\nkind: GitRepository\nname: podinfo\ntargetNamespace: flux-system\n// highlight-start\npatches:\n- patch: |-\napiVersion: autoscaling/v2beta2\nkind: HorizontalPodAutoscaler\nmetadata:\nname: podinfo\nspec:\nminReplicas: 3\ntarget:\nname: podinfo\nkind: HorizontalPodAutoscaler\n// highlight-end\n
podinfo-kustomization.yaml
changes:git add -A && git commit -m \"Increase podinfo minimum replicas\"\ngit push\n
Suspending updates to a kustomization allows you to directly edit objects applied from a kustomization, without your changes being reverted by the state in Git.
To suspend updates for a kustomization, from the details page, click on the suspend button at the top, and you should see it be suspended:
This shows in the applications view with a yellow warning status indicating it is now suspended
To resume updates, go back to the details page, click the resume button, and after a few seconds reconciliation will continue.
"},{"location":"open-source/deploy-oss/#delete-podinfo","title":"Delete Podinfo","text":"To delete Podinfo in the GitOps way, run this command from the root of your working directory:
rm ./clusters/management/podinfo-kustomization.yaml\n rm ./clusters/management/podinfo-source.yaml\n git add -A && git commit -m \"Remove podinfo kustomization and source\"\n git push\n
"},{"location":"open-source/deploy-oss/#complete","title":"Complete!","text":"Congratulations \ud83c\udf89\ud83c\udf89\ud83c\udf89
You've now completed the getting started guide. We welcome any and all feedback, so please let us know how we could have made your experience better.
"},{"location":"open-source/install-oss/","title":"Step 1: Install Weave GitOps Open Source on Your Cluster","text":"Tip
These instructions only apply to Weave GitOps Open Source. To install Weave GitOps Enterprise, go here.
This page covers Weave GitOps Open Source installation and is adapted from the Flux - Getting Started guide.
If you haven't already, please check out our Introduction to Weave GitOps page for additional information about Weave GitOps Open Source as well as our Enterprise version.
"},{"location":"open-source/install-oss/#prerequisites","title":"Prerequisites","text":"Before you can install Weave GitOps Open Source, you will need:
fleet-infra
. To create this, follow GitHub\u2019s instructions\u2014using fleet-infra
instead of hello-world
.We also recommend taking a look at the Flux Core Concepts page if you need to brush up on terminology.
"},{"location":"open-source/install-oss/#check-your-clusters-kubernetes-version","title":"Check your Cluster's Kubernetes Version","text":"No matter which version of Weave GitOps you install, you need to have a Kubernetes cluster up and running. We test Weave GitOps against the latest supported Kubernetes releases.
Note that the version of Flux that you use might impose further minimum version requirements.
"},{"location":"open-source/install-oss/#install-flux","title":"Install Flux","text":"Weave GitOps is an extension to Flux. Therefore, it requires that Flux 0.32 or a later version has already been installed on your Kubernetes cluster. Full documentation is available here.
In this section we are going to do the following:
./clusters/my-cluster/
in the repositoryLet's get into it...
"},{"location":"open-source/install-oss/#install-the-flux-cli","title":"Install the Flux CLI","text":"brew install fluxcd/tap/flux\n
To upgrade to the latest version, run this command:
brew upgrade fluxcd/tap/flux\n
We recommend upgrading the CLI before running bootstrap to upgrade the controllers with flux bootstrap
.
Find which version is installed with flux -v
, and use that for flux bootstrap --version=v<CLI-VERSION>
.
With Bash, you can run sudo curl -s https://fluxcd.io/install.sh | sudo FLUX_VERSION=<VERSION> bash
.
Tip
If you want to install an older version of Flux CLI, you can download the binary for your OS from the releases page.
For other installation methods, see the relevant Flux documentation.
"},{"location":"open-source/install-oss/#export-your-credentials","title":"Export your credentials","text":"Ensure your PAT has repo
scope.
export GITHUB_TOKEN=<your-token>\nexport GITHUB_USER=<your-username>\n
"},{"location":"open-source/install-oss/#check-your-kubernetes-cluster","title":"Check your Kubernetes cluster","text":"flux check --pre\n
The output is similar to:
\u25ba checking prerequisites\n\u2714 kubernetes 1.22.2 >=1.20.6\n\u2714 prerequisites checks passed\n
"},{"location":"open-source/install-oss/#install-flux-onto-your-cluster-with-the-flux-bootstrap-command","title":"Install Flux onto your cluster with the flux bootstrap
command","text":"flux bootstrap
creates a flux-system
folder in your repository that includes the manifests Flux needs to operate. It also generates a key value pair for Flux to access the repo.
The command below assumes the Git provider is github
. If you would rather use GitLab, change this to gitlab
.
flux bootstrap github \\\n --owner=$GITHUB_USER \\\n --repository=fleet-infra \\\n --branch=main \\\n --path=./clusters/my-cluster \\\n --personal \\\n --components-extra image-reflector-controller,image-automation-controller\n
Full installation documentation, including how to work with other Git providers, is available here.
"},{"location":"open-source/install-oss/#install-the-gitops-cli","title":"Install the gitops
CLI","text":"Weave GitOps includes a command-line interface to help users create and manage resources. The gitops
CLI is currently supported on Mac (x86 and Arm) and Linux, including Windows Subsystem for Linux (WSL). Windows support is a planned enhancement.
There are multiple ways to install the gitops
CLI:
curl --silent --location \"https://github.com/weaveworks/weave-gitops/releases/download/v0.36.0/gitops-$(uname)-$(uname -m).tar.gz\" | tar xz -C /tmp\nsudo mv /tmp/gitops /usr/local/bin\ngitops version\n
brew tap weaveworks/tap\nbrew install weaveworks/tap/gitops\n
"},{"location":"open-source/install-oss/#deploy-weave-gitops","title":"Deploy Weave GitOps","text":"In this section we will:
HelmRelease
and HelmRepository
objects.fleet-infra
repo.git clone https://github.com/$GITHUB_USER/fleet-infra\ncd fleet-infra\n
If you have difficulty saving the YAML to the correct path, run the command mkdir -p ./clusters/my-cluster
.
Run the following command, which will create a HelmRepository
and HelmRelease
to deploy Weave GitOps:
PASSWORD=\"<A new password you create, removing the brackets and including the quotation marks>\"\ngitops create dashboard ww-gitops \\\n --password=$PASSWORD \\\n --export > ./clusters/my-cluster/weave-gitops-dashboard.yaml\n
Warning
This command stores a hash of a password. This is relatively safe for demo and testing purposes, but we strongly recommend using a more secure method of storing secrets (such as Flux's SOPS integration) for production systems.
Our docs on securing access to the dashboard provide additional guidance and alternative login methods.
You will use the password you've just created when you've finished Weave GitOps Open Source installation and are ready to login to the dashboard UI.
Tip
If you need to customize the Weave GitOps Helm release, you can use the --values
CLI flag to supply one or more values files.
weave-gitops-dashboard.yaml
to the fleet-infra
repository","text":"git add -A && git commit -m \"Add Weave GitOps Dashboard\"\ngit push\n
"},{"location":"open-source/install-oss/#validate-that-weave-gitops-and-flux-are-installed","title":"Validate that Weave GitOps and Flux are installed","text":"Note: this won't be instantaneous. Give the Flux controllers a couple of minutes to pull the latest commit.
kubectl get pods -n flux-system\n
You should see something similar to:
NAME READY STATUS RESTARTS AGE\nhelm-controller-5bfd65cd5f-gj5sz 1/1 Running 0 10m\nkustomize-controller-6f44c8d499-s425n 1/1 Running 0 10m\nnotification-controller-844df5f694-2pfcs 1/1 Running 0 10m\nsource-controller-6b6c7bc4bb-ng96p 1/1 Running 0 10m\nww-gitops-weave-gitops-86b645c9c6-k9ftg 1/1 Running 0 5m\n
If you wait for a while and still nothing happens, it might be that your manifests haven\u2019t been exported to the repository. This means that Weave GitOps won't install.
Tip
You can use the Weave GitOps Helm Chart to customize your installation. Find the full Chart reference here.
"},{"location":"open-source/install-oss/#next-steps","title":"Next steps","text":"Now let's explore the Weave GitOps Open Source UI. Then, we'll deploy an application.
"},{"location":"open-source/run-ui-subpath/","title":"Optional: Running the UI on a Subpath","text":""},{"location":"open-source/run-ui-subpath/#running-the-ui-on-a-subpath","title":"Running the UI on a subpath","text":"By default, the UI is served on the root path /
. It is possible to run the UI on a subpath, for example /weave-gitops
. This is useful if you want to run weave-gitops alongside other applications on the same domain.
To run the UI on a subpath, you need to set the --route-prefix
flag on the weave-gitops server. For example, if you want to run the UI on /weave-gitops
, you can set the flag to --route-prefix=/weave-gitops
.
To set the flag we use the additionalArgs
field in the spec.values
section of the weave-gitops HelmRelease
.
spec:\nvalues:\nadditionalArgs:\n- --route-prefix=/weave-gitops\n
"},{"location":"open-source/run-ui-subpath/#ingress","title":"Ingress","text":"Ingress
is a Kubernetes resource that allows you to expose your application to the internet. Please refer to the Kubernetes documentation for more information about a complete Ingress
configuration. It often depends on the Kubernetes provider you are using and your particular setup.
The Weave GitOps Helm chart can generate an Ingress
resource to integrate with the ingress controller you have configured for your cluster. To enable ingress generation set the ingress.enabled
field to true
.
path
field to the same subpath specified in the --route-prefix
flag./
.spec:\nvalues:\ningress:\nenabled: true\nhosts:\n- host: \"\"\npaths:\n- path: /wego # set the path to `/` if you have not set the `--route-prefix` flag \npathType: Prefix\n
See the Helm chart reference for a list of all supported ingress options.
"},{"location":"open-source/ui-oss/","title":"Step 2: Explore the Weave GitOps Open Source UI","text":"The Weave GitOps user interface enables you to manage and view all of your applications in one place. This documentation gives you an overview of the Weave GitOps Open Source UI.
Tip
To check out Weave GitOps Enterprise's UI, which provides an even richer user experience, please contact sales@weave.works.
"},{"location":"open-source/ui-oss/#overview","title":"Overview","text":"A quick preview of what the Weave GitOps Open Source UI provides: * an Applications view that shows summary information from\u00a0Kustomization
\u00a0and\u00a0HelmRelease
\u00a0objects so that you can quickly understand the state of your deployments across a cluster. * a Sources view that shows summary information from\u00a0gitrepository,\u00a0helmrepository\u00a0and\u00a0bucket\u00a0objects and tells you the current status of resources that are synchronizing content from where you\u2019ve declared the desired state of your system\u2014for example, Git repositories. * a Flux Runtime view that provides the status of the GitOps engine that continuously reconciles your desired and live state. It shows your installed GitOps Toolkit Controllers and version. * an Image Automation view that reduces GitOps friction, particularly in non-production environments, by enabling you to discover repositories, policies, and updates on your cluster. Deploy the latest image in a dev or staging environment with minimal fuss, and keep your platform updated with the latest approved versions\u2014for example, patch releases to reduce exposure to CVEs. Auto-deploy when approval is gated before the image is added to an internal registry. * A Notifications View that leverages Flux's notification controller to show which notifications are already configured within the UI. This enables WeGO users to set up and receive notifications from Weave GitOps. Here you can find the list of providers. If you\u2019re a platform operator, this view will help you to understand your egress topology across clusters so you\u2019ll know where events are being sent beyond your clusters. * multiple views for debugging. * a dark mode option.
It also enables you to: * sync your latest Git commits directly from the UI * leverage Kubernetes RBAC to control permissions in the dashboard
Let's dive in.
"},{"location":"open-source/ui-oss/#login-to-the-gitops-dashboard","title":"Login to the GitOps Dashboard","text":"First, expose the service running on the cluster with this command:
kubectl port-forward svc/ww-gitops-weave-gitops -n flux-system 9001:9001\n
Next, open the dashboard and login using either the emergency cluster user or OIDC, based on your configuration. (Note: The same directions for WGE apply to OSS for this step.) If you followed the example above, the emergency user will be configured with the username set to admin
. This means that you can use \u201cadmin\u201d as your user name, and the password that you set earlier during installation as $PASSWORD
.
The label of the OIDC button on the login screen is configurable via a feature flag environment variable. This can give your users a more familiar experience when logging in.
Adjust the configuration in the Helm values.yaml
file or the spec.values
section of the Weave GitOps HelmRelease
resource:
envVars:\n- name: WEAVE_GITOPS_FEATURE_OIDC_BUTTON_LABEL\nvalue: \"Login with ACME\"\n
"},{"location":"open-source/ui-oss/#the-applications-view","title":"The Applications View","text":"Upon login you're taken to the Applications view, which allows you to quickly understand the state of your deployments and shows summary information from Kustomization
and HelmRelease
objects. You can apply dark mode using the toggle switch in the top right corner.
In the above screenshot you can see: - two Kustomizations
called podinfo
and canaries
corresponding to the applications with the same names. The source referenced by podinfo
is shipping-service-podinfo
which has been verified whereas the one referenced by canaries
does not have verification set up. - three HelmReleases
called weave-gitops-enterprise
, tf-controller
and podinfo
which deploys the respective Helm Charts.
The table view shows you the reported status so you can understand whether a reconciliation has been successful, and when it was last updated. You can also see where the Flux objects are deployed, which Source
object they are reconciling from and whether or not that Source
is verified (this requires verification to have been set up for the source). Clicking the name of the Source will take you to a detail view for the given Source object. The view automatically updates every few seconds so you know the current state of your system.
Tip
For more information about Sources, please take a look at the Flux documentation.
For information on Source verification, you can check: - Flux documentation - GitRepository verification - Flux documentation - OCIRepository verification
If verification is not set up for the repository, this will appear blank in the UI.
More actions you can take: * Click the magnifying glass icon to search for and filter objects by Name
. * Filter by Type
by clicking the strawberry icon to its right. * Click the Name
of an object to get a detailed view for the given Kustomization
or HelmRelease
. (You'll see this again in the Sources view.) * In the main Applications view, you can use the checkbox to the left of your listed applications to select them and perform actions from the actions menu at the top. These actions are Sync (reconcile), Suspend, and Resume, and they affect Flux resources.
Let's explore the flux-system
Kustomization. Navigate back to the Applications
view, and click on the flux-system
object.
It might take a few moments for the data to load. Once it does, you should get a result that resembles the above screenshot. Here you can find key information about how the resource is defined: * which Source
it is reading from * the latest applied commit * the exact path with the Source repository that is being deployed * the Interval
where Flux will look to reconcile any differences between the declared and live state. For example, if a kubectl
patch has been applied on the cluster, it will effectively be reverted. If a longer error message is reported by this object, you'll be able to see it in its entirety on this page.
Underneath the summary information you'll find:
kustomization
.Events tab
Reconciliation Graph tab
Yaml tab
"},{"location":"open-source/ui-oss/#the-sources-view","title":"The Sources View","text":"In the left-hand menu of the UI, click on the Sources view. This will show you where Flux pulls its application definitions from\u2014for example, Git repositories\u2014and the current state of that synchronization. Sources shows summary information from GitRepository
, HelmRepository
, HelmChart
, and Bucket
objects.
In the above screenshot you can see: - a GitRepository
called shipping-service-podinfo
- an OCIRepository
called podinfo-oci
These have both had verification set up on them which has been completed successfully.
The Sources table view displays information about status so that you can see whether Flux has been able to successfully pull from a given source, and which specific commit was last detected. It shows you key information like the Interval
\u2014namely, how frequently Flux will check for updates in a given source location. You can also see whether or not that source is verified (if this is something that you have set up for the specific source).
Actions you can take: * Apply filtering as you did in the Applications view. * Click a URL
to navigate to a given source\u2014i.e. a repository in GitHub\u2014or the Name
of a Source
to view more details about it.
Go back to the Details tab, and click GitRepository/flux-system
from the summary at the top of the page.
As with an Application detail view, you can see key information about how the resource is defined.
"},{"location":"open-source/ui-oss/#the-image-automation-view","title":"The Image Automation View","text":"Maybe you're an app developer who wants to deploy the latest image in a dev/staging environment with as minimal fuss as possible and reduce GitOps friction. Or you might be a platform engineer who wants to keep your platform up-to-date with the latest approved versions\u2014for example, patch releases to reduce exposure to CVEs\u2014or auto-deploy when approval is gated before adding an image to an internal registry. The Image Automation view can help.
WeGO's Image Automation view allows users to configure automatic updates to their workloads based on the detection of a new image tag in a repository. For application developers, this means faster deployments and shorter feedback cycles to easily verify changes to an application in a Kubernetes environment. The view still supports GitOps workflows as the changes are committed back to Git\u2014either to the branch already reconciled by Flux, or to an alternative branch so that a Pull Request can be generated and peer review can occur.
Image Automation refers to Flux's ability to update the image tag specified in a manifest based on detection of a newer image and automatically deploy to a cluster. It involves three required objects\u2014ImageRepositories, ImagePolicies, and ImageUpdateAutomations\u2014which WeGO OSS users can discover on their clusters. Users can also view object details either through a YAML-like view, as we do for most non-Flux objects, or a details view. The UI makes it possible to suspend or resume ImageRepositories and ImageUpdateAutomations so that Flux stops looking for new updates or committing these to Git. Also, the UI shows whether all required resources are configured and assists with Image Policy to show the latest image.
ImageRepositories, ImagePolicies, and ImageUpdateAutomations are used by Flux's Image Automation Controllers. The Image Reflector controller and the Image Automation controller work together to update a Git repository when new container images are available. In WeGO OSS, if the image-reflector-controller and/or image-automation-controller are not installed on a cluster, a warning message will display.
If you make a mistake configuring one of the resources, you can use WeGO to easily trace from the Image Repository scan, see whether it is able to select the image based on the Image Policy, and detect whether an Image Update has successfully run. This provides greater visibility into the machinery provided by Flux and enables quicker troubleshooting than what's possible by hunting via the Flux CLI. App devs can triage issues without depending on their platform teams.
"},{"location":"open-source/ui-oss/#the-flux-runtime-view","title":"The Flux Runtime View","text":"Let's go back to the left-hand menu of the UI and click on Flux Runtime
. This view provides information on the GitOps engine, which continuously reconciles your desired and live state, and helps users to know which apiVersion to use in manifests. It comes with two tabs: one for controllers, and the other for custom resource definitions (CRDs).
The Controllers tab shows your installed GitOps Toolkit Controllers and their version.
By default, flux bootstrap
will install the following controllers: - helm-controller - kustomize-controller - notification-controller - source-controller
From this view you can see whether the controllers are healthy and which version of a given component is currently deployed.
"},{"location":"open-source/ui-oss/#crds","title":"CRDs","text":"The CRD tab lists the custom resources that the GitOps Toolkit Controllers use. This allows you to see which resources you will be able to create.
"},{"location":"open-source/ui-oss/#moving-on","title":"Moving On","text":"Now that we are familiar with the dashboard, let's deploy a new application .
"},{"location":"pipelines/","title":"Pipelines ENTERPRISE","text":"Warning
This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments.
Weave GitOps Enterprise Pipelines enables teams to increase the velocity, stability, and security of software systems via automated deployment pipelines. It provides insights into new application versions that are being rolled out across clusters and environments, which allows you to implement security guardrails and track metrics to assess if the application is working as desired. In instances of failures, the change is abandoned with an automatic rollout of the older version.
With Pipelines, you define a release pipeline for a given application as a custom resource. The pipeline can comprise any number of environments through which an application is expected to be deployed. Push a change to your application in your dev environment, for example, and watch the update roll out across staging and production environments all from a single PR (or an external process like Jenkins)\u2014with Weave GitOps Enterprise orchestrating everything.
Designed with flexibility in mind, Pipelines can be easily integrated within your existing CI setup\u2014for example, CircleCI, Jenkins, Tekton, or GitHub Actions.
"},{"location":"pipelines/#benefits-to-developers","title":"Benefits to Developers","text":"The Pipelines feature: - reduces toil and errors when setting up a new pipeline or reproducing previous pipelines through YAML constructs - saves time and overhead with automated code rollout from one environment to another, with minimal intervention from the Ops team - enables users to observe code progression and track application versions through different environments from the Weave GitOps UI - streamlines code deployment from one environment to another, and minimizes friction between application development and Ops teams - enables you to easily define which Helm charts are part of the environments you create\u2014saving lots of time through automated package management
Now that you know what delivery pipelines can do for you, follow the guide to get started.
"},{"location":"pipelines/authorization/","title":"Authorization ENTERPRISE","text":"Warning
This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments.
To view pipelines, users need read access to the pipeline
resource and the underlying application
resources. This sample configuration shows a recommended way to configure RBAC to provide such access. The pipeline-reader
role and the search-pipeline-reader
role-binding allow a group search-developer
to access pipeline resources within the search
namespace.
apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\nname: pipeline-reader\nrules:\n- apiGroups: [ \"pipelines.weave.works\" ]\nresources: [ \"pipelines\" ]\nverbs: [ \"get\", \"list\", \"watch\"]\n- apiGroups: [\"helm.toolkit.fluxcd.io\"]\nresources: [ \"helmreleases\" ]\nverbs: [ \"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\nname: search-pipeline-reader\nnamespace: search\nsubjects:\n- kind: Group\nname: search-developer\napiGroup: rbac.authorization.k8s.io\nroleRef:\nkind: ClusterRole\nname: pipeline-reader\napiGroup: rbac.authorization.k8s.io\n
"},{"location":"pipelines/pipelines-getting-started/","title":"Getting Started with Pipelines ENTERPRISE","text":"Warning
This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments.
"},{"location":"pipelines/pipelines-getting-started/#prerequisites","title":"Prerequisites","text":"Before using Pipelines, please ensure that: - You have Weave GitOps Enterprise installed on a cluster. - You have configured Weave GitOps Enterprise RBAC for Pipelines. - The Pipelines feature flag enablePipelines
has been enabled. This flag is part of the Weave GitOps Enterprise Helm chart values and is enabled by default. - Any leaf clusters running workloads that you need to visualise using Pipelines have been added to Weave GitOps Enterprise. - You have exposed the promotion webhook on the management cluster and leaf clusters can reach that webhook endpoint over the network.
A pipeline allows you to define the route your application is taking, so that you can get it to production. Three main concepts are at play: - the application
to deliver - the environments
that your app will go through on its way to production (general). An environment describes the different stages of a pipeline and consists of one or more deployment targets. - the deployment targets
, the clusters that each environment has. A deployment target consists of a namespace and a GitOpsCluster
reference and is used to specify where the application is running in your fleet.
You can define a delivery pipeline using a Pipeline
custom resource. An example of such a CR is shown here:
---\napiVersion: pipelines.weave.works/v1alpha1\nkind: Pipeline\nmetadata:\nname: podinfo-02\nnamespace: flux-system\nspec:\nappRef:\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nname: podinfo\nenvironments:\n- name: dev\ntargets:\n- namespace: podinfo-02-dev\nclusterRef:\nkind: GitopsCluster\nname: dev\nnamespace: flux-system\n- name: test\ntargets:\n- namespace: podinfo-02-qa\nclusterRef:\nkind: GitopsCluster\nname: dev\nnamespace: flux-system\n- namespace: podinfo-02-perf\nclusterRef:\nkind: GitopsCluster\nname: dev\nnamespace: flux-system\n- name: prod\ntargets:\n- namespace: podinfo-02-prod\nclusterRef:\nkind: GitopsCluster\nname: prod\nnamespace: flux-system\n
In the example above, the podinfo
application is delivered to a traditional pipeline composed of dev
, test
, and prod
environments. In this case, the test
environment consists of two deployment targets, qa
and perf
. This is to indicate that, although both targets are part of the same stage (testing), they can evolve separately and may run different versions of the application. Note that two clusters, dev
and prod
, are used for the environments; both are defined in the flux-system
namespace.
For more details about the spec of a pipeline, go here.
"},{"location":"pipelines/pipelines-getting-started/#view-your-list-of-pipelines","title":"View Your List of Pipelines","text":"Once Flux has reconciled your pipeline, you can navigate to the Pipelines view in the WGE UI to see the list of pipelines to which you have access.
For each pipeline, the WGE UI shows a simplified view with the application Type
and Environments
it goes through.
Once you have selected a pipeline from the list, navigate to its details view where you can see the current status of your application by environment and deployment target.
"},{"location":"pipelines/pipelines-templates/","title":"Using GitOpsTemplates for Pipelines ENTERPRISE","text":"To create new Pipelines and their required resources from within Weave GitOps Enterprise, you can leverage GitOpsTemplates, which help platform teams scale for developer self-service.
This document provides example configuration that you can adapt and use within your own organization, based on your tenancy model.
We will cover the creation of:
Secrets, required for authentication and authorization between leaf and management clusters as well as to Git, are out of scope for this document and must be handled by your chosen secret management solution.
For advice on Secrets Management, refer to the Flux guide.
Templates can include a single resource or multiple resources, depending on your use case. For example, you may want to only create the Pipeline custom resource to associate existing HelmReleases. Or, you can create the HelmReleases, notification controller resources, and Pipeline all in a single template. They are highly customizable to meet the needs of your teams.
"},{"location":"pipelines/pipelines-templates/#adding-new-resources-from-within-the-weave-gitops-enterprise-dashboard","title":"Adding New Resources From Within the Weave GitOps Enterprise Dashboard","text":"GitOpsTemplates are custom resources installed onto a management cluster where Weave GitOps Enterprise resides. To add a new Pipeline, click Create a Pipeline
from within the Pipeline view. This will take you to a pre-filtered list of templates with the label: weave.works/template-type: pipeline
.
The Templates
view (shown below) lists all templates for which a given user has the appropriate permission to view. You can install GitOpsTemplates into different namespaces and apply standard Kubernetes RBAC to limit which teams can utilize which templates. You can also configure policy to enforce permitted values within a template.
This section provides examples to help you build your own templates for Pipelines.
"},{"location":"pipelines/pipelines-templates/#pipeline-visualization-only","title":"Pipeline: Visualization Only","text":"Included Sample
This default template is shipped with Weave GitOps Enterprise to help you get started with Pipelines.
For flexibility, this allows the template user to specify the names of the clusters where the application is deployed, and to vary the namespace per cluster. This works even in a tenancy model where environments coexist on the same cluster and use namespaces for isolation.
Expand to view example template---\napiVersion: templates.weave.works/v1alpha2\nkind: GitOpsTemplate\nmetadata:\nname: pipeline-sample\nnamespace: default # Namespace where the GitOpsTemplate is installed, consider that a team will need READ access to this namespace and the custom resource\nlabels:\nweave.works/template-type: pipeline\nspec:\ndescription: Sample Pipeline showing visualization of two helm releases across two environments.\nparams:\n- name: RESOURCE_NAME # This is a required parameter name to enable Weave GitOps to write to your Git Repository\ndescription: Name of the Pipeline\n- name: RESOURCE_NAMESPACE\ndescription: Namespace for the Pipeline on the management cluster\ndefault: flux-system # default values make it easier for users to fill in a template\n- name: FIRST_CLUSTER_NAME\ndescription: Name of GitopsCluster object for the first environment\n- name: FIRST_CLUSTER_NAMESPACE\ndescription: Namespace where this object exists\ndefault: default\n- name: FIRST_APPLICATION_NAME\ndescription: Name of the HelmRelease for your application in the first environment\n- name: FIRST_APPLICATION_NAMESPACE\ndescription: Namespace for this application\ndefault: flux-system\n- name: SECOND_CLUSTER_NAME\ndescription: Name of GitopsCluster object for the second environment\n- name: SECOND_CLUSTER_NAMESPACE\ndescription: Namespace where this object exists\ndefault: default\n- name: SECOND_APPLICATION_NAME\ndescription: Name of the HelmRelease for your application in the second environment\n- name: SECOND_APPLICATION_NAMESPACE\ndescription: Namespace for this application\ndefault: flux-system\nresourcetemplates:\n- content:\n- apiVersion: pipelines.weave.works/v1alpha1\nkind: Pipeline\nmetadata:\nname: ${RESOURCE_NAME}\nnamespace: ${RESOURCE_NAMESPACE}\nspec:\nappRef:\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nname: ${APPLICATION_NAME}\nenvironments:\n- name: First-Environment\ntargets:\n- namespace: 
${FIRST_APPLICATION_NAMESPACE}\nclusterRef:\nkind: GitopsCluster\nname: ${FIRST_CLUSTER_NAME}\nnamespace: ${FIRST_CLUSTER_NAMESPACE}\n- name: Second-Environment\ntargets:\n- namespace: ${SECOND_APPLICATION_NAMESPACE}\nclusterRef:\nkind: GitopsCluster\nname: ${SECOND_CLUSTER_NAME}\nnamespace: ${SECOND_CLUSTER_NAMESPACE}\n
"},{"location":"pipelines/pipelines-templates/#pipeline-multi-cluster-promotion","title":"Pipeline - Multi-Cluster Promotion","text":"This example extends the above to add a promotion strategy. In this case, it will raise a pull request to update the application version in subsequent environments.
Expand to view example template---\napiVersion: templates.weave.works/v1alpha2\nkind: GitOpsTemplate\nmetadata:\nname: pipeline-sample\nnamespace: default\nlabels:\nweave.works/template-type: pipeline\nspec:\ndescription: Sample Pipeline showing visualization of two helm releases across two environments.\nparams:\n- name: RESOURCE_NAME\ndescription: Name of the Pipeline\n- name: RESOURCE_NAMESPACE\ndescription: Namespace for the Pipeline on the management cluster\ndefault: flux-system\n- name: FIRST_CLUSTER_NAME\ndescription: Name of GitopsCluster object for the first environment\n- name: FIRST_CLUSTER_NAMESPACE\ndescription: Namespace where this object exists\ndefault: default\n- name: FIRST_APPLICATION_NAME\ndescription: Name of the HelmRelease for your application in the first environment\n- name: FIRST_APPLICATION_NAMESPACE\ndescription: Namespace for this application\ndefault: flux-system\n- name: SECOND_CLUSTER_NAME\ndescription: Name of GitopsCluster object for the second environment\n- name: SECOND_CLUSTER_NAMESPACE\ndescription: Namespace where this object exists\ndefault: default\n- name: SECOND_APPLICATION_NAME\ndescription: Name of the HelmRelease for your application in the second environment\n- name: SECOND_APPLICATION_NAMESPACE\ndescription: Namespace for this application\ndefault: flux-system\n- name: APPLICATION_REPO_URL\ndescription: URL for the git repository containing the HelmRelease objects\n- name: APPLICATION_REPO_BRANCH\ndescription: Branch to update with new version\n- name: GIT_CREDENTIALS_SECRET\ndescription: Name of the secret in RESOURCE_NAMESPACE containing credentials to create pull requests\nresourcetemplates:\n- content:\n- apiVersion: pipelines.weave.works/v1alpha1\nkind: Pipeline\nmetadata:\nname: ${RESOURCE_NAME}\nnamespace: ${RESOURCE_NAMESPACE}\nspec:\nappRef:\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nname: ${APPLICATION_NAME}\nenvironments:\n- name: First-Environment\ntargets:\n- namespace: 
${FIRST_APPLICATION_NAMESPACE}\nclusterRef:\nkind: GitopsCluster\nname: ${FIRST_CLUSTER_NAME}\nnamespace: ${FIRST_CLUSTER_NAMESPACE}\n- name: Second-Environment\ntargets:\n- namespace: ${SECOND_APPLICATION_NAMESPACE}\nclusterRef:\nkind: GitopsCluster\nname: ${SECOND_CLUSTER_NAME}\nnamespace: ${SECOND_CLUSTER_NAMESPACE}\npromotion:\npull-request:\nurl: ${APPLICATION_REPO_URL}\nbaseBranch: ${APPLICATION_REPO_BRANCH}\nsecretRef:\nname: ${GIT_CREDENTIALS_SECRET}\n
"},{"location":"pipelines/pipelines-templates/#git-credentials","title":"Git Credentials","text":"For guidance on configuring credentials, find instructions in the Promoting Applications documentation.
"},{"location":"pipelines/pipelines-templates/#promotion-marker-added-to-helmrelease-insecond-environment","title":"Promotion Marker Added to HelmRelease in Second-Environment
","text":"You must add a comment to the HelmRelease or Kustomization patch where the spec.chart.spec.version
is defined. For example, if the values used in the above template were as follows:
RESOURCE_NAME=my-app\nRESOURCE_NAMESPACE=pipeline-01\n
Then the marker would be:
# {\"$promotion\": \"pipeline-01:my-app:Second-Environment\"}\n
Find more guidance on adding markers here.
"},{"location":"pipelines/pipelines-templates/#alerts-and-providers","title":"Alerts and Providers","text":"This example shows you how you can configure multiple resources in a single template and simplify creation through common naming strategies. The notification controller communicates update events from the leaf clusters where applications are deployed to the management cluster, where the Pipeline Controller resides and orchestrates.
For the Alert
, this template filters events to detect when an update has occurred. Depending on your use case, you can use different filtering.
For the Provider
, this template uses authenticated (HMAC) communication to the promotion endpoint, where a secret must be present on both the management cluster and the leaf cluster(s). For simplicity's sake, you can use a generic
provider instead; this will not require the secret.
---\napiVersion: templates.weave.works/v1alpha2\nkind: GitOpsTemplate\nmetadata:\nname: pipeline-notification-resources\nnamespace: default\nlabels:\nweave.works/template-type: application # These are generic Flux resources rather than Pipeline-specific\nspec:\ndescription: Creates flux notification controller resources for a cluster, required for promoting applications via pipelines.\nparams:\n- name: RESOURCE_NAME\ndescription: Name for the generated objects, should match the target Application (HelmRelease) name.\n- name: RESOURCE_NAMESPACE\ndescription: Namespace for the generated objects, should match the target Application (HelmRelease) namespace.\n- name: PROMOTION_HOST\ndescription: Host for the promotion webhook on the management cluster, i.e. \"promotions.example.org\"\n- name: SECRET_REF\ndescription: Name of the secret containing HMAC key in the token field\n- name: ENV_NAME\ndescription: Environment the cluster is a part of within a pipeline.\nresourcetemplates:\n- content:\n- apiVersion: notification.toolkit.fluxcd.io/v1beta1\nkind: Provider\nmetadata:\nname: ${RESOURCE_NAME}\nnamespace: ${RESOURCE_NAMESPACE}\nspec:\naddress: http://${PROMOTION_HOST}/promotion/${RESOURCE_NAME}/${ENV_NAME}\ntype: generic-hmac\nsecretRef: ${SECRET_REF}\n- apiVersion: notification.toolkit.fluxcd.io/v1beta1\nkind: Alert\nmetadata:\nname: ${RESOURCE_NAME}\nnamespace: ${RESOURCE_NAMESPACE}\nspec:\nproviderRef:\nname: ${RESOURCE_NAME}\neventSeverity: info\neventSources:\n- kind: HelmRelease\nname: ${RESOURCE_NAME}\nexclusionList:\n- \".*upgrade.*has.*started\"\n- \".*is.*not.*ready\"\n- \"^Dependencies.*\"\n
"},{"location":"pipelines/pipelines-templates/#summary","title":"Summary","text":"GitOpsTemplates provide a highly flexible way for platform and application teams to work together with Pipelines.
You can hard-code values, offer a range of accepted values, or allow the template consumer to provide input based on your organization's requirements.
Templates are subject to RBAC as with any Kubernetes resource, enabling you to easily control which tenants have access to which templates.
For full details on GitOpsTemplates, read our documentation.
"},{"location":"pipelines/pipelines-with-jenkins/","title":"Setting Up Pipelines to Notify a Jenkins Webhook ENTERPRISE","text":"Using Flux's Notification Controller, a Jenkins Webhook can be invoked on Pipeline promotion events.
"},{"location":"pipelines/pipelines-with-jenkins/#configuring-jenkins","title":"Configuring Jenkins","text":"To enable external callers to trigger a build on a job, an additional \"Generic Webhook Trigger\" plugin is required as Jenkins does not have this functionality built-in.
After the plugin is installed a new \"Generic Webhook Trigger\" job configuration option is available.
The only mandatory field is the \"Token\". Without this token, Jenkins will not know which build should be triggered.
"},{"location":"pipelines/pipelines-with-jenkins/#post-content-parameters","title":"Post content parameters","text":"To access fields from the pipeline event payload, each field has to be defined as a \"Post content parameters\".
Expand to see an example Promotion Event payload{\n\"involvedObject\": {\n\"kind\": \"Pipeline\",\n\"namespace\": \"flux-system\",\n\"name\": \"podinfo-pipeline\",\n\"uid\": \"74d9e3b6-0269-4c12-9051-3ce8cfb7886f\",\n\"apiVersion\": \"pipelines.weave.works/v1alpha1\",\n\"resourceVersion\": \"373617\"\n},\n\"severity\": \"info\",\n\"timestamp\": \"2023-02-08T12:34:13Z\",\n\"message\": \"Promote pipeline flux-system/podinfo-pipeline to prod with version 6.1.5\",\n\"reason\": \"Promote\",\n\"reportingController\": \"pipeline-controller\",\n\"reportingInstance\": \"chart-pipeline-controller-8549867565-7822g\"\n}\n
"},{"location":"pipelines/pipelines-with-jenkins/#configure-notification-provider","title":"Configure Notification Provider","text":"In order to be able to invoke a generic webhook, a notification provider has to be defined. Jenkins expects the secret token which you configured above as a GET parameter or in the request header. The secret token can be stored in a Secret:
apiVersion: v1\nkind: Secret\ntype: Opaque\nmetadata:\nname: jenkins-token\nnamespace: podinfo\nstringData:\nheaders: |\ntoken: epicsecret\n
Now we can define a Notification Provider using this secret:
apiVersion: notification.toolkit.fluxcd.io/v1beta1\nkind: Provider\nmetadata:\nname: jenkins-promotion\nnamespace: podinfo\nspec:\ntype: generic\naddress: https://jenkins.domain.tld/generic-webhook-trigger/invoke\nsecretRef:\nname: jenkins-token\n
"},{"location":"pipelines/pipelines-with-jenkins/#set-up-alerts","title":"Set Up Alerts","text":"We can configure an Alert to use the jenkins-promotion
provider. For example an Alert for the podinfo-pipeline
in the flux-system
namespace:
apiVersion: notification.toolkit.fluxcd.io/v1beta1\nkind: Alert\nmetadata:\nname: podinfo-pipeline-promotion\nnamespace: podinfo\nspec:\neventSeverity: info\neventSources:\n- kind: Pipeline\nname: podinfo-pipeline\nnamespace: flux-system\nproviderRef:\nname: jenkins-promotion\n
"},{"location":"pipelines/pipelines-with-tekton/","title":"Setting up Pipelines to Trigger a Tekton Pipeline ENTERPRISE","text":"Using Flux's Notification Controller, a Tekton EventListener can be triggered on Pipeline promotion events.
"},{"location":"pipelines/pipelines-with-tekton/#configuring-tekton-pipelines","title":"Configuring Tekton Pipelines","text":""},{"location":"pipelines/pipelines-with-tekton/#tekton-tasks","title":"Tekton Tasks","text":"In this tutorial, we have two tasks to demonstrate how to use parameter values from the Pipeline event payload. Both tasks print out messages with information about the pipeline promotion. Each task has three parameters: name
, namespace
, and message
.
---\napiVersion: tekton.dev/v1beta1\nkind: Task\nmetadata:\nname: hello\nnamespace: ww-pipeline\nspec:\nparams:\n- name: name\ntype: string\n- name: namespace\ntype: string\n- name: message\ntype: string\nsteps:\n- name: echo\nimage: alpine\nscript: |\n#!/bin/sh\necho \"Hello $(params.namespace)/$(params.name)!\"\necho \"Message: $(params.message)\"\n---\napiVersion: tekton.dev/v1beta1\nkind: Task\nmetadata:\nname: goodbye\nnamespace: ww-pipeline\nspec:\nparams:\n- name: name\ntype: string\n- name: namespace\ntype: string\n- name: message\ntype: string\nsteps:\n- name: goodbye\nimage: ubuntu\nscript: |\n#!/bin/bash\necho \"Goodbye $(params.namespace)/$(params.name)!\"\necho \"Message: $(params.message)\"\n
"},{"location":"pipelines/pipelines-with-tekton/#tekton-pipeline","title":"Tekton Pipeline","text":"The hello-goodbye
Tekton Pipeline has the same three parameters as the tasks and it passes down the values to them.
---\napiVersion: tekton.dev/v1beta1\nkind: Pipeline\nmetadata:\nname: hello-goodbye\nnamespace: ww-pipeline\nspec:\nparams:\n- name: name\ntype: string\n- name: namespace\ntype: string\n- name: message\ntype: string\ntasks:\n- name: hello\ntaskRef:\nname: hello\nparams:\n- name: namespace\nvalue: $(params.namespace)\n- name: name\nvalue: $(params.name)\n- name: message\nvalue: $(params.message)\n- name: goodbye\nrunAfter:\n- hello\ntaskRef:\nname: goodbye\nparams:\n- name: namespace\nvalue: $(params.namespace)\n- name: name\nvalue: $(params.name)\n- name: message\nvalue: $(params.message)\n
"},{"location":"pipelines/pipelines-with-tekton/#configuring-tekton-pipline-automation","title":"Configuring Tekton Pipeline Automation","text":"In order to be able to trigger a Pipeline from an external source, we need three Tekton resources.
TriggerBinding
: This resource binds the incoming JSON message to parameter variables.TriggerTemplate
: This resource is the template of the PipelineRun
that will be started.EventListener
: This resource glues the above two resources together and creates an http listener service.A JSON payload from the Notification Service about a Pipeline promotion looks like this:
{\n\"involvedObject\": {\n\"kind\": \"Pipeline\",\n\"namespace\": \"flux-system\",\n\"name\": \"podinfo-pipeline\",\n\"uid\": \"74d9e3b6-0269-4c12-9051-3ce8cfb7886f\",\n\"apiVersion\": \"pipelines.weave.works/v1alpha1\",\n\"resourceVersion\": \"373617\"\n},\n\"severity\": \"info\",\n\"timestamp\": \"2023-02-08T12:34:13Z\",\n\"message\": \"Promote pipeline flux-system/podinfo-pipeline to prod with version 6.1.5\",\n\"reason\": \"Promote\",\n\"reportingController\": \"pipeline-controller\",\n\"reportingInstance\": \"chart-pipeline-controller-8549867565-7822g\"\n}\n
In our tasks, we are using only the involvedObject.name
, involvedObject.namespace
and message
fields:
---\napiVersion: triggers.tekton.dev/v1beta1\nkind: TriggerBinding\nmetadata:\nname: ww-pipeline-binding\nnamespace: ww-pipeline\nspec:\nparams:\n- name: namespace\nvalue: $(body.involvedObject.namespace)\n- name: name\nvalue: $(body.involvedObject.name)\n- name: message\nvalue: $(body.message)\n
"},{"location":"pipelines/pipelines-with-tekton/#tekton-triggertemplate","title":"Tekton TriggerTemplate","text":"The template has the same parameters as the Pipeline
resources:
---\napiVersion: triggers.tekton.dev/v1beta1\nkind: TriggerTemplate\nmetadata:\nname: ww-pipeline-template\nnamespace: ww-pipeline\nspec:\nparams:\n- name: namespace\ndefault: \"Unknown\"\n- name: name\ndefault: \"Unknown\"\n- name: message\ndefault: \"no message\"\nresourcetemplates:\n- apiVersion: tekton.dev/v1beta1\nkind: PipelineRun\nmetadata:\ngenerateName: hello-goodbye-run-\nspec:\npipelineRef:\nname: hello-goodbye\nparams:\n- name: name\nvalue: $(tt.params.name)\n- name: namespace\nvalue: $(tt.params.namespace)\n- name: message\nvalue: $(tt.params.message)\n
"},{"location":"pipelines/pipelines-with-tekton/#tekton-eventlistener","title":"Tekton EventListener","text":"To access all required resources, we need an extra service account:
---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\nname: tekton-ww-pipeline-robot\nnamespace: ww-pipeline\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\nname: triggers-example-eventlistener-binding\nnamespace: ww-pipeline\nsubjects:\n- kind: ServiceAccount\nname: tekton-ww-pipeline-robot\nnamespace: ww-pipeline\nroleRef:\napiGroup: rbac.authorization.k8s.io\nkind: ClusterRole\nname: tekton-triggers-eventlistener-roles\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\nname: triggers-example-eventlistener-clusterbinding\nsubjects:\n- kind: ServiceAccount\nname: tekton-ww-pipeline-robot\nnamespace: ww-pipeline\nroleRef:\napiGroup: rbac.authorization.k8s.io\nkind: ClusterRole\nname: tekton-triggers-eventlistener-clusterroles\n
With this ServiceAccount
, we can create the EventListener
using the TriggerBinding
and TriggerTemplate
:
---\napiVersion: triggers.tekton.dev/v1beta1\nkind: EventListener\nmetadata:\nname: ww-pipeline-listener\nnamespace: ww-pipeline\nspec:\nserviceAccountName: tekton-ww-pipeline-robot\ntriggers:\n- name: ww-pipeline-trigger\nbindings:\n- ref: ww-pipeline-binding\ntemplate:\nref: ww-pipeline-template\n
At this point, we should have a Service
for our EventListener
.
\u276f kubectl get service -n ww-pipeline\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nel-ww-pipeline-listener ClusterIP 10.96.250.23 <none> 8080/TCP,9000/TCP 3d\n
"},{"location":"pipelines/pipelines-with-tekton/#configure-notification-provider","title":"Configure Notification Provider","text":"In this case, we are using Tekton in the same cluster, so we can use an internal address to access the EventListener
service. If they are not in the same cluster, exposing the service may be required through an ingress or a service mesh.
---\napiVersion: notification.toolkit.fluxcd.io/v1beta1\nkind: Provider\nmetadata:\nname: tekton-promotion\nnamespace: hello-podinfo\nspec:\ntype: generic\naddress: http://el-ww-pipeline-listener.ww-pipeline:8080/\n
"},{"location":"pipelines/pipelines-with-tekton/#set-up-alerts","title":"Set Up Alerts","text":"We can configure an Alert to use the tekton-promotion
provider. For example, an Alert for the podinfo-pipeline
in the flux-system
namespace:
---\napiVersion: notification.toolkit.fluxcd.io/v1beta1\nkind: Alert\nmetadata:\nname: tekton-promotion-podinfo\nnamespace: hello-podinfo\nspec:\neventSeverity: info\neventSources:\n- kind: Pipeline\nname: hello-podinfo\nnamespace: flux-system\nproviderRef:\nname: tekton-promotion\n
"},{"location":"pipelines/promoting-applications/","title":"Promoting applications through pipeline environments","text":"Warning
This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments.
Pipelines allow you to configure automatic promotions of applications through a consecutive set of environments, e.g. from dev to staging to production. The environments are defined in the Pipeline
resource itself so that each pipeline governs a single application and all the environments to which it is deployed.
Info
At the moment only applications defined as Flux HelmReleases
are supported in automatic promotions.
An example of a pull request for an application promotion
The Getting Started Guide describes how to create a basic pipeline for an application so you can visualize its deployments across a series of environments. You may also configure a pipeline in order to promote applications across a series of environments. There are currently two supported strategies for application promotions:
Before configuring any of the above promotion strategies, you need to setup notifications from all your environments so that whenever a new version gets deployed, the promotion webhook component of the pipeline controller is notified and takes an action based on the pipeline definition. The rest of this guide describes the configuration needed to setup application promotion via pipelines.
"},{"location":"pipelines/promoting-applications/#expose-the-promotion-webhook","title":"Expose the promotion webhook","text":"Applications deployed in leaf clusters use the Flux notification controller running on each leaf cluster, to notify the management cluster of a successful promotion. This requires network connectivity to be established between the leaf cluster and the management cluster.
The component responsible for listening to incoming notifications from leaf clusters is the pipeline controller. It hosts a webhook service that needs to be exposed via an ingress resource to make it available for external calls. Exposing the webhook service is done via the Weave GitOps Enterprise Helm chart values and the configuration used depends on your environment. The example below shows the configuration for NGINX ingress controller and needs to be adjusted if another ingress controller is used:
spec:\nvalues:\nenablePipelines: true\npipeline-controller:\npromotion:\ningress:\nenabled: true\nclassName: nginx\nannotations:\ncert-manager.io/cluster-issuer: letsencrypt\nhosts:\n- host: promotions.example.org\npaths:\n- path: /?(.*)\npathType: ImplementationSpecific\ntls:\n- secretName: promotions-tls\nhosts:\n- promotions.example.org\n
You will need the externally reachable URL of this service later on in this guide.
"},{"location":"pipelines/promoting-applications/#setup-notifications-from-leaf-clusters","title":"Setup notifications from leaf clusters","text":"Once the webhook service is exposed over HTTP/S, you need to create alert/provider resources to send notifications to it from leaf clusters. These notifications represent successful promotions for applications running on the leaf clusters.
Successful promotion events are triggered by Flux's notification controller. You create a Provider pointing to the promotion webhook exposed earlier and an Alert targeting the app's HelmRelease:
---\napiVersion: notification.toolkit.fluxcd.io/v1beta1\nkind: Provider\nmetadata:\nname: promotion-my-app\nspec:\naddress: \"https://promotions.example.org/promotion/pipeline-01/my-app/dev\"\ntype: generic-hmac\nsecretRef:\nname: hmac-secret\n
In the example above, the generic-hmac
Provider is used to ensure notifications originate from authenticated sources. The referenced Secret, should include a token
field which holds the HMAC key. The same HMAC key must be specified in the Secret referenced by the .spec.promotion.strategy.secretRef.name
field, so that the pipeline controller can verify any incoming notifications. For more information on the generic-hmac
Provider, please refer to the notification controller docs.
Note that by default, the promotion webhook endpoint is exposed at /promotion
as shown in the example above. However you may use rewrite rules in your ingress configuration to omit it, if desired. For example, if using NGINX ingress controller, you may use the following annotation:
annotations:\nnginx.ingress.kubernetes.io/rewrite-target: /promotion/$1\n
The Provider address can then be set as https://promotions.example.org/pipeline-01/my-app/dev
. Tip
You may also use the generic webhook provider type that supports HMAC verification to ensure incoming notifications originate from authenticated sources.
The address
field's URL path is comprised of 3 components again:
Weave GitOps Enterprise can then parse the incoming URL path to identify the pipeline resource and look up the next environment for the defined promotion action.
An example Alert might look like this:
---\napiVersion: notification.toolkit.fluxcd.io/v1beta1\nkind: Alert\nspec:\neventSeverity: info\neventSources:\n- kind: HelmRelease\nname: my-app\nexclusionList:\n- .*upgrade.*has.*started\n- .*is.*not.*ready\n- ^Dependencies.*\nproviderRef:\nname: promotion-my-app\n
Tip
Be sure to create the Provider/Alert tuple on each of the leaf clusters targeted by a pipeline.
Now as soon as the HelmRelease
on the first environment defined in the pipeline is bumped (e.g. by Flux discovering a new version in the Helm repository), an event is sent to the promotion webhook which will determine the next action based on the pipeline definition and chosen strategy. The rest of this guide describes how to setup up any of the available strategies depending on your requirements.
Danger
Creating pull requests requires a personal access token with write access to your git repo. If the secret containing the token is compromised (and you could assume it as a likely scenario), it could in principle allow someone to delete your production applications.
Please make sure you understand the Security section below before taking the steps to enable automated pull requests.
This section covers adding a promotion by pull request (PR) strategy, so that whenever the application defined in a pipeline is upgraded in one of the pipeline's environments, a PR is created that updates the manifest file setting the application version in the next environment.
The dynamic nature of GitOps deployments requires you to assist Weave GitOps a little with information on which repository hosts the manifest files, how to authenticate with the repository and the Git provider API, and which file hosts the version definition for each environment.
"},{"location":"pipelines/promoting-applications/#security","title":"Security","text":""},{"location":"pipelines/promoting-applications/#environments-and-repositories","title":"Environments and Repositories","text":"Only allow creation of RBAC resources from paths where compliance controls are in place. For example, do not allow regular users to create or update RBAC resources; or, if users must create RBAC resources, restrict them by namespace.
Follow the principle of \"Least Privilege\" RBAC as explained in Kubernetes RBAC Good Practices, with emphasis on the following:
Assign permissions at the namespace level where possible. Use RoleBindings as opposed to ClusterRoleBindings to give users rights only within a specific namespace.
Avoid providing wildcard permissions when possible, especially to all resources. As Kubernetes is an extensible system, providing wildcard access gives rights not just to all object types that currently exist in the cluster, but also to all object types which are created in the future.
list
and watch
on secrets as:It is also important to note that list and watch access also effectively allow users to read Secret contents.
"},{"location":"pipelines/promoting-applications/#policy","title":"Policy","text":"By following the guidelines above, you can have a safe initial configuration. However, given there are no deny semantics in RBAC, you need to guard future changes.
An RBAC Role or ClusterRole contains rules that represent a set of permissions. Permissions are purely additive (there are no \"deny\" rules).
You should ensure that attempts to break this contract are blocked and detected. You could achieve it by using Weave GitOps' Policy capabilities. The Policy Agent acts in two complementary modes: - Admission Controller protects from any attempt to create non-compliant RBAC resources that would end granting access to the secret. - Audit helps you identify already existing resources that are out of compliance. For example, roles created before policy agent was introduced as admission controller.
Once you have enabled Policy, the Policy Library gives you a set of good practices policies that will help you keep pipeline secrets secure according to the previous RBAC recommendations. Deploy them as Kustomization based on the following example:
Tip
In case you don't have access to the Policy Library, work with your Weaveworks Technical Account Manager (TAM) or Weaveworks Customer Reliability Engineer (CRE) to help with this step.
apiVersion: source.toolkit.fluxcd.io/v1\nkind: GitRepository\nmetadata:\nname: policy-library\nnamespace: flux-system\nspec:\ninterval: 10m0s\nurl: https://github.com/weaveworks/policy-library.git\nsecretRef:\nname: policy-library-github-credentials\n---\napiVersion: kustomize.toolkit.fluxcd.io/v1\nkind: Kustomization\nmetadata:\nname: rbac-secrets-good-practices\nnamespace: flux-system\nspec:\ninterval: 1m0s\nsourceRef:\nkind: GitRepository\nname: policy-library\npath: ./goodpractices/kubernetes/rbac/secrets\nprune: true\n
Warning Policies typically allow exclusions, to accommodate privileged workloads like Flux. You can manage them via PolicyConfig. For example, in order to allow Flux you could use the following PolicyConfig
:
apiVersion: pac.weave.works/v2beta2\nkind: PolicyConfig\nmetadata:\nname: allow-flux\nspec:\nmatch:\napps:\n- kind: Kustomization\nname: flux-system\nnamespace: flux-system\nconfig:\nweave.templates.rbac-prohibit-wildcards-policyrule-resources:\nparameters:\nexclude_label_key: \"app.kubernetes.io/part-of\"\nexclude_label_value: \"flux\"\nweave.templates.rbac-prohibit-wildcards-policyrule-verbs:\nparameters:\nexclude_label_key: \"app.kubernetes.io/part-of\"\nexclude_label_value: \"flux\"\nweave.policies.rbac-prohibit-list-secrets:\nparameters:\nexclude_label_key: \"app.kubernetes.io/part-of\"\nexclude_label_value: \"flux\"\nweave.policies.rbac-prohibit-watch-secrets:\nparameters:\nexclude_label_key: \"app.kubernetes.io/part-of\"\nexclude_label_value: \"flux\"\nweave.policies.rbac-prohibit-wildcard-secrets:\nparameters:\nexclude_label_key: \"app.kubernetes.io/part-of\"\nexclude_label_value: \"flux\"\n
Remember not to allow users to create RBAC resources without compliance checks. Otherwise, they could create RBAC resources that escape this runtime control.
In addition to guarding against privilege escalation via RBAC, you should guard against privilege escalation through workloads:
Permission to create workloads (either Pods, or workload resources that manage Pods) in a namespace implicitly grants access to many other resources in that namespace, such as Secrets, ConfigMaps, and PersistentVolumes that can be mounted in Pods
You could do that by creating pipeline namespaces to hold the Pipeline and its Secret, without permission to run workloads. You could enforce the latter one by using the Policy Containers Should Not Run In Namespace from the Policy Library and PolicyConfig as follows:
Tip
Update the PolicyConfig when onboarding a new pipeline. Consider using Weave GitOps self-service capabilities, such as GitOps Templates or GitOpsSets, to help you with the task.
apiVersion: pac.weave.works/v2beta2\nkind: PolicyConfig\nmetadata:\nname: reject-workloads-pipeline-namespace\nspec:\nmatch:\nnamespaces:\n- podinfo\nconfig:\nweave.policies.containers-should-not-run-in-namespace:\nparameters:\ncustom_namespace: \"podinfo\"\n
"},{"location":"pipelines/promoting-applications/#service-account","title":"Service Account","text":"To enable the Pipeline Controller to read the secret, we need to grant access via RBAC. The promotion credentials secret needs to be in the same namespace as the Pipeline
resource on the management cluster. You should create a RoleBinding
for the Pipeline Controller ServiceAccount
in the pipeline namespace. For a pipeline in namespace podinfo
, it would look like the following:
---\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\nname: read-app-promotion-credentials\nnamespace: podinfo # change for the pipeline namespace\nrules:\n- apiGroups:\n- \"\"\nresourceNames:\n- \"app-promotion-credentials\" # change for the secret name holding the pull requests secret\nresources:\n- \"secrets\"\nverbs:\n- \"get\"\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\nname: pipeline-controller-read-app-promotion-credentials\nnamespace: podinfo\nroleRef:\napiGroup: rbac.authorization.k8s.io\nkind: Role\nname: read-app-promotion-credentials\nsubjects:\n- kind: ServiceAccount\nname: chart-pipeline-controller # change in case pipeline controller service account has a different name in your context\nnamespace: flux-system\n
"},{"location":"pipelines/promoting-applications/#verify-security-context","title":"Verify Security Context","text":"Use pipeline promotions security to verify that your environments meets the security context described earlier.
Once deployed you could see how the different resources are being rejected. See those rejections in the Violations UI:
In addition, verify that the Pipeline controller can only get the secret by the following tests:
List access is denied:
$ kubectl get secret -n podinfo --as=system:serviceaccount:flux-system:chart-pipeline-controller\n\nError from server (Forbidden): secrets is forbidden: User \"system:serviceaccount:flux-system:chart-pipeline-controller\" cannot list resource \"secrets\" in API group \"\" in the namespace \"podinfo\"\n
Get access is allowed:
$ kubectl get secret -n podinfo --as=system:serviceaccount:flux-system:chart-pipeline-controller app-promotion-credentials\n\nNAME TYPE DATA AGE\napp-promotion-credentials Opaque 1 21m\n
"},{"location":"pipelines/promoting-applications/#tokens","title":"Tokens","text":"For example, if the case of GitHub, use fine-grained tokens to only allow access to the single repo that your configuration manifests exist.
For example, using github and fine-grained tokens you could do so.
The discovery of the version field is done using deterministic markers in a YAML manifest file. An example HelmRelease
manifest with such a marker looks like this:
---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nspec:\nchart:\nspec:\nversion: 0.13.7 # {\"$promotion\": \"pipeline-01:my-app:prod\"}\n
The value of the $promotion
field in the comment is comprised of 3 components separated by colons:
pipeline-01
.name
field of one of the environments defined in the pipeline's .spec.environments
array.Weave GitOps Enterprise will look for this marker whenever it receives an event from the respective HelmRelease of one of the leaf clusters and patch the file with the version denoted in the event (see the section above for instructions on setting up notification events from leaf clusters). Finally, it will create a Git provider PR to update the version of the application for the next environment in the pipeline.
"},{"location":"pipelines/promoting-applications/#supported-git-providers","title":"Supported Git Providers","text":"The following Git providers are currently support by this promotion strategy:
Select your Git provider via .spec.promotion.strategy.pull-request.type
. For example, for gitlab
it would look similar to:
promotion:\nstrategy:\npull-request:\ntype: gitlab\nurl: \"https://gitlab.com/weaveworks/<my-awesome-project.git>\"\nbaseBranch: main\nsecretRef:\nname: gitlab-promotion-credentials\n
More info in the spec.
"},{"location":"pipelines/promoting-applications/#credentials-secret","title":"Credentials Secret","text":"In the journey of creating a pull request, there are different secrets involved:
Create a Kubernetes secret with the previous data.
Expand to see example# example to use git over https with basic auth and pat\n$ kubectl create secret generic promotion-credentials \\\n--namespace=pipeline-01 \\\n--from-literal=\"username=<bot account name>\" \\\n--from-literal=\"password=<token value>\" \\\n--from-literal=\"token=<token value>\" \\\n--from-literal=\"hmac-key=<hmac-key value>\"\n
---\napiVersion: v1\nkind: Secret\nmetadata:\nname: promotion-credentials\nnamespace: pipeline-01\ndata:\nusername: ZXhhbXBsZQ==\npassword: ZXhhbXBsZS1wYXNzd29yZA==\ntoken: Z2hwX01IL3RsTFpXTXZMY0FxVWRYY1ZGL0lGbzh0WDdHNjdsZmRxWQ==\nhmac-key: OEIzMTNBNjQ0REU0OEVGODgxMTJCQ0VFNTQ3NkE=\ntype: Opaque\n
Tip
token
field needs to be given permission to create pull requests in the pipeline's repository (defined in .spec.promotion.strategy.pull-request.url
).hmac-key
field must match the key used for the Provider resource (.spec.secretRef), if specified in the leaf clusters.The field .spec.promotion.strategy.pull-request
defines details about the Git repository used for promoting the given app. Set the secretRef.name
field to the name of the Secret created in the previous step and the url
and branch
fields to the Git repository's HTTPS URL and optionally a specific branch (if the branch is not set, it defaults to main
). If using the generic-hmac
Provider from leaf clusters, also set the .spec.promotion.strategy.secretRef.name
to the name of the Secret created previously.
More info in the spec
"},{"location":"pipelines/promoting-applications/#notification","title":"Notification","text":"This section explains how to configure pipelines to work with external CI systems that are responsible for application promotions.
This strategy uses the notification controller running on the management cluster to forward any notifications received by the promotion webhook from leaf clusters to external CI systems. This requires patching the Flux manifests of the management cluster, in order to allow objects of type Pipeline
to be used as event sources. An example of a patch applied to enable this is shown below:
---\napiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n- gotk-components.yaml\n- gotk-sync.yaml\npatches:\n- patch: |\n- op: add\npath: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/eventSources/items/properties/kind/enum/-\nvalue: Pipeline\ntarget:\nkind: CustomResourceDefinition\nname: alerts.notification.toolkit.fluxcd.io\n
You can now create Provider/Alert resources on the management cluster to forward notifications to external systems. For example, the Provider resource shown below is used to invoke a GitHub Actions workflow on a repository:
---\napiVersion: notification.toolkit.fluxcd.io/v1beta1\nkind: Provider\nmetadata:\nname: promotion-my-app-via-github-actions\nspec:\ntype: githubdispatch\naddress: https://github.com/my-org/my-app-repo\nsecretRef:\nname: github-credentials\n
To use this Provider, add an Alert that uses the pipeline resource defined on the management cluster as an event source. An example of such an Alert is shown below:
---\napiVersion: notification.toolkit.fluxcd.io/v1beta1\nkind: Alert\nmetadata:\nname: promotion-my-app-via-github-actions\nspec:\neventSeverity: info\neventSources:\n- kind: Pipeline\nname: my-app\nnamespace: my-app-ns\nproviderRef:\nname: promotion-my-app-via-github-actions\n
The notification controller running on the management cluster is now configured to forward any promotion notifications received from leaf clusters. To actually use this strategy from a pipeline, set the promotion field as shown below:
---\napiVersion: pipelines.weave.works/v1alpha1\nkind: Pipeline\nmetadata:\nname: my-app\nnamespace: my-app-ns\nspec:\npromotion:\nnotification: {}\n
Promotion notifications from leaf clusters should now be forwarded via the notification controller running on the management cluster and should include information about the version of the application being promoted.
"},{"location":"pipelines/promoting-applications/#manual-promotion","title":"Manual promotion","text":"The supported strategies mentioned above, do not require any user interaction when handling promotions. However, there is often a need for a human operator to manually approve a promotion to the next environment. To achieve that, set the spec.promotion.manual
key to true
.
apiVersion: pipelines.weave.works/v1alpha1\nkind: Pipeline\nmetadata:\nname: my-app\nnamespace: my-app-ns\nspec:\npromotion:\nmanual: true\nstrategy:\npull-request:\ntype: github\nurl: https://github.com/my-org/my-app-repo\nbaseBranch: main\nsecretRef:\nname: promotion-credentials\n
When this key is set and a promotion is detected, Weave GitOps will prompt the user to manually promote the application to the next environment, via the use of a button shown under the next environment.
Manual promotion of an application"},{"location":"pipelines/promoting-applications/#configuration","title":"Configuration","text":""},{"location":"pipelines/promoting-applications/#retry-logic","title":"Retry Logic","text":"
By default if a promotion fails, an exponential back-off retry happens and returns with an error only after three retries.
Through Helm values, the retry logic is configurable.
# values.yaml\npromotion:\nretry:\n# Initial delay between retries.\ndelay: 2\n# Maximum delay between retries.\nmaxDelay: 20\n# Number of attempts.\nthreshold: 3\n
The promotion happens through an HTTP endpoint call; that endpoint may have connection timeout limits, which is why the maxDelay
option is there. If the calculated delay would exceed this value, it will use that as delay. For example if the delay values would be [2, 4, 8, 16, 32, 64]
, but maxDelay
is set to 15, the list will be [2, 4, 8, 15, 15, 15]
. With this option, the promotion will be retried on failure, but the sum of delay values will be only 59 seconds instead of 126 seconds.
The promotion endpoint can be exposed to the internet (for example, for GitHub Actions). To mitigate DoS attacks, the endpoint has rate limits. By default it's 20 requests per 30 seconds.
Rate limiting can be configured through Helm values:
# values.yaml\npromotion:\nrateLimit:\n# Number of requests allowed in set interval.\nvalue: 20\ninterval: 30\n
"},{"location":"pipelines/spec/v1alpha1/pipeline/","title":"Pipeline","text":"import TierLabel from \"../../../_components/TierLabel\";
"},{"location":"pipelines/spec/v1alpha1/pipeline/#pipeline-enterprise","title":"Pipeline ENTERPRISE","text":"The Pipeline API defines a resource for continuous delivery pipelines.
An example of a fully defined pipeline that creates pull requests for application promotions is shown below.
apiVersion: pipelines.weave.works/v1alpha1\nkind: Pipeline\nmetadata:\nname: podinfo-02\nnamespace: flux-system\nspec:\nappRef:\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nname: podinfo\nenvironments:\n- name: dev\ntargets:\n- namespace: podinfo-02-dev\nclusterRef:\nkind: GitopsCluster\nname: dev\nnamespace: flux-system\n- name: test\ntargets:\n- namespace: podinfo-02-qa\nclusterRef:\nkind: GitopsCluster\nname: dev\nnamespace: flux-system\n- namespace: podinfo-02-perf\nclusterRef:\nkind: GitopsCluster\nname: dev\nnamespace: flux-system\n- name: prod\ntargets:\n- namespace: podinfo-02-prod\nclusterRef:\nkind: GitopsCluster\nname: prod\nnamespace: flux-system\npromotion:\nstrategy:\npull-request:\ntype: github\nurl: https://github.com/my-org/my-app-repo\nbaseBranch: main\nsecretRef:\nname: github-credentials\n
"},{"location":"pipelines/spec/v1alpha1/pipeline/#specification","title":"Specification","text":"The documentation for version v1alpha1
of a Pipeline
resource is found next.
// Pipeline is the Schema for the pipelines API\ntype Pipeline struct {\nmetav1.TypeMeta `json:\",inline\"`\nmetav1.ObjectMeta `json:\"metadata,omitempty\"`\n\nSpec PipelineSpec `json:\"spec,omitempty\"`\n// +kubebuilder:default={\"observedGeneration\":-1}\nStatus PipelineStatus `json:\"status,omitempty\"`\n}\n\ntype PipelineSpec struct {\n// Environments is a list of environments to which the pipeline's application is supposed to be deployed.\n// +required\nEnvironments []Environment `json:\"environments\"`\n// AppRef denotes the name and type of the application that's governed by the pipeline.\n// +required\nAppRef LocalAppReference `json:\"appRef\"`\n// Promotion defines details about how promotions are carried out between the environments\n// of this pipeline.\n// +optional\nPromotion *Promotion `json:\"promotion,omitempty\"`\n}\n\ntype Environment struct {\n// Name defines the name of this environment. This is commonly something such as \"dev\" or \"prod\".\n// +required\nName string `json:\"name\"`\n// Targets is a list of targets that are part of this environment. Each environment should have\n// at least one target.\n// +required\nTargets []Target `json:\"targets\"`\n// Promotion defines details about how the promotion is done on this environment.\n// +optional\nPromotion *Promotion `json:\"promotion,omitempty\"`\n}\n\ntype Target struct {\n// Namespace denotes the namespace of this target on the referenced cluster. This is where\n// the app pointed to by the environment's `appRef` is searched.\n// +required\nNamespace string `json:\"namespace\"`\n// ClusterRef points to the cluster that's targeted by this target. If this field is not set, then the target is assumed\n// to point to a Namespace on the cluster that the Pipeline resources resides on (i.e. 
a local target).\n// +optional\nClusterRef *CrossNamespaceClusterReference `json:\"clusterRef,omitempty\"`\n}\n\n// Promotion define promotion configuration for the pipeline.\ntype Promotion struct {\n// Manual option to allow promotion between to require manual approval before proceeding.\n// +optional\nManual bool `json:\"manual,omitempty\"`\n// Strategy defines which strategy the promotion should use.\nStrategy Strategy `json:\"strategy\"`\n}\n\n// Strategy defines all the available promotion strategies. All of the fields in here are mutually exclusive, i.e. you can only select one\n// promotion strategy per Pipeline. Failure to do so will result in undefined behaviour.\ntype Strategy struct {\n// PullRequest defines a promotion through a Pull Request.\n// +optional\nPullRequest *PullRequestPromotion `json:\"pull-request,omitempty\"`\n// Notification defines a promotion where an event is emitted through Flux's notification-controller each time an app is to be promoted.\n// +optional\nNotification *NotificationPromotion `json:\"notification,omitempty\"`\n// SecrefRef reference the secret that contains a 'hmac-key' field with HMAC key used to authenticate webhook calls.\n// +optional\nSecretRef *meta.LocalObjectReference `json:\"secretRef,omitempty\"`\n}\ntype GitProviderType string\n\nconst (\nGithub GitProviderType = \"github\"\nGitlab GitProviderType = \"gitlab\"\nBitBucketServer GitProviderType = \"bitbucket-server\"\n)\n\ntype PullRequestPromotion struct {\n// Indicates the git provider type to manage pull requests.\n// +required\n// +kubebuilder:validation:Enum=github;gitlab;bitbucket-server\nType GitProviderType `json:\"type\"`\n// The git repository HTTPS URL used to patch the manifests for promotion.\n// +required\nURL string `json:\"url\"`\n// The branch to checkout after cloning. Note: This is just the base\n// branch that will eventually receive the PR changes upon merge and does\n// not denote the branch used to create a PR from. 
The latter is generated\n// automatically and cannot be provided.\n// +required\nBaseBranch string `json:\"baseBranch\"`\n// SecretRef specifies the Secret containing authentication credentials for\n// the git repository and for the Git provider API.\n// For HTTPS repositories the Secret must contain 'username' and 'password'\n// fields.\n// For Git Provider API to manage pull requests, it must contain a 'token' field.\n// +required\nSecretRef meta.LocalObjectReference `json:\"secretRef\"`\n}\n\ntype NotificationPromotion struct{}\n
"},{"location":"pipelines/spec/v1alpha1/pipeline/#references","title":"References","text":"// LocalAppReference is used together with a Target to find a single instance of an application on a certain cluster.\ntype LocalAppReference struct {\n// API version of the referent.\n// +required\nAPIVersion string `json:\"apiVersion\"`\n\n// Kind of the referent.\n// +required\nKind string `json:\"kind\"`\n\n// Name of the referent.\n// +required\nName string `json:\"name\"`\n}\n\n// CrossNamespaceClusterReference contains enough information to let you locate the\n// typed Kubernetes resource object at cluster level.\ntype CrossNamespaceClusterReference struct {\n// API version of the referent.\n// +optional\nAPIVersion string `json:\"apiVersion,omitempty\"`\n\n// Kind of the referent.\n// +required\nKind string `json:\"kind\"`\n\n// Name of the referent.\n// +required\nName string `json:\"name\"`\n\n// Namespace of the referent, defaults to the namespace of the Kubernetes resource object that contains the reference.\n// +optional\nNamespace string `json:\"namespace,omitempty\"`\n}\n
"},{"location":"pipelines/spec/v1alpha1/pipeline/#status","title":"Status","text":"type PipelineStatus struct {\n// ObservedGeneration is the last observed generation.\n// +optional\nObservedGeneration int64 `json:\"observedGeneration,omitempty\"`\n\n// Conditions holds the conditions for the Pipeline.\n// +optional\nConditions []metav1.Condition `json:\"conditions,omitempty\"`\n}\n
"},{"location":"pipelines/spec/v1alpha1/pipeline/#condition-reasons","title":"Condition Reasons","text":"// Reasons are provided as utility, and are not part of the declarative API.\nconst (\n// TargetClusterNotFoundReason signals a failure to locate a cluster resource on the management cluster.\nTargetClusterNotFoundReason string = \"TargetClusterNotFound\"\n// TargetClusterNotReadyReason signals that a cluster pointed to by a Pipeline is not ready.\nTargetClusterNotReadyReason string = \"TargetClusterNotReady\"\n// ReconciliationSucceededReason signals that a Pipeline has been successfully reconciled.\nReconciliationSucceededReason string = \"ReconciliationSucceeded\"\n)\n
"},{"location":"policy/","title":"Introduction ENTERPRISE","text":""},{"location":"policy/#policy","title":"Policy","text":"Weave Policy Engine helps users have continuous security and compliance checks across their software delivery pipeline. The engine utilizes policy-as-code to guarantee security, resilience, and coding standards across applications and infrastructure. The engine comes with 100+ policies covering numerous security and compliance benchmarks like SOC2, GDPR, PCI-DSS, HIPAA, Mitre Attack and more.
The policy engine provides the following functionality:
"},{"location":"policy/#admission-controller","title":"Admission Controller","text":"An out-of-the-box admission controller that monitors any changes happening to the clusters' deployments and resources, and prevents violating changes at deployment time from being deployed to clusters.
"},{"location":"policy/#audit","title":"Audit","text":"Daily scans of your clusters' deployments and resources, then report back any policy violations. The audit results can be published to different data analytics tools to provide compliance posture analysis of your clusters runtime.
"},{"location":"policy/#commitbuild-time-checks","title":"Commit/Build Time Checks","text":"Early feedback on policy violations at the commit or build time, by reporting policy violations right inside git or other CI tools. This helps developers and operators detect policy violations and fix them before they deploy their changes to the clusters.
"},{"location":"policy/authorization/","title":"Authorization ENTERPRISE","text":"This section provides a recommended way to configure RBAC in the context of policies. It is oriented to the journey that you expect your users to have.
"},{"location":"policy/authorization/#view-resources","title":"View Resources","text":"The policy journey in the UI involves several resources. We have the Policies that are used by the agent, the resulting Violations when the agent enforces those policies, and the PolicyConfigs that the user can configure to override policy parameters. The violations are essentially kubernetes events that contain the Validation object.
In order to view those resources, users would need to have read access to the policies
, policysconfigs
, and events
resource.
An example of a configuration to achieve this purpose could be seen below with policies-reader
role and developer-policies-reader
cluster role binding, to allow a group developer
to access all the policy-related resources.
apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\nname: policies-reader\nrules:\n- apiGroups: [\"pac.weave.works\"]\nresources: [\"policies\", \"policyconfigs\"]\nverbs: [\"get\", \"list\", \"watch\"]\n- apiGroups: [\"\"]\nresources: [\"events\"]\nverbs: [\"get\", \"watch\", \"list\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\nname: developer-policies-reader\nsubjects:\n- kind: Group\nname: developer\napiGroup: rbac.authorization.k8s.io\nroleRef:\nkind: ClusterRole\nname: policies-reader\napiGroup: rbac.authorization.k8s.io\n
"},{"location":"policy/commit-time-checks/","title":"Commit/Build Time Checks ENTERPRISE","text":""},{"location":"policy/commit-time-checks/#overview","title":"Overview","text":"Weave GitOps Enterprise enables developers and operators to check policy violations early in their software development life cycle, specifically at commit and build time. Developers and operators can have Weave Policy Validator integrated in their CI tools to validate whether their code changes are violating any policies or not.
Weave GitOps Enterprise offers a policy engine image that can be used to perform commit/build time checks. The image can be found on Docker Hub under the name: weaveworks/weave-iac-validator:v1.1
.
USAGE:\n app [global options] command [command options] [arguments...]\n\nVERSION:\n 0.0.1\n\nCOMMANDS:\n help, h Shows a list of commands or help for one command\n\nGLOBAL OPTIONS:\n --path value path to scan resources from\n --helm-values-file value path to resources helm values file\n --policies-path value path to policies kustomization directory\n --policies-helm-values-file value path to policies helm values file\n --git-repo-provider value git repository provider\n --git-repo-host value git repository host\n --git-repo-url value git repository url\n --git-repo-branch value git repository branch\n --git-repo-sha value git repository commit sha\n --git-repo-token value git repository token\n --azure-project value azure project name\n --sast value save result as gitlab sast format\n --sarif value save result as sarif format\n --json value save result as json format\n --generate-git-report generate git report if supported (default: false)\n--remediate auto remediate resources if possible (default: false)\n--no-exit-error exit with no error (default: false)\n--help, -h show help (default: false)\n--version, -v print the version (default: false)\n
"},{"location":"policy/commit-time-checks/#setup-policies","title":"Setup policies","text":"Policies can be a helm chart, kustomize directory or just plain kubernetes yaml files.
Example of policies kustomize directory
\u2514\u2500\u2500 policies\n \u251c\u2500\u2500 kustomization.yaml\n \u251c\u2500\u2500 minimum-replica-count.yaml\n \u251c\u2500\u2500 privileged-mode.yaml\n \u2514\u2500\u2500 privilege-escalation.yaml\n
# kustomization.yaml\nkind: Kustomization\napiVersion: kustomize.config.k8s.io/v1beta1\nresources:\n- minimum-replica-count.yaml\n- privilege-escalation.yaml\n- privileged-mode.yaml\n
"},{"location":"policy/commit-time-checks/#supported-cicd","title":"Supported CI/CD","text":"Weave validator supports auto-remediation functionality which creates a pull request with suggested fixes to remediate the reported violations.
Supported in:
To enable it you need to provide --remediate
flag and --git-repo-token
.
The token must have the permission to create a pull request.
"},{"location":"policy/commit-time-checks/#usecase-github","title":"UseCase: Github","text":"See how to setup the Github Action
"},{"location":"policy/commit-time-checks/#usecase-gitlab","title":"UseCase: Gitlab","text":" weave:\nimage:\nname: weaveworks/weave-iac-validator:v1.1\nscript:\n- weave-validator --path <path to resources> --policies-path <path to policies>\n
"},{"location":"policy/commit-time-checks/#enable-auto-remediation","title":"Enable Auto Remediation","text":" script:\n- weave-validator --path <path to resources> --policies-path <path to policies> --git-repo-token $GITLAB_TOKEN --remediate\n
"},{"location":"policy/commit-time-checks/#enable-static-application-security-testing","title":"Enable Static Application Security Testing","text":" stages:\n- weave\n- sast\n\nweave:\nstage: weave\nimage:\nname: weaveworks/weave-iac-validator:v1.1\nscript:\n- weave-validator <path to resources> --policies-path <path to policies> --sast sast.json\nartifacts:\nwhen: on_failure\npaths:\n- sast.json\n\nupload_sast:\nstage: sast\nwhen: always\nscript:\n- echo \"creating sast report\"\nartifacts:\nreports:\nsast: sast.json\n
"},{"location":"policy/commit-time-checks/#usecase-bitbucket","title":"UseCase: Bitbucket","text":"pipelines:\ndefault:\n- step:\nname: 'Weaveworks'\nimage: weaveworks/weave-iac-validator:v1.1\nscript:\n- weave-validator --path <path to resources> --policies-path <path to policies>\n
"},{"location":"policy/commit-time-checks/#enable-auto-remediation_1","title":"Enable Auto Remediation","text":" script:\n- weave-validator --path <path to resources> --policies-path <path to policies> --git-repo-token $TOKEN --remediate\n
"},{"location":"policy/commit-time-checks/#create-pipeline-report","title":"Create Pipeline Report","text":" script:\n- weave-validator --path <path to resources> --policies-path <path to policies> --git-repo-token $TOKEN --generate-git-report\n
"},{"location":"policy/commit-time-checks/#usecase-circleci","title":"UseCase: CircleCI","text":"jobs:\nweave:\ndocker:\n- image: weaveworks/weave-iac-validator:v1.1\nsteps:\n- checkout\n- run:\ncommand: weave-validator --path <path to resources> --policies-path <path to policies>\n
"},{"location":"policy/commit-time-checks/#enable-auto-remediation_2","title":"Enable Auto Remediation","text":" - run:\ncommand: weave-validator --path <path to resources> --policies-path <path to policies> --git-repo-token ${GITHUB_TOKEN} --remediate\n
"},{"location":"policy/commit-time-checks/#usecase-azure-devops","title":"UseCase: Azure DevOps","text":"trigger:\n- <list of branches to trigger the pipeline on>\n\npool:\nvmImage: ubuntu-latest\n\ncontainer:\nimage: weaveworks/weave-iac-validator:v1.1-azure\n\nsteps:\n- script: weave-validator --path <path to resources> --policies-path <path to policies> --git-repo-token $(TOKEN)\n
"},{"location":"policy/commit-time-checks/#enable-auto-remediation_3","title":"Enable Auto Remediation","text":"steps:\n- script: weave-validator --path <path to resources> --policies-path <path to policies> --git-repo-token $(TOKEN) --remediate\n
"},{"location":"policy/getting-started/","title":"Getting Started ENTERPRISE","text":"Enabling the Weave Policy Engine features in Weave GitOps is done by running the policy agent on the cluster. This section gives an overview of the policy ecosystem and the steps required for installing and running the policy agent on leaf clusters.
"},{"location":"policy/getting-started/#the-policy-ecosystem","title":"The Policy Ecosystem","text":"The policy ecosystem consists of several moving parts. The two primary components are the Policy Agent and the Policy CRs. The agent runs in several modes, and uses the Policy CRs to perform validations on different resources. The results of those validations can be written to different sinks.
There are two other optional components: the PolicySet, and the PolicyConfig. The PolicySet can be used to filter policies for a specific mode, while the PolicyConfig can be used to override policy parameters during the validation of a certain resource.
"},{"location":"policy/getting-started/#installation-pre-requisites","title":"Installation Pre-requisites","text":""},{"location":"policy/getting-started/#weave-gitops","title":"Weave GitOps","text":"You need to have a running instance of Weave GitOps with at least one CAPI provider installed to provision Kubernetes clusters. See Weave GitOps Installation page for more details about installing Weave GitOps.
"},{"location":"policy/getting-started/#policy-library","title":"Policy Library","text":"For the policy agent to work, it will need a source for the policies that it will enforce in the cluster. Enterprise customers should request access to fork our policy library into their local repositories. Our policy library includes an extensive list of policy CRs that cover a multitude of security and compliance benchmarks.
"},{"location":"policy/getting-started/#install-the-policy-agent","title":"Install the Policy Agent","text":"To install the policy agent on a leaf cluster, you should select the weave-policy-agent
from the profiles dropdown in the Create Cluster
page.
You should then configure the values.yaml
to pull the policies from your repo into the cluster. This is done by configuring the policySource
section. If your policy library repo is private, you will also need to reference the Secret
that contains the repo credentials. This is usually the secret you created while bootstrapping Flux on the management cluster and is copied to your leaf cluster during creation.
policySource:\nenabled: true\nurl: ssh://git@github.com/weaveworks/policy-library # This should be the url of the forked repo\ntag: v1.0.0\npath: ./ # Could be a path to the policies dir or a kustomization.yaml file\nsecretRef: my-pat # the name of the secret containing the repo credentials\n
Expand to see an example that uses an existing git source policySource:\nenabled: true\nsourceRef: # Specify the name for an existing GitSource reference\nkind: GitRepository\nname: policy-library\nnamespace: flux-system\n
You can find more about other policy profile configurations here.
"},{"location":"policy/getting-started/#policies-in-ui","title":"Policies in UI","text":"After the leaf cluster is provisioned and the profile is installed, you should now see the policies listed in the Policies tab in Weave GitOps UI.
Now you have a provisioned cluster with these policies enforced by the policy agent.
By default, the policy profile is set up to enforce policies at deployment time using admission controller, which results in blocking any deployment that violates the enforced policies.
"},{"location":"policy/getting-started/#prevent-violating-changes","title":"Prevent Violating Changes","text":"Now let's try to deploy a Kubernetes deployment that violates the Container Image Pull Policy
which is one of the enforced policies. This policy is violated when the container's imagePullPolicy
is not set to Always
.
apiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: nginx-deployment\nlabels:\napp: nginx\nspec:\nreplicas: 3\nselector:\nmatchLabels:\napp: nginx\ntemplate:\nmetadata:\nlabels:\napp: nginx\nspec:\ncontainers:\n- name: nginx\nimage: nginx:1.14.2\nimagePullPolicy: IfNotPresent\nports:\n- containerPort: 80\n
Once you apply it, the policy agent will deny this request and show a violation message, and accordingly the deployment will not be created.
"},{"location":"policy/getting-started/#violations-logs-in-ui","title":"Violations Logs in UI","text":"You can go to the Violations Log
in Weave GitOps UI to view the policy violations of all the connected clusters, and dive into the details of each violation.
This view shows only the violations resulting from the admission mode by configuring the events sink.
Violations Log
Violations Log Details
"},{"location":"policy/policy-configuration/","title":"PolicyConfig ENTERPRISE","text":""},{"location":"policy/policy-configuration/#goal","title":"Goal","text":"Users sometimes need to enforce the same policy(s) with different configurations (parameters) for different targets (workspaces, namespaces, applications, or resources). The PolicyConfig
CRD allows us to do that without duplicating policies by overriding policy parameters of multiple policies for a specific target.
The PolicyConfig CRD consists of two sections 1) match
used to specify the target of this PolicyConfig and 2) config
used to specify the policy parameters that will override the original policy parameters.
apiVersion: pac.weave.works/v2beta2\nkind: PolicyConfig # policy config resource kind\nmetadata:\nname: my-config # policy config name\nspec:\nmatch: # matches (targets of the policy config)\nworkspaces: # add one or more name workspaces\n- team-a\n- team-b\nconfig: # config for policies [one or more]\nweave.policies.containers-minimum-replica-count:\nparameters:\nreplica_count: 3\n
Each PolicyConfig CR can target either workspaces, namespaces, applications or resources. Targeting the same target explicitly in multiple PolicyConfigs is not allowed, i.e. you can't use the same namespace in several PolicyConfigs which target namespaces.
To target workspaces:
match:\nworkspaces:\n- team-a\n- team-b\n
To target namespaces:
match:\nnamespaces:\n- dev\n- prod\n
To target applications:
match:\napps: # add one or more apps [HelmRelease, Kustomization]\n- kind: HelmRelease\nname: my-app # app name\nnamespace: flux-system # app namespace [if empty will match in any namespace]\n
To target resources:
match:\nresources: # add one or more resources [Deployment, ReplicaSet, ..]\n- kind: Deployment\nname: my-deployment # resource name\nnamespace: default # resource namespace [if empty will match in any namespace]\n
Each PolicyConfig can override the parameters of one or more policies:
config: # config for policies [one or more]\nweave.policies.containers-minimum-replica-count: # the id of the policy\nparameters:\nreplica_count: 3\nowner: owner-4\nweave.policies.containers-running-in-privileged-mode:\nparameters:\nprivilege: true\n
"},{"location":"policy/policy-configuration/#overlapping-targets","title":"Overlapping Targets","text":"While it's not possible to create PolicyConfigs that explicitly target the same targets, it can happen implicitly ex: by targeting a namespace in a PolicyConfig and targeting an application that exists in this namespace in another. Whenever targets overlap, the narrower the scope of the PolicyConfig, the more precedence it has. Accordingly in the previous example, the configuration of the PolicyConfig targeting the application will have precedence over the PolicyConfig targeting the namespace.
Those are the possible targets from lowest to highest precedence:
Note:
We have a Kustomization application app-a
and deployment deployment-1
which is part of this application.
apiVersion: pac.weave.works/v2beta2\nkind: PolicyConfig\nmetadata:\nname: my-config-1\nspec:\nmatch:\nnamespaces:\n- flux-system\nconfig:\nweave.policies.containers-minimum-replica-count:\nparameters:\nreplica_count: 2\nowner: owner-1\n---\napiVersion: pac.weave.works/v2beta2\nkind: PolicyConfig\nmetadata:\nname: my-config-2\nspec:\nmatch:\napps:\n- kind: Kustomization\nname: app-a\nconfig:\nweave.policies.containers-minimum-replica-count:\nparameters:\nreplica_count: 3\n---\napiVersion: pac.weave.works/v2beta2\nkind: PolicyConfig\nmetadata:\nname: my-config-3\nspec:\nmatch:\napps:\n- kind: Kustomization\nname: app-a\nnamespace: flux-system\nconfig:\nweave.policies.containers-minimum-replica-count:\nparameters:\nreplica_count: 4\n---\napiVersion: pac.weave.works/v2beta2\nkind: PolicyConfig\nmetadata:\nname: my-config-4\nspec:\nmatch:\nresources:\n- kind: Deployment\nname: deployment-1\nconfig:\nweave.policies.containers-minimum-replica-count:\nparameters:\nreplica_count: 5\nowner: owner-4\n---\n\napiVersion: pac.weave.works/v2beta2\nkind: PolicyConfig\nmetadata:\nname: my-config-5\nspec:\nmatch:\nresources:\n- kind: Deployment\nname: deployment-1\nnamespace: flux-system\nconfig:\nweave.policies.containers-minimum-replica-count:\nparameters:\nreplica_count: 6\n
In the above example when you apply the 5 configurations...
app-a
will be affected by my-config-5
. It will be applied on the policies defined in it, which will affect deployment deployment-1
in namespace flux-system
as it matches the kind, name and namespace.Note
Deploying deployment-1
in another namespace other than flux-system
won't be affected by this configuration
Final config values will be as follows:
config:\nweave.policies.containers-minimum-replica-count:\nparameters:\nreplica_count: 6 # from my-config-5\nowner: owner-4 # from my-config-4\n
deployment-1
in namespace flux-system
, replica_count
must be >= 6
my-config-4
for owner
configuration parameter owner: owner-4
In the above example when you apply my-config-1
, my-config-2
, my-config-3
and my-config-4
my-config-4
will be applied on the policies defined in it which will affect deployment deployment-1
in all namespaces as it matches the kind and name only.Final config values will be as follows:
config:\nweave.policies.containers-minimum-replica-count:\nparameters:\nreplica_count: 5 # from my-config-4\nowner: owner-4 # from my-config-4\n
deployment-1
in all namespaces replica_count
must be >= 5
my-config-4
for owner
configuration parameter owner: owner-4
In the previous example when you apply my-config-1
, my-config-2
and my-config-3
my-config-3
will be applied on the policies defined in it which will affect application app-a
and all the resources in it in namespace flux-system
as it matches the kind, name and namespace.Note
Deploying app-a
in another namespace other than flux-system
won't be affected by this configuration
Final config values will be the follows:
config:\nweave.policies.containers-minimum-replica-count:\nparameters:\nreplica_count: 4 # from my-config-3\nowner: owner-1 # from my-config-1\n
app-a
and all the resources in it in namespaces flux-system
, replica_count
must be >= 4
my-config-1
for owner
configuration parameter owner: owner-1
In the above example when you apply my-config-1
and my-config-2
my-config-2
will be applied on the policies defined in it which will affect application app-a
and all the resources in it in all namespaces as it matches the kind and name only.Final config values will be as follows:
config:\nweave.policies.containers-minimum-replica-count:\nparameters:\nreplica_count: 3 # from my-config-2\nowner: owner-1 # from my-config-1\n
app-a
and all the resources in all namespaces, replica_count
must be >= 3
my-config-1
for owner
configuration parameter owner: owner-1
In the above example when you apply my-config-1
my-config-1
will be applied on the policies defined in it. which will affect the namespace flux-system
with all applications and resources in it as it matches by namespace only.Final config values will be as follows:
config:\nweave.policies.containers-minimum-replica-count:\nparameters:\nreplica_count: 2 # from my-config-1\nowner: owner-1 # from my-config-1\n
flux-system
, replica_count
must be >= 2
my-config-1
for owner
configuration parameter owner: owner-1
This is an optional custom resource that is used to select a group of policies to work in specific modes.
In each mode, the agent will list all the PolicySets of this mode and check which policies match any of those policysets, then validate the resources against them.
If there are no PolicySets found for a certain mode, all policies will be applied during this mode.
Note: Tenant Policies are always active in the Admission mode, even if they are not selected in the admission
policysets
Example
apiVersion: pac.weave.works/v2beta2\nkind: PolicySet\nmetadata:\nname: my-policy-set\nspec:\nmode: admission\nfilters:\nids:\n- weave.policies.containers-minimum-replica-count\ncategories:\n- security\nseverities:\n- high\n- medium\nstandards:\n- pci-dss\ntags:\n- tag-1\n
PolicySets can be created for any of the three modes supported by the agent: admission
, audit
, and tfAdmission
.
Policies can be grouped by their ids, categories, severities, standards and tags
The policy will be applied if any of the filters are matched.
"},{"location":"policy/policy-set/#migration-from-v2beta1-to-v2beta2","title":"Migration from v2beta1 to v2beta2","text":""},{"location":"policy/policy-set/#new-fields","title":"New fields","text":"spec.mode
is added. PolicySets should be updated to set the modePreviously the agent was configured with which policysets to use in each mode. Now we removed this argument from the agent's configuration and add the mode to the Policyset itself.
"},{"location":"policy/policy-set/#example-of-the-agent-configuration-in-versions-older-than-v200","title":"Example of the agent configuration in versions older than v2.0.0","text":"# config.yaml\nadmission:\nenabled: true\npolicySet: admission-policy-set\nsinks:\nfilesystemSink:\nfileName: admission.txt\n
"},{"location":"policy/policy-set/#example-of-current-policyset-with-mode-field","title":"Example of current PolicySet with mode field","text":"apiVersion: pac.weave.works/v2beta2\nkind: PolicySet\nmetadata:\nname: admission-policy-set\nspec:\nmode: admission\nfilters:\nids:\n- weave.policies.containers-minimum-replica-count\n
"},{"location":"policy/policy-set/#updated-fields","title":"Updated fields","text":"spec.name
became optional.spec.id
is deprecated.The Policy CRD is used to define policies which are then consumed and used by the agent to validate entities.
It uses OPA Rego Language to evaluate the entities.
"},{"location":"policy/policy/#policy-library","title":"Policy Library","text":"You should have a policy library repo set up which includes your policies resources as CRDs.
Info
Enterprise customers should have access to fork policy library repo into their local repositories.
"},{"location":"policy/policy/#tenant-policy","title":"Tenant Policy","text":"Tenant policies are special policies that are used by the Multi Tenancy feature in Weave GitOps Enterprise
Tenant policies have a special tag tenancy
.
Starting from version v2.2.0
, the policy agent will support mutating resources.
To enable mutating resources, policies must have field mutate
set to true
and the rego code should return the violating_key
and the recommended_value
in the violation response. The mutation webhook will use the violating_key
and recommended_value
to mutate the resource and return the new mutated resource.
Example
result = {\n \"issue_detected\": true,\n \"msg\": sprintf(\"Replica count must be greater than or equal to '%v'; found '%v'.\", [min_replica_count, replicas]),\n \"violating_key\": \"spec.replicas\",\n \"recommended_value\": min_replica_count\n}\n
"},{"location":"policy/policy/#policy-validation","title":"Policy Validation","text":"The policy validation object is the result of validating an entity against a policy. It contains all the necessary information to give the user a clear idea on what caused this violation or compliance.
id: string # identifier for the violation\naccount_id: string # organization identifier\ncluster_id: string # cluster identifier\npolicy: object # contains related policy data\nentity: object # contains related resource data\nstatus: string # Violation or Compliance\nmessage: string # message that summarizes the policy validation\ntype: string # the mode that produced this object. one of: Admission, Audit, TFAdmission\ntrigger: string # what triggered the validation, create request or initial audit,..\ncreated_at: string # time that the validation occurred in\n
"},{"location":"policy/releases/","title":"Profile Releases ENTERPRISE","text":""},{"location":"policy/releases/#v065","title":"v0.6.5","text":""},{"location":"policy/releases/#highlights","title":"Highlights","text":"Compatible with Policy Library versions:
Needs these migration steps to be compatible with the following versions:
Compatible with Policy Library versions:
Needs these migration steps to be compatible with the following versions:
While both v.0.4.0 and v1.0.0 are compatible with the agent, only v1.1.0 includes the modification needed to make the Controller Minimum Replica Count policy work with horizontalpodautoscalers
config.audit.interval
. It defaults to 24 hours.Weave policy profile provides policies to automate the enforcement of best practices and conventions. It ensures the compliance of workloads through the use of a policy agent that provides an admission controller webhook that stops violating resources from deploying to a cluster and runs a daily audit that reports violating resources already deployed.
The profile configuration contains two main sections policySource
to configure the source for deploying policies and policy-agent
to configure the policy agent.
policy-agent:\nfailurePolicy: Ignore\n\n# If you don't want to use cert-manager, set useCertManager to false and provide your own certs\nuseCertManager: true\ncertificate: \"\"\nkey: \"\"\ncaCertificate: \"\"\n\npersistence:\nenabled: false\n# claimStorage: 1Gi\n# sinkDir: /tmp\n# storageClassName: standard\n\nconfig:\naccountId: \"\"\nclusterId: \"\"\n\naudit:\n# Enable audit functionality\nenabled: false\n# sinks:\n# # Enable writing violations as K8s events\n# k8sEventsSink:\n# enabled: true\n\nadmission:\n# Enable admission functionality\nenabled: true\n# mutate: true # enable mutating violating resources\nsinks:\n# Enable writing violations as K8s events\nk8sEventsSink:\nenabled: true\n\n\npolicySource:\nenabled: false\n# url: ssh://git@github.com/weaveworks/policy-library\n# tag: v1.0.0\n# branch:\n# path: ./ # Could be a path to the policies dir or a kustomization.yaml file\n# secretRef: policy-library-auth # (Optional): Name of the K8s secret with private repo auth credentials\n# sourceRef: # Could specify a name for an existing GitSource reference instead of creating a new one\n# kind: GitRepository\n# name: policy-library\n# namespace: flux-system\n
"},{"location":"policy/weave-policy-profile/#policy-sources","title":"Policy Sources","text":"Policies are provided in the profile as Custom Resources. The agent reads from the policies deployed on the cluster and runs them during each admission request or when auditing a resource.
Policies are hosted in a policy library which is usually a git repository. They are fetched in the profile through the use of kustomize.toolkit.fluxcd.io.Kustomization
, that deploys the policies to the cluster.
By default, all policies in the specified path will be deployed. In order to specify which policies should be deployed in a library, a kustomize.config.k8s.io.Kustomization
file should be defined in the repository.
apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources: # specifies the path to each required policy\n- policies/ControllerContainerAllowingPrivilegeEscalation/policy.yaml\n- policies/ControllerContainerRunningAsRoot/policy.yaml\n- policies/ControllerReadOnlyFileSystem/policy.yaml\n
The profile then needs to be configured with the necessary config to be able to reach the repository that is acting as a policy library.
policySource:\nenabled: true\nurl: URL of the repo where your policies exist\ntag: tag name on the policies repo\npath: Path to the policies dir - or a kustomization.yaml that selects some policies - in the repo\nsecretRef (if the repo is private): Name of the K8s secret with private repo credentials (leave empty if the repo is public)\n
There is the option of referencing an existing policy library source instead of creating a new one.
policySource:\nenabled: true\nsourceRef:\nkind: Kind of the existing source\nname: Name of the policy library source\nnamespace: Namespace where the source exists\n
"},{"location":"policy/weave-policy-profile/#policy-agent-configuration","title":"Policy Agent Configuration","text":"The config
section is the single entry point for configuring the agent.
The agent needs the following parameters to be provided in the configuration yaml file:
accountId
: unique identifier that signifies the owner of that agentclusterId
: unique identifier for the cluster that the agent will run againstThe following optional parameters can also be provided:
logLevel
: app log level (default: \"info\")probesListen
: address for the probes server to run on (default: \":9000\")metricsAddress
: address the metric endpoint binds to (default: \":8080\")This contains the admission module that enforces policies. It uses the controller-runtime
Kubernetes package to register a callback that will be called when the agent receives an admission request. Once called, the agent will validate the received resource against the admission and tenant policies and k8s will use the result of this validation to either allow or reject the creation/update of said resource.
Works with policies of provider kubernetes
To enable admission control:
policy-agent:\nconfig:\nadmission:\nenabled: true\n
Enabling admission controller requires certificates for secure communication with the webhook client and the admission server. The best way to achieve this is by installing cert manager and then configuring the profile as follows:
policy-agent:\nuseCertManager: true\n
The cert manager can also be installed by installing the cert manager profile while creating the cluster.
There is the option of providing previously generated certificates although it is not recommended and it is up to the user to manage it:
policy-agent:\ncertificate: \"---\" # admission server certificate\nkey: \"---\" # admission server private key\ncaCertificate: \"---\" # CA bundle to validate the webhook server, used by the client\n
If the agent webhook could not be reached or the request failed to complete, the corresponding request would be refused. To change that behavior and accept the request in cases of failure, this needs to be set:
policy-agent:\nfailurePolicy: Ignore\n
"},{"location":"policy/weave-policy-profile/#audit","title":"Audit","text":"The audit functionality provides a full scan of the cluster(s) and reports back policy violations. This usually is used for policy violations reporting, and compliance posture analysis against known benchmarks like PCI DSS, CIS, .etc.
Works with policies of provider kubernetes
To enable the audit functionality:
policy-agent:\nconfig:\naudit:\nenabled: true\ninterval: 24 # configuring the frequency of audit operations running in hours (default is 24 hours)\n
The audit will be performed when the agent starts and then again periodically at an interval of your choice in hours (default is 24 hours). The results of the audit will be published to the configured sink(s).
"},{"location":"policy/weave-policy-profile/#terraform-admission","title":"Terraform Admission","text":"This is a webhook used to validate terraform plans. It is mainly used by the TF-Controller to enforce policies on terraform plans
Works with policies of provider terraform
To enable the terraform admission control:
policy-agent:\nconfig:\ntfAdmission:\nenabled: true\n
"},{"location":"policy/weave-policy-profile/#policy-validation-sinks","title":"Policy Validation Sinks","text":"When validating a resource, a validation object is generated that contains information about the status of that validation and metadata about the resource and policy involved. These objects can be exported to be visible for users as a critical part of the audit flow, but can also be useful as logs for the admission scenario.
By default, the agent only writes policy validations that are violating a certain policy when performing an audit. To write compliance results as well, the following needs to be specified in the profile:
policy-agent:\nconfig:\naudit:\nwriteCompliance: true\n
The agent profile supports storing the validations in different sinks. Multiple sinks can be used at the same time:
Text fileKubernetes EventsNotification ControllerElasticsearchThe results will be dumped into a text file in the logs
directory, in the agent container as a json string. It is important to note that this file will not be persisted and will be deleted upon pod restart, so generally this approach is not recommended for a production environment.
To enable writing to a text file in audit scenario:
policy-agent:\nconfig:\naudit:\nsinks:\nfileSystemSink:\nfileName: \"file.json\"\n
To enable writing to a text file in admission scenario:
policy-agent:\nconfig:\nadmission:\nsinks:\nfileSystemSink:\nfileName: \"file.json\"\n
It is possible to make the file persistent using the following configuration. This assumes that there is a PersistentVolume already configured on the cluster.
policy-agent:\npersistence:\nenabled: false # specifies whether to use persistence or not\nclaimStorage: 1Gi # claim size\nstorageClassName: standard # k8s StorageClass name\n
The results will be written as Kubernetes events. This means that they are accessible through the kubernetes API and can be consumed by custom exporters.
To enable writing Kubernetes events in audit scenario:
policy-agent:\nconfig:\naudit:\nsinks:\nk8sEventsSink:\nenabled: true\n
To enable writing Kubernetes events in admission scenario:
policy-agent:\nconfig:\nadmission:\nsinks:\nk8sEventsSink:\nenabled: true\n
This requires the cluster to be managed using flux. It makes use of the flux notification controller to send events to multiple sources, depending on the controller configuration. The agent writes the events to the controller and it proceeds to publish it to the configured listeners.
To enable writing to flux notification controller in audit scenario:
policy-agent:\nconfig:\naudit:\nsinks:\nfluxNotificationSink:\naddress: \"\"\n
To enable writing to flux notification controller in admission scenario:
policy-agent:\nconfig:\nadmission:\nsinks:\nfluxNotificationSink:\naddress: \"\"\n
The results of validating entities against policies will be written to an Elasticsearch index.
To enable writing to elasticsearch in audit scenario:
policy-agent:\nconfig:\naudit:\nsinks:\nelasticSink:\naddress: \"\"\nusername: \"\"\npassword: \"\"\nindexName: \"\"\ninsertionMode: \"upsert\"\n
To enable writing to elasticsearch in admission scenario:
policy-agent:\nconfig:\nadmission:\nsinks:\nelasticSink:\naddress: \"\"\nusername: \"\"\npassword: \"\"\nindexName: \"\"\ninsertionMode: \"insert\"\n
We support the following insertion modes:
insert
: doesn't update or delete any old records. The index will contain a log for all validation objects and give an insight of all the historical data.upsert
: updates the old result of validating an entity against a policy that happened on the same day. So the index will only contain the latest validation results for a policy and entity combination per day.To help you understand the state of progressive delivery updates to your applications, Weave GitOps Enterprise uses Flagger\u2014part of the Flux family of open source projects. WGE's Delivery view shows all of your deployed Canary
objects and rollout progress.
By default, Flagger automatically promotes a new version of an application whenever it passes the defined checks of an analysis phase. However, you can also configure webhooks to enable manual approvals of rollout stages.
This guide shows you how to manually gate a progressive delivery promotion with Flagger by using the in-built load tester.
"},{"location":"progressive-delivery/flagger-manual-gating/#prerequisites","title":"Prerequisites","text":"Canary
object and target deploymentYou can configure Flagger to work with several types of hooks that will be called at given stages during a progressive delivery rollout. Some of these hooks allow you to manually gate whether a rollout proceeds at certain points: - Before scaling up a new deployment and canary analysis begins with confirm-rollout
. - Before increasing traffic weight with confirm-traffic-increase
. - Before promoting a new version after successful canary analysis with confirm-promotion
.
Any URL can serve as a webhook target. It will approve if a 200 OK
status code is returned, and halt if 403 Forbidden
.
The webhook will receive a JSON payload that can be unmarshaled as CanaryWebhookPayload
:
type CanaryWebhookPayload struct {\n// Name of the canary\nName string `json:\"name\"`\n\n// Namespace of the canary\nNamespace string `json:\"namespace\"`\n\n// Phase of the canary analysis\nPhase CanaryPhase `json:\"phase\"`\n\n// Metadata (key-value pairs) for this webhook\nMetadata map[string]string `json:\"metadata,omitempty\"`\n}\n
The Flagger documentation provides more information about webhooks.
"},{"location":"progressive-delivery/flagger-manual-gating/#use-flaggers-load-tester-to-manually-gate-a-promotion","title":"Use Flagger's Load Tester to Manually Gate a Promotion","text":"To enable manual approval of a promotion, configure the confirm-promotion
webhook. This will call a particular gate provided through Flagger's load tester, and is an easy way to experiment using Flagger's included components.
Tip
We strongly recommend that you DO NOT USE the load tester for manual gating in a production environment. It lacks auth, so anyone with cluster access could open and close it. It also lacks storage, so all gates would close upon a restart. Instead, configure these webhooks for appropriate integration with a tool of your choice, such as Jira, Slack, Jenkins, etc.
"},{"location":"progressive-delivery/flagger-manual-gating/#configure-the-confirm-promotion-webhook","title":"Configure theconfirm-promotion
Webhook","text":"In your canary object, add the following in the analysis
section:
analysis:\nwebhooks:\n- name: \"ask for confirmation\"\ntype: confirm-promotion\nurl: http://flagger-loadtester.test/gate/check\n
This gate is closed by default.
"},{"location":"progressive-delivery/flagger-manual-gating/#deploy-a-new-version-of-your-application","title":"Deploy a New Version of Your Application","text":"Trigger a Canary rollout by updating your target deployment/daemonset\u2014for example, by bumping the container image tag. A full list of ways to trigger a rollout is available here.
Weave GitOps Enterprise (WGE)'s Applications > Delivery view enables you to watch the progression of a canary:
"},{"location":"progressive-delivery/flagger-manual-gating/#wait-for-the-canary-analysis-to-complete","title":"Wait for the Canary Analysis to Complete","text":"Once the canary analysis has successfully completed, Flagger will call the confirm-promotion
webhook and change status to WaitingPromotion
:
To open the gate and confirm that you approve promotion of the new version of your application, exec into the load tester container:
$ kubectl -n test exec -it flagger-loadtester-xxxx-xxxx sh\n\n# to open\n> curl -d '{\"name\": \"app\",\"namespace\":\"test\"}' http://localhost:8080/gate/open\n
Flagger will now promote the canary version to the primary and complete the progressive delivery rollout.
To manually close the gate again, issue this command:
> curl -d '{\"name\": \"app\",\"namespace\":\"test\"}' http://localhost:8080/gate/close\n
References:
Built upon the core tenets of continuous integration and continuous delivery (CI/CD), progressive delivery involves gradually rolling out features to small groups of select users to balance performance with speed. Developers and DevOps teams use fine-grained controls to minimize the risks of pushing new features to the production environment. If the newly released feature proves to be stable and performant, it can then be released to all users.
Flagger is a progressive delivery operator for Kubernetes and part of the Flux family of open source projects. It reduces the risk of introducing new software versions and automates production releases to improve your time to delivery. Flagger implements deployment strategies\u2014canary releases, A/B testing, Blue/Green mirroring\u2014using a service mesh (App Mesh, Istio, Linkerd, Kuma, Open Service Mesh) or an ingress controller (Contour, Gloo, NGINX, Skipper, Traefik, APISIX) for traffic routing. For release analysis, Flagger can query Prometheus, InfluxDB, Datadog, New Relic, CloudWatch, Stackdriver, or Graphite. For alerting it uses Slack, MS Teams, Discord, and Rocket. Using Flux allows us to manage our cluster applications in a declarative way through changes in a Git repository.
Weave GitOps Enterprise integrates with Flagger in order to provide a view on progressive delivery deployments. This includes the ability to view all the resources that Flagger manages during its operation. The default ClusterRole gitops-canaries-reader
includes the minimum permissions necessary for a user to be able to view canary object details, metric template object details and canary related events.
The WGE UI's Applications > Delivery view provides an \"at a glance\" view so that you can understand the status of your progressive delivery rollouts across a fleet of connected clusters. This removes the cognitive overhead of having to know which objects to query and where they are located. You can also drill down into each rollout to understand its status and configuration, and view near-to-realtime data on any summary or details page.
How to use WGE's progressive delivery offering: - if you don\u2019t have Flagger installed on any clusters, you'll receive an onboarding message about installing it - click on the delivery tab on the menu bar to retrieve a table view of canaries with key summary information regarding their location and state - click on a canary to see more detailed information about status, gates, and other elements - click on the events tab on the detail page to see the most recent Kubernetes events for that canary and learn more about deployment history - click on the yaml tab on the detail page to see the raw yaml of the canary - view objects from any cluster/namespace that you have the appropriate permissions for, and nothing else
Supported deployment strategies include:
Canary Release: the system gradually shifts traffic to a new version of an application and assesses performance\u2014either promoting the release or abandoning it, based on performance.
A/B Testing: uses HTTP headers or cookies to ensure users remain on the same version of an application during a canary analysis.
Blue/Green: Traffic is switched from the current application to a new version based on the success of testing.
Blue/Green with Traffic Mirroring: sends copies of incoming requests to the new version of an application. The user receives the response from the current application and the other is discarded. The new version is promoted only if metrics are healthy.
This guide uses Flux manifests to install Flagger and Linkerd, a CNCF project and service mesh for Kubernetes and beyond. We will walk you through a full end-to-end scenario where you will: - Install the Linkerd service mesh - Install Flagger - Deploy a sample application using a canary release strategy based on metrics provided through Linkerd's in-built Prometheus instance
"},{"location":"progressive-delivery/progressive-delivery-flagger-install/#prerequisites","title":"Prerequisites","text":"autoscaling/v2
or autoscaling/v2beta2
API to be installed on your cluster. You can use kubectl api-resources
to check which API versions are supported.To install Linkerd we'll use a Kustomization file. It will allow us to specify the order and default namespace for the installed resources, and to generate Secrets from certificate files via the use of a secretGenerator
.
To support mTLS connections between meshed pods, Linkerd requires a trust anchor certificate and an issuer certificate with its corresponding key. These certificates are automatically created via the linkerd install
command. However, when using a Helm chart to install Linkerd, you must provide these certificates deliberately. The step
CLI, listed above, allows us to generate these certificates.
To generate the trust anchor certificate, run:
step certificate create root.linkerd.cluster.local ca.crt ca.key \\\n--profile root-ca --no-password --insecure\n
To generate the issuer certificate, run:
step certificate create identity.linkerd.cluster.local issuer.crt issuer.key \\\n--profile intermediate-ca --not-after 8760h --no-password --insecure \\\n--ca ca.crt --ca-key ca.key\n
Add the ca.crt
, issuer.crt
, and issuer.key
files to the cluster repository under a linkerd
directory.
Let's add the three manifests for Linkerd components under the ./linkerd
directory: - A Namespace
resource to control where the components are installed - A HelmRepository
resource to make the Linkerd Helm repo available on the cluster - A HelmRelease
resource to install the latest version of Linkerd from the HelmRepository
---\napiVersion: v1\nkind: Namespace\nmetadata:\nname: linkerd\nlabels:\nconfig.linkerd.io/admission-webhooks: disabled\n
linkerd/source.yaml---\napiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: HelmRepository\nmetadata:\nname: linkerd\nspec:\ninterval: 1h\nurl: https://helm.linkerd.io/stable\n
Note: The value for the spec.values.identity.issuer.crtExpiry
field below depends on the parameter value used during the creation of the issuer certificate. In this example, it should be set to one year from the certificate creation.
---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: linkerd\nspec:\ninterval: 10m\nchart:\nspec:\nchart: linkerd2\nreconcileStrategy: ChartVersion\nsourceRef:\nkind: HelmRepository\nname: linkerd\ninstall:\ncrds: Create\nupgrade:\ncrds: CreateReplace\nvaluesFrom:\n- kind: Secret\nname: linkerd-certs\nvaluesKey: ca.crt\ntargetPath: identityTrustAnchorsPEM\n- kind: Secret\nname: linkerd-certs\nvaluesKey: issuer.crt\ntargetPath: identity.issuer.tls.crtPEM\n- kind: Secret\nname: linkerd-certs\nvaluesKey: issuer.key\ntargetPath: identity.issuer.tls.keyPEM\nvalues:\ninstallNamespace: false\nidentity:\nissuer:\ncrtExpiry: \"2023-07-18T20:00:00Z\" # Change this to match generated certificate expiry date\n---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: linkerd-viz\nspec:\ninterval: 10m\ndependsOn:\n- name: linkerd\nchart:\nspec:\nchart: linkerd-viz\nreconcileStrategy: ChartVersion\nsourceRef:\nkind: HelmRepository\nname: linkerd\n
Next, add the following manifests. The first file instructs Kustomize to patch any Secrets
that are referenced in HelmRelease
manifests. The second file is a Kustomization
that references all the other linkerd
resource files.
nameReference:\n- kind: Secret\nversion: v1\nfieldSpecs:\n- path: spec/valuesFrom/name\nkind: HelmRelease\n
linkerd/kustomization.yaml---\napiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nnamespace: linkerd\nconfigurations:\n- kustomizeconfig.yaml\nresources:\n- namespace.yaml\n- source.yaml\n- releases.yaml\nsecretGenerator:\n- name: linkerd-certs\nfiles:\n- ca.crt\n- issuer.crt\n- issuer.key\n
Note: The secretGenerator
generates Secrets from the files you've just created.
At this point the linkerd
directory in your cluster repository should look like this:
> tree linkerd\nlinkerd\n\u251c\u2500\u2500 ca.crt\n\u251c\u2500\u2500 issuer.crt\n\u251c\u2500\u2500 issuer.key\n\u251c\u2500\u2500 kustomization.yaml\n\u251c\u2500\u2500 kustomizeconfig.yaml\n\u251c\u2500\u2500 namespace.yaml\n\u251c\u2500\u2500 releases.yaml\n\u2514\u2500\u2500 source.yaml\n
Once Flux reconciles this directory to the cluster, Linkerd should be installed.
Before proceeding to the next step, check that all the Linkerd pods have started successfully:
> kubectl get pods -n linkerd \nNAME READY STATUS RESTARTS AGE\nlinkerd-destination-66d5668b-4mw49 4/4 Running 0 10m\nlinkerd-identity-6b4658c74b-6nc97 2/2 Running 0 10m\nlinkerd-proxy-injector-6b76789cb4-8vqj4 2/2 Running 0 10m\n\n> kubectl get pods -n linkerd-viz \nNAME READY STATUS RESTARTS AGE\ngrafana-db56d7cb4-xlnn4 2/2 Running 0 10m\nmetrics-api-595c7b564-724ps 2/2 Running 0 10m\nprometheus-5d4dffff55-8fscd 2/2 Running 0 10m\ntap-6dcb89d487-5ns8n 2/2 Running 0 10m\ntap-injector-54895654bb-9xn7k 2/2 Running 0 10m\nweb-6b6f65dbc7-wltdg 2/2 Running 0 10m\n
Note
Any new directories that you add to the cluster repository while following this guide must be included in a path that Flux reconciles.
"},{"location":"progressive-delivery/progressive-delivery-flagger-install/#installing-flagger-using-flux","title":"Installing Flagger Using Flux","text":"To install Flagger, you'll use a Kustomization file that will define the installation order and provide a default namespace for the installed resources.
Create a new flagger
directory. Make sure to locate it under a repository path that Flux reconciles.
Now add under this directory the three resource manifests for Flagger: - A Namespace
resource to control where the components are installed - A HelmRepository
resource to make the Flagger Helm repo available on the cluster - A HelmRelease
resource to install the latest version of Flagger and the load tester app (which generates synthetic traffic during the analysis phase), from that HelmRepository
---\napiVersion: v1\nkind: Namespace\nmetadata:\nname: flagger\n
flagger/source.yaml---\napiVersion: source.toolkit.fluxcd.io/v1beta2\nkind: HelmRepository\nmetadata:\nname: flagger\nspec:\ninterval: 1h\nurl: https://flagger.app\n
flagger/releases.yaml---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: flagger\nspec:\nreleaseName: flagger\ninstall:\ncrds: Create\nupgrade:\ncrds: CreateReplace\ninterval: 10m\nchart:\nspec:\nchart: flagger\nreconcileStrategy: ChartVersion\nsourceRef:\nkind: HelmRepository\nname: flagger\nvalues:\nmetricsServer: http://prometheus.linkerd-viz:9090\nmeshProvider: linkerd\n---\napiVersion: helm.toolkit.fluxcd.io/v2beta1\nkind: HelmRelease\nmetadata:\nname: loadtester\nspec:\ninterval: 10m\nchart:\nspec:\nchart: loadtester\nreconcileStrategy: ChartVersion\nsourceRef:\nkind: HelmRepository\nname: flagger\n
Now add the following Kustomization file. It references all of the previous files that you've added:
Expand to see the Flagger Kustomization manifest flagger/kustomization.yaml---\napiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nnamespace: flagger\nresources:\n- namespace.yaml\n- source.yaml\n- releases.yaml\n
The flagger
directory in the cluster repository should look like this:
> tree flagger\nflagger\n\u251c\u2500\u2500 kustomization.yaml\n\u251c\u2500\u2500 namespace.yaml\n\u251c\u2500\u2500 releases.yaml\n\u2514\u2500\u2500 source.yaml\n
Once Flux reconciles this directory to the cluster, Flagger and the load tester app should be installed.
Before proceeding to the next step, check that all of your Flagger pods have started successfully:
> kubectl get pods -n flagger\nNAME READY STATUS RESTARTS AGE\nflagger-7d456d4fc7-knf2g 1/1 Running 0 4m\nloadtester-855b4d77f6-scl6r 1/1 Running 0 4m\n
"},{"location":"progressive-delivery/progressive-delivery-flagger-install/#custom-resources-generated-by-flagger","title":"Custom Resources Generated by Flagger","text":"When Flagger is configured to integrate with a service mesh such as Linkerd or Istio for the rollout, this ClusterRole needs to be extended so that it can read the additional service mesh resources that Flagger generates. To display service mesh- or ingress-related resources, we require spec.provider
to be set in each canary resource.
The following table provides a list of all the custom resources that Flagger generates grouped by provider:
Provider API Group Resource AppMesh appmesh.k8s.aws virtualnode appmesh.k8s.aws virtualrouter appmesh.k8s.aws virtualservice Linkerd split.smi-spec.io trafficsplit Istio networking.istio.io destinationrule networking.istio.io virtualservice Contour projectcontour.io httpproxy Gloo gateway.solo.io routetable gloo.solo.io upstream Nginx networking.k8s.io ingress Skipper networking.k8s.io ingress Traefik traefik.containo.us traefikservice Open Service Mesh split.smi-spec.io trafficsplit Kuma kuma.io trafficroute GatewayAPI gateway.networking.k8s.io httprouteFor example, the following manifest shows how gitops-canaries-reader
has been extended to allow the user for viewing TrafficSplit resources when Linkerd is used:
apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\nname: gitops-canaries-reader\nrules:\n- apiGroups:\n- flagger.app\nresources:\n- canaries\n- metrictemplates\nverbs:\n- get\n- list\n- apiGroups:\n- \"\"\nresources:\n- events\nverbs:\n- get\n- watch\n- list\n# Additional permissions for Linkerd resources are added below\n- apiGroups:\n- split.smi-spec.io\nresources:\n- trafficsplits\nverbs:\n- get\n- list\n
"},{"location":"progressive-delivery/progressive-delivery-flagger-install/#setting-up-remote-cluster-permissions","title":"Setting up Remote Cluster Permissions","text":"In order to view canaries in a remote cluster from the management cluster, you need to consider the following: - The service account used to access the remote cluster needs to be able to list namespaces and custom resource definitions in the given cluster. It additionally needs to be able to impersonate users and groups. - The user or group that logs in to the management cluster, needs appropriate permissions to certain resources of the remote cluster.
For example, applying the following manifest on remote clusters, ensures that the wego-admin
user will be able to view canary information from within the Weave GitOps Enterprise UI on the management cluster:
apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\nname: user-groups-impersonator\nrules:\n- apiGroups: [\"\"]\nresources: [\"users\", \"groups\"]\nverbs: [\"impersonate\"]\n- apiGroups: [\"\"]\nresources: [\"namespaces\"]\nverbs: [\"get\", \"list\"]\n- apiGroups: [\"apiextensions.k8s.io\"]\nresources: [\"customresourcedefinitions\"]\nverbs: [\"get\", \"list\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\nname: impersonate-user-groups\nsubjects:\n- kind: ServiceAccount\nname: remote-cluster-01 # Service account created in remote cluster\nnamespace: default\nroleRef:\nkind: ClusterRole\nname: user-groups-impersonator\napiGroup: rbac.authorization.k8s.io\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\nname: canary-reader\nrules:\n- apiGroups: [\"\"]\nresources: [ \"events\", \"services\" ]\nverbs: [ \"get\", \"list\", \"watch\" ]\n- apiGroups: [ \"apps\" ]\nresources: [ \"*\" ]\nverbs: [ \"get\", \"list\" ]\n- apiGroups: [ \"autoscaling\" ]\nresources: [ \"*\" ]\nverbs: [ \"get\", \"list\" ]\n- apiGroups: [ \"flagger.app\" ]\nresources: [ \"canaries\", \"metrictemplates\"]\nverbs: [ \"get\", \"list\", \"watch\" ]\n- apiGroups: [ \"helm.toolkit.fluxcd.io\" ]\nresources: [ \"helmreleases\" ]\nverbs: [ \"get\", \"list\" ]\n- apiGroups: [ \"kustomize.toolkit.fluxcd.io\" ]\nresources: [ \"kustomizations\" ]\nverbs: [ \"get\", \"list\" ]\n- apiGroups: [ \"source.toolkit.fluxcd.io\" ]\nresources: [ \"buckets\", \"helmcharts\", \"gitrepositories\", \"helmrepositories\", \"ocirepositories\" ]\nverbs: [ \"get\", \"list\" ]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\nname: read-canaries\nsubjects:\n- kind: User\nname: wego-admin # User logged in management cluster, impersonated via service account\napiGroup: rbac.authorization.k8s.io\nroleRef:\nkind: ClusterRole\nname: canary-reader\napiGroup: rbac.authorization.k8s.io\n
You may need to add more users/groups to the read-canaries
ClusterRoleBinding to ensure additional users can view canary information from within the Weave GitOps Enterprise UI.
To demonstrate the progressive rollout of an application, we'll use a tiny sample web app called podinfo and configure a canary release strategy.
In our example, Flagger will scale up a new version of podinfo (the canary) alongside the existing version (the primary). It will gradually increase traffic to the new version in increments of 5%, up to a maximum of 50%. Flagger will continuously monitor the new version for an acceptable request response rate and average request duration. Based on this analysis, Flagger will either update the primary to the new version or abandon the promotion, then scale the canary back down to zero.
Create a new test
directory and add these three canary resource manifests under it: - A Namespace
resource to control where the components are installed - A Deployment
and HorizontalPodAutoscaler
for the podinfo
application - A Canary
resource which references the Deployment
and HorizontalPodAutoscaler
resources
We don't need to define a service resource. This is specified within the canary definition and created by Flagger.
Expand to see the three canary resource manifests test/namespace.yaml---\napiVersion: v1\nkind: Namespace\nmetadata:\nname: test\nannotations:\nlinkerd.io/inject: enabled\n
test/deployment.yaml---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\nname: podinfo\nlabels:\napp: podinfo\nspec:\nminReadySeconds: 5\nrevisionHistoryLimit: 5\nprogressDeadlineSeconds: 60\nstrategy:\nrollingUpdate:\nmaxUnavailable: 1\ntype: RollingUpdate\nselector:\nmatchLabels:\napp: podinfo\ntemplate:\nmetadata:\nannotations:\nprometheus.io/scrape: \"true\"\nprometheus.io/port: \"9797\"\nlabels:\napp: podinfo\nspec:\ncontainers:\n- name: podinfod\nimage: ghcr.io/stefanprodan/podinfo:6.1.8\nimagePullPolicy: IfNotPresent\nports:\n- name: http\ncontainerPort: 9898\nprotocol: TCP\n- name: http-metrics\ncontainerPort: 9797\nprotocol: TCP\n- name: grpc\ncontainerPort: 9999\nprotocol: TCP\ncommand:\n- ./podinfo\n- --port=9898\n- --port-metrics=9797\n- --grpc-port=9999\n- --grpc-service-name=podinfo\n- --level=info\n- --random-delay=false\n- --random-error=false\nenv:\n- name: PODINFO_UI_COLOR\nvalue: \"#34577c\"\nlivenessProbe:\nexec:\ncommand:\n- podcli\n- check\n- http\n- localhost:9898/healthz\ninitialDelaySeconds: 5\ntimeoutSeconds: 5\nreadinessProbe:\nexec:\ncommand:\n- podcli\n- check\n- http\n- localhost:9898/readyz\ninitialDelaySeconds: 5\ntimeoutSeconds: 5\nresources:\nlimits:\ncpu: 2000m\nmemory: 512Mi\nrequests:\ncpu: 100m\nmemory: 64Mi\n\n---\napiVersion: autoscaling/v2beta2\nkind: HorizontalPodAutoscaler\nmetadata:\nname: podinfo\nspec:\nscaleTargetRef:\napiVersion: apps/v1\nkind: Deployment\nname: podinfo\nminReplicas: 2\nmaxReplicas: 4\nmetrics:\n- type: Resource\nresource:\nname: cpu\ntarget:\ntype: Utilization\n# scale up if usage is above\n# 99% of the requested CPU (100m)\naverageUtilization: 99\n
test/canary.yaml---\napiVersion: flagger.app/v1beta1\nkind: Canary\nmetadata:\nname: podinfo\nspec:\n# deployment reference\ntargetRef:\napiVersion: apps/v1\nkind: Deployment\nname: podinfo\n# HPA reference (optional)\nautoscalerRef:\napiVersion: autoscaling/v2beta2\nkind: HorizontalPodAutoscaler\nname: podinfo\n# the maximum time in seconds for the canary deployment\n# to make progress before it is rollback (default 600s)\nprogressDeadlineSeconds: 60\nservice:\n# ClusterIP port number\nport: 9898\n# container port number or name (optional)\ntargetPort: 9898\nanalysis:\n# schedule interval (default 60s)\ninterval: 30s\n# max number of failed metric checks before rollback\nthreshold: 5\n# max traffic percentage routed to canary\n# percentage (0-100)\nmaxWeight: 50\n# canary increment step\n# percentage (0-100)\nstepWeight: 5\n# Linkerd Prometheus checks\nmetrics:\n- name: request-success-rate\n# minimum req success rate (non 5xx responses)\n# percentage (0-100)\nthresholdRange:\nmin: 99\ninterval: 1m\n- name: request-duration\n# maximum req duration P99\n# milliseconds\nthresholdRange:\nmax: 500\ninterval: 30s\n# testing (optional)\nwebhooks:\n- name: acceptance-test\ntype: pre-rollout\nurl: http://loadtester.flagger/\ntimeout: 30s\nmetadata:\ntype: bash\ncmd: \"curl -sd 'test' http://podinfo-canary.test:9898/token | grep token\"\n- name: load-test\ntype: rollout\nurl: http://loadtester.flagger/\nmetadata:\ncmd: \"hey -z 2m -q 10 -c 2 http://podinfo-canary.test:9898/\"\n
Add a Kustomization file to apply all resources to the test
namespace:
---\napiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nnamespace: test\nresources:\n- namespace.yaml\n- deployment.yaml\n- canary.yaml\n
At this point, the test
directory in the cluster repository should look like this:
> tree test\ntest\n\u251c\u2500\u2500 canary.yaml\n\u251c\u2500\u2500 deployment.yaml\n\u251c\u2500\u2500 kustomization.yaml\n\u2514\u2500\u2500 namespace.yaml\n
After a short time, the status of the canary object should be set to Initialized
:
> kubectl get canary podinfo -n test\nNAME STATUS WEIGHT LASTTRANSITIONTIME\npodinfo Initialized 0 2022-07-22T12:37:58Z\n
Trigger a new rollout by bumping the version of podinfo
:
> kubectl set image deployment/podinfo podinfod=ghcr.io/stefanprodan/podinfo:6.0.1 -n test\n
During the progressive rollout, the canary object reports on its current status:
> kubectl get canary podinfo -n test\nNAME STATUS WEIGHT LASTTRANSITIONTIME\npodinfo Progressing 5 2022-07-22T12:41:57Z\n
After a short time the rollout is completed and the status of the canary object is set to Succeeded
:
> kubectl get canary podinfo -n test\nNAME STATUS WEIGHT LASTTRANSITIONTIME\npodinfo Succeeded 0 2022-07-22T12:47:58Z\n
"},{"location":"progressive-delivery/progressive-delivery-flagger-install/#summary","title":"Summary","text":"Congratulations, you have now completed a progressive delivery rollout with Flagger and Linkerd!
Next steps: - Explore more of what Flagger offers - Configure manual approvals for progressive delivery deployments
"},{"location":"references/helm-reference/","title":"Helm chart reference","text":"This is a reference of all the configurable values in Weave GitOps's Helm chart. This is intended for customizing your installation after you've gone through the getting started guide.
This reference was generated for the chart version 4.0.34 which installs weave gitops v0.36.0.
"},{"location":"references/helm-reference/#values","title":"Values","text":"Key Type Default Description additionalArgs list[]
Additional arguments to pass in to the gitops-server adminUser.create bool false
Whether the local admin user should be created. If you use this, make sure you add it to rbac.impersonationResourceNames
. adminUser.createClusterRole bool true
Specifies whether the clusterRole & binding to the admin user should be created. Will be created only if adminUser.create
is enabled. Without this, the adminUser will only be able to see resources in the target namespace. adminUser.createSecret bool true
Whether we should create the secret for the local adminUser. Will be created only if adminUser.create
is enabled. Without this, we'll still set up the roles and permissions, but the secret with username and password has to be provided separately. adminUser.passwordHash string nil
Set the password for the local admin user. Requires adminUser.create
and adminUser.createSecret
. This needs to have been hashed using bcrypt. You can do this via our CLI with gitops get bcrypt-hash
. adminUser.username string \"gitops-test-user\"
Set the username for the local admin user. This should match the value in the secret cluster-user-auth
which can be created with adminUser.createSecret
. Requires adminUser.create
. affinity object {}
annotations object {}
Annotations to add to the deployment envVars[0].name string \"WEAVE_GITOPS_FEATURE_TENANCY\"
envVars[0].value string \"true\"
envVars[1].name string \"WEAVE_GITOPS_FEATURE_CLUSTER\"
envVars[1].value string \"false\"
extraVolumeMounts list []
extraVolumes list []
fullnameOverride string \"\"
image.pullPolicy string \"IfNotPresent\"
image.repository string \"ghcr.io/weaveworks/wego-app\"
image.tag string \"v0.36.0\"
imagePullSecrets list []
ingress.annotations object {}
ingress.className string \"\"
ingress.enabled bool false
ingress.hosts string nil
ingress.tls list []
logLevel string \"info\"
What log level to output. Valid levels are 'debug', 'info', 'warn' and 'error' metrics.enabled bool false
Start the metrics exporter metrics.service.annotations object {\"prometheus.io/path\":\"/metrics\",\"prometheus.io/port\":\"{{ .Values.metrics.service.port }}\",\"prometheus.io/scrape\":\"true\"}
Annotations to set on the service metrics.service.port int 2112
Port to start the metrics exporter on nameOverride string \"\"
networkPolicy.create bool true
Specifies whether default network policies should be created. nodeSelector object {}
oidcSecret.create bool false
podAnnotations object {}
podLabels object {}
podSecurityContext object {}
rbac.additionalRules list []
If non-empty, these additional rules will be appended to the RBAC role and the cluster role. for example, additionalRules: - apiGroups: [\"infra.contrib.fluxcd.io\"] resources: [\"terraforms\"] verbs: [ \"get\", \"list\", \"patch\" ] rbac.create bool true
Specifies whether the clusterRole & binding to the service account should be created rbac.impersonationResourceNames list []
If non-empty, this limits the resources that the service account can impersonate. This applies to both users and groups, e.g. ['user1@corporation.com', 'user2@corporation.com', 'operations']
rbac.impersonationResources list [\"users\",\"groups\"]
Limit the type of principal that can be impersonated rbac.viewSecretsResourceNames list [\"cluster-user-auth\",\"oidc-auth\"]
If non-empty, this limits the secrets that can be accessed by the service account to the specified ones, e.g. ['weave-gitops-enterprise-credentials']
replicaCount int 1
resources object {}
securityContext.allowPrivilegeEscalation bool false
securityContext.capabilities.drop[0] string \"ALL\"
securityContext.readOnlyRootFilesystem bool true
securityContext.runAsNonRoot bool true
securityContext.runAsUser int 1000
securityContext.seccompProfile.type string \"RuntimeDefault\"
serverTLS.enable bool false
Enable TLS termination in gitops itself. If you enable this, you need to create a secret, and specify the secretName. Another option is to create an ingress. serverTLS.secretName string \"my-secret-tls\"
Specify the tls secret name. This type of secret has keys called tls.crt
and tls.key
containing their corresponding values in base64 format. See https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets for more details and examples service.annotations object {}
service.create bool true
service.port int 9001
service.type string \"ClusterIP\"
serviceAccount.annotations object {}
Annotations to add to the service account serviceAccount.create bool true
Specifies whether a service account should be created serviceAccount.name string \"\"
The name of the service account to use. If not set and create is true, a name is generated using the fullname template tolerations list []
"},{"location":"references/cli-reference/gitops/","title":"Gitops","text":""},{"location":"references/cli-reference/gitops/#gitops","title":"gitops","text":"Weave GitOps
"},{"location":"references/cli-reference/gitops/#synopsis","title":"Synopsis","text":"Command line utility for managing Kubernetes applications via GitOps.
"},{"location":"references/cli-reference/gitops/#examples","title":"Examples","text":" # Get help for gitops create dashboard command\n gitops create dashboard -h\n gitops help create dashboard\n\n # Get the version of gitops along with commit, branch, and flux version\n gitops version\n\n To learn more, you can find our documentation at https://docs.gitops.weave.works/\n
"},{"location":"references/cli-reference/gitops/#options","title":"Options","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n -h, --help help for gitops\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops/#see-also","title":"SEE ALSO","text":"Validates flux compatibility
gitops check [flags]\n
"},{"location":"references/cli-reference/gitops_check/#examples","title":"Examples","text":"# Validate flux and kubernetes compatibility\ngitops check\n
"},{"location":"references/cli-reference/gitops_check/#options","title":"Options","text":" -h, --help help for check\n
"},{"location":"references/cli-reference/gitops_check/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_check/#see-also","title":"SEE ALSO","text":"Generate the autocompletion script for the specified shell
"},{"location":"references/cli-reference/gitops_completion/#synopsis","title":"Synopsis","text":"Generate the autocompletion script for gitops for the specified shell. See each sub-command's help for details on how to use the generated script.
"},{"location":"references/cli-reference/gitops_completion/#options","title":"Options","text":" -h, --help help for completion\n
"},{"location":"references/cli-reference/gitops_completion/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_completion/#see-also","title":"SEE ALSO","text":"Generate the autocompletion script for bash
"},{"location":"references/cli-reference/gitops_completion_bash/#synopsis","title":"Synopsis","text":"Generate the autocompletion script for the bash shell.
This script depends on the 'bash-completion' package. If it is not installed already, you can install it via your OS's package manager.
To load completions in your current shell session:
source <(gitops completion bash)\n
To load completions for every new session, execute once:
"},{"location":"references/cli-reference/gitops_completion_bash/#linux","title":"Linux:","text":"gitops completion bash > /etc/bash_completion.d/gitops\n
"},{"location":"references/cli-reference/gitops_completion_bash/#macos","title":"macOS:","text":"gitops completion bash > $(brew --prefix)/etc/bash_completion.d/gitops\n
You will need to start a new shell for this setup to take effect.
gitops completion bash\n
"},{"location":"references/cli-reference/gitops_completion_bash/#options","title":"Options","text":" -h, --help help for bash\n --no-descriptions disable completion descriptions\n
"},{"location":"references/cli-reference/gitops_completion_bash/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_completion_bash/#see-also","title":"SEE ALSO","text":"Generate the autocompletion script for fish
"},{"location":"references/cli-reference/gitops_completion_fish/#synopsis","title":"Synopsis","text":"Generate the autocompletion script for the fish shell.
To load completions in your current shell session:
gitops completion fish | source\n
To load completions for every new session, execute once:
gitops completion fish > ~/.config/fish/completions/gitops.fish\n
You will need to start a new shell for this setup to take effect.
gitops completion fish [flags]\n
"},{"location":"references/cli-reference/gitops_completion_fish/#options","title":"Options","text":" -h, --help help for fish\n --no-descriptions disable completion descriptions\n
"},{"location":"references/cli-reference/gitops_completion_fish/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_completion_fish/#see-also","title":"SEE ALSO","text":"Generate the autocompletion script for powershell
"},{"location":"references/cli-reference/gitops_completion_powershell/#synopsis","title":"Synopsis","text":"Generate the autocompletion script for powershell.
To load completions in your current shell session:
gitops completion powershell | Out-String | Invoke-Expression\n
To load completions for every new session, add the output of the above command to your powershell profile.
gitops completion powershell [flags]\n
"},{"location":"references/cli-reference/gitops_completion_powershell/#options","title":"Options","text":" -h, --help help for powershell\n --no-descriptions disable completion descriptions\n
"},{"location":"references/cli-reference/gitops_completion_powershell/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_completion_powershell/#see-also","title":"SEE ALSO","text":"Generate the autocompletion script for zsh
"},{"location":"references/cli-reference/gitops_completion_zsh/#synopsis","title":"Synopsis","text":"Generate the autocompletion script for the zsh shell.
If shell completion is not already enabled in your environment you will need to enable it. You can execute the following once:
echo \"autoload -U compinit; compinit\" >> ~/.zshrc\n
To load completions in your current shell session:
source <(gitops completion zsh)\n
To load completions for every new session, execute once:
"},{"location":"references/cli-reference/gitops_completion_zsh/#linux","title":"Linux:","text":"gitops completion zsh > \"${fpath[1]}/_gitops\"\n
"},{"location":"references/cli-reference/gitops_completion_zsh/#macos","title":"macOS:","text":"gitops completion zsh > $(brew --prefix)/share/zsh/site-functions/_gitops\n
You will need to start a new shell for this setup to take effect.
gitops completion zsh [flags]\n
"},{"location":"references/cli-reference/gitops_completion_zsh/#options","title":"Options","text":" -h, --help help for zsh\n --no-descriptions disable completion descriptions\n
"},{"location":"references/cli-reference/gitops_completion_zsh/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_completion_zsh/#see-also","title":"SEE ALSO","text":"Creates a resource
"},{"location":"references/cli-reference/gitops_create/#examples","title":"Examples","text":"# Create a HelmRepository and HelmRelease to deploy Weave GitOps\ngitops create dashboard ww-gitops \\\n --password=$PASSWORD \\\n --export > ./clusters/my-cluster/weave-gitops-dashboard.yaml\n\n# Create a Terraform object\ngitops create terraform my-resource \\\n -n my-namespace \\\n --source GitRepository/my-project \\\n --path ./terraform \\\n --interval 1m \\\n --export > ./clusters/my-cluster/infra/terraform-my-resource.yaml\n
"},{"location":"references/cli-reference/gitops_create/#options","title":"Options","text":" --export Export in YAML format to stdout.\n -h, --help help for create\n --timeout duration The timeout for operations during resource creation. (default 3m0s)\n
"},{"location":"references/cli-reference/gitops_create/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_create/#see-also","title":"SEE ALSO","text":"Create a HelmRepository and HelmRelease to deploy Weave GitOps
"},{"location":"references/cli-reference/gitops_create_dashboard/#synopsis","title":"Synopsis","text":"Create a HelmRepository and HelmRelease to deploy Weave GitOps
gitops create dashboard [flags]\n
"},{"location":"references/cli-reference/gitops_create_dashboard/#examples","title":"Examples","text":"# Create a HelmRepository and HelmRelease to deploy Weave GitOps\ngitops create dashboard ww-gitops \\\n --password=$PASSWORD \\\n --export > ./clusters/my-cluster/weave-gitops-dashboard.yaml\n
"},{"location":"references/cli-reference/gitops_create_dashboard/#options","title":"Options","text":" --context string The name of the kubeconfig context to use\n --disable-compression If true, opt-out of response compression for all requests to the server\n -h, --help help for dashboard\n --password string The password of the dashboard admin user.\n --username string The username of the dashboard admin user. (default \"admin\")\n --values strings Local path to values.yaml files for HelmRelease, also accepts comma-separated values.\n
"},{"location":"references/cli-reference/gitops_create_dashboard/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --export Export in YAML format to stdout.\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n --timeout duration The timeout for operations during resource creation. (default 3m0s)\n
"},{"location":"references/cli-reference/gitops_create_dashboard/#see-also","title":"SEE ALSO","text":"Create a Terraform object
"},{"location":"references/cli-reference/gitops_create_terraform/#synopsis","title":"Synopsis","text":"Create a Terraform object
gitops create terraform [flags]\n
"},{"location":"references/cli-reference/gitops_create_terraform/#examples","title":"Examples","text":"# Create a Terraform resource in the default namespace\ngitops create terraform -n default my-resource --source GitRepository/my-project --path ./terraform --interval 15m\n\n# Create and export a Terraform resource manifest to the standard output\ngitops create terraform -n default my-resource --source GitRepository/my-project --path ./terraform --interval 15m --export\n
"},{"location":"references/cli-reference/gitops_create_terraform/#options","title":"Options","text":" --context string The name of the kubeconfig context to use\n --disable-compression If true, opt-out of response compression for all requests to the server\n -h, --help help for terraform\n --interval string Interval at which the Terraform configuration should be applied\n --path string Path to the Terraform configuration\n --source string Source of the Terraform configuration\n
"},{"location":"references/cli-reference/gitops_create_terraform/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --export Export in YAML format to stdout.\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n --timeout duration The timeout for operations during resource creation. (default 3m0s)\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_create_terraform/#see-also","title":"SEE ALSO","text":"Delete a resource
"},{"location":"references/cli-reference/gitops_delete/#options","title":"Options","text":" -h, --help help for delete\n
"},{"location":"references/cli-reference/gitops_delete/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_delete/#see-also","title":"SEE ALSO","text":"Delete a Terraform object
gitops delete terraform [flags]\n
"},{"location":"references/cli-reference/gitops_delete_terraform/#examples","title":"Examples","text":"# Delete a Terraform resource in the default namespace\ngitops delete terraform -n default my-resource\n
"},{"location":"references/cli-reference/gitops_delete_terraform/#options","title":"Options","text":" --context string The name of the kubeconfig context to use\n --disable-compression If true, opt-out of response compression for all requests to the server\n -h, --help help for terraform\n
"},{"location":"references/cli-reference/gitops_delete_terraform/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_delete_terraform/#see-also","title":"SEE ALSO","text":"Display one or many Weave GitOps resources
"},{"location":"references/cli-reference/gitops_get/#examples","title":"Examples","text":"# Get the CLI configuration for Weave GitOps\ngitops get config\n\n# Generate a hashed secret\nPASSWORD=\"<your password>\"\necho -n $PASSWORD | gitops get bcrypt-hash\n
"},{"location":"references/cli-reference/gitops_get/#options","title":"Options","text":" -h, --help help for get\n
"},{"location":"references/cli-reference/gitops_get/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_get/#see-also","title":"SEE ALSO","text":"Generates a hashed secret
gitops get bcrypt-hash [flags]\n
"},{"location":"references/cli-reference/gitops_get_bcrypt-hash/#examples","title":"Examples","text":"PASSWORD=\"<your password>\"\necho -n $PASSWORD | gitops get bcrypt-hash\n
"},{"location":"references/cli-reference/gitops_get_bcrypt-hash/#options","title":"Options","text":" -h, --help help for bcrypt-hash\n
"},{"location":"references/cli-reference/gitops_get_bcrypt-hash/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_get_bcrypt-hash/#see-also","title":"SEE ALSO","text":"Prints out the CLI configuration for Weave GitOps
gitops get config [flags]\n
"},{"location":"references/cli-reference/gitops_get_config/#examples","title":"Examples","text":"# Prints out the CLI configuration for Weave GitOps\ngitops get config\n
"},{"location":"references/cli-reference/gitops_get_config/#options","title":"Options","text":" -h, --help help for config\n
"},{"location":"references/cli-reference/gitops_get_config/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_get_config/#see-also","title":"SEE ALSO","text":"Get logs for a resource
"},{"location":"references/cli-reference/gitops_logs/#options","title":"Options","text":" -h, --help help for logs\n
"},{"location":"references/cli-reference/gitops_logs/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_logs/#see-also","title":"SEE ALSO","text":"Get the runner logs of a Terraform object
gitops logs terraform [flags]\n
"},{"location":"references/cli-reference/gitops_logs_terraform/#examples","title":"Examples","text":"# Get the runner logs of a Terraform object in the \"flux-system\" namespace\ngitops logs terraform --namespace flux-system my-resource\n
"},{"location":"references/cli-reference/gitops_logs_terraform/#options","title":"Options","text":" --context string The name of the kubeconfig context to use\n --disable-compression If true, opt-out of response compression for all requests to the server\n -h, --help help for terraform\n
"},{"location":"references/cli-reference/gitops_logs_terraform/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_logs_terraform/#see-also","title":"SEE ALSO","text":"Replan a resource
"},{"location":"references/cli-reference/gitops_replan/#examples","title":"Examples","text":"# Replan the Terraform plan of a Terraform object from the \"flux-system\" namespace\ngitops replan terraform --namespace flux-system my-resource\n
"},{"location":"references/cli-reference/gitops_replan/#options","title":"Options","text":" -h, --help help for replan\n
"},{"location":"references/cli-reference/gitops_replan/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_replan/#see-also","title":"SEE ALSO","text":"Trigger replan for a Terraform object
gitops replan terraform [flags]\n
"},{"location":"references/cli-reference/gitops_replan_terraform/#examples","title":"Examples","text":"# Replan the Terraform plan of a Terraform object from the \"flux-system\" namespace\ngitops replan terraform --namespace flux-system my-resource\n
"},{"location":"references/cli-reference/gitops_replan_terraform/#options","title":"Options","text":" --context string The name of the kubeconfig context to use\n --disable-compression If true, opt-out of response compression for all requests to the server\n -h, --help help for terraform\n
"},{"location":"references/cli-reference/gitops_replan_terraform/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_replan_terraform/#see-also","title":"SEE ALSO","text":"Resume a resource
"},{"location":"references/cli-reference/gitops_resume/#examples","title":"Examples","text":"# Suspend a Terraform object from the \"flux-system\" namespace\ngitops resume terraform --namespace flux-system my-resource\n
"},{"location":"references/cli-reference/gitops_resume/#options","title":"Options","text":" -h, --help help for resume\n
"},{"location":"references/cli-reference/gitops_resume/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_resume/#see-also","title":"SEE ALSO","text":"Resume a Terraform object
gitops resume terraform [flags]\n
"},{"location":"references/cli-reference/gitops_resume_terraform/#examples","title":"Examples","text":"# Resume a Terraform object in the \"flux-system\" namespace\ngitops resume terraform --namespace flux-system my-resource\n
"},{"location":"references/cli-reference/gitops_resume_terraform/#options","title":"Options","text":" --context string The name of the kubeconfig context to use\n --disable-compression If true, opt-out of response compression for all requests to the server\n -h, --help help for terraform\n
"},{"location":"references/cli-reference/gitops_resume_terraform/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_resume_terraform/#see-also","title":"SEE ALSO","text":"Sets one or many Weave GitOps CLI configs or resources
"},{"location":"references/cli-reference/gitops_set/#examples","title":"Examples","text":"# Enables analytics in the current user's CLI configuration for Weave GitOps\ngitops set config analytics true\n
"},{"location":"references/cli-reference/gitops_set/#options","title":"Options","text":" -h, --help help for set\n
"},{"location":"references/cli-reference/gitops_set/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_set/#see-also","title":"SEE ALSO","text":"Set the CLI configuration for Weave GitOps
gitops set config [flags]\n
"},{"location":"references/cli-reference/gitops_set_config/#examples","title":"Examples","text":"# Enables analytics in the current user's CLI configuration for Weave GitOps\ngitops set config analytics true\n
"},{"location":"references/cli-reference/gitops_set_config/#options","title":"Options","text":" -h, --help help for config\n
"},{"location":"references/cli-reference/gitops_set_config/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_set_config/#see-also","title":"SEE ALSO","text":"Suspend a resource
"},{"location":"references/cli-reference/gitops_suspend/#examples","title":"Examples","text":"# Suspend a Terraform object in the \"flux-system\" namespace\ngitops resume terraform --namespace flux-system my-resource\n
"},{"location":"references/cli-reference/gitops_suspend/#options","title":"Options","text":" -h, --help help for suspend\n
"},{"location":"references/cli-reference/gitops_suspend/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_suspend/#see-also","title":"SEE ALSO","text":"Suspend a Terraform object
gitops suspend terraform [flags]\n
"},{"location":"references/cli-reference/gitops_suspend_terraform/#examples","title":"Examples","text":"# Suspend a Terraform object in the \"flux-system\" namespace\ngitops suspend terraform --namespace flux-system my-resource\n
"},{"location":"references/cli-reference/gitops_suspend_terraform/#options","title":"Options","text":" --context string The name of the kubeconfig context to use\n --disable-compression If true, opt-out of response compression for all requests to the server\n -h, --help help for terraform\n
"},{"location":"references/cli-reference/gitops_suspend_terraform/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_suspend_terraform/#see-also","title":"SEE ALSO","text":"Display gitops version
gitops version [flags]\n
"},{"location":"references/cli-reference/gitops_version/#options","title":"Options","text":" -h, --help help for version\n
"},{"location":"references/cli-reference/gitops_version/#options-inherited-from-parent-commands","title":"Options inherited from parent commands","text":" -e, --endpoint WEAVE_GITOPS_ENTERPRISE_API_URL The Weave GitOps Enterprise HTTP API endpoint can be set with WEAVE_GITOPS_ENTERPRISE_API_URL environment variable\n --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure\n --kubeconfig string Paths to a kubeconfig. Only required if out-of-cluster.\n -n, --namespace string The namespace scope for this operation (default \"flux-system\")\n -p, --password WEAVE_GITOPS_PASSWORD The Weave GitOps Enterprise password for authentication can be set with WEAVE_GITOPS_PASSWORD environment variable\n -u, --username WEAVE_GITOPS_USERNAME The Weave GitOps Enterprise username for authentication can be set with WEAVE_GITOPS_USERNAME environment variable\n
"},{"location":"references/cli-reference/gitops_version/#see-also","title":"SEE ALSO","text":"Secrets are sensitive information such as passwords, access keys, and other credentials that should not be exposed publicly. In cloud-native applications, secrets are often used to authenticate and authorize access to various resources, such as databases, APIs, and other services.
In a GitOps environment, secrets are typically stored either encrypted in Git, or using Custom Resources that reference the secret in an external secret store. Secrets are then synced into the clusters and securely passed to the application containers or workloads.
Effective secrets management in cloud-native applications and GitOps environments is critical for maintaining the security and compliance of the overall system. Best practices include regularly rotating secrets, using strong encryption and access controls, and implementing robust auditing and monitoring processes.
"},{"location":"secrets/#weave-gitops-secrets-management","title":"Weave Gitops Secrets Management","text":"Weave GitOps Secrets Management is a set of features that makes it easier for teams to manage secrets in a GitOps environment across multiple clusers. These features provide an automated way to manage secrets effectively, and make it easier for different personas to work with secrets.
For Developers, they can use Weave GitOps Secrets Management to securely create and track application secrets such as API keys, passwords, and other credentials. They can do that using Weave GitOps UI in a self-serve manner.
For Operation Teams, they can use Weave GitOps Secrets Management to help set up secure and reliable flows for developers to create and consume secrets for their applications.
Weave GitOps Secrets Management supports integrations with SOPS and External Secrets Operator (ESO) to provide a secure and automated way to manage secrets in a GitOps environment, while giving the option for customers to choose any of these secrets operators or working with both of them.
For SOPS and ESO operators, Weave GitOps is providing different ways to do the following: * Setup Secrets Operators (SOPS | ESO) * Bootstrap Secrets into clusters * Manage Secrets through Weave GitOps UI
In order to get started with Weave GitOps Secrets Management, please follow this guide here.
"},{"location":"secrets/bootstrapping-secrets/","title":"Bootstrapping Secrets ENTERPRISE","text":"When accessing protected resources there is a need for a client to authenticate before the access is granted and the resource is consumed. For authentication, a client presents credentials that are either created manually or available through infrastructure. A common scenario is to have a secrets store.
Weave Gitops allows you to provision the secret management infrastructure as part of its capabilities. However, in order to provision, as any other application that has secrets, we need to create the secret needed for installing it. This is known as a chicken-egg scenario that gets addressed by providing an initial secret. We call this secret a bootstrapping secret.
Bootstrapping secrets comes in handy, not only while provisioning your secrets management solution, but also in any platform provisioning task where the existence of the secret is a prerequisite. Another common example could be provisioning platform capabilities via profiles that are stored in private repositories.
Weave Gitops provides SecretSync as a solution to managing your bootstrapping secrets.
"},{"location":"secrets/bootstrapping-secrets/#secretsync","title":"SecretSync","text":"Warning
This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments.
SecretSync
is a Kubernetes Custom Resource that provides semantics to sync Kubernetes Secrets from management cluster to leaf clusters.
An example could be seen below:
apiVersion: capi.weave.works/v1alpha1\nkind: SecretSync\nmetadata:\nname: my-dev-secret-syncer\nnamespace: default\nspec:\nclusterSelector:\nmatchLabels:\nenvironment: dev\nsecretRef:\nname: my-dev-secret\ntargetNamespace: my-namespace\n
Where you could find the following configuration sections: 1) Select the secret to sync:
secretRef:\nname: my-dev-secret\n
2) Specify the GitopsClusters that the secret will be synced to via labels:
clusterSelector:\nmatchLabels:\nenvironment: dev\n
Secretsync
reconciles secrets on clusters: any cluster at a future time matching the label selector will have the secret reconciled too.
More info about the CRD spec here
"},{"location":"secrets/bootstrapping-secrets/#working-with-secretsync","title":"Working with SecretSync","text":""},{"location":"secrets/bootstrapping-secrets/#pre-requisites","title":"Pre-requisites","text":"apiVersion: gitops.weave.works/v1alpha1\nkind: GitopsCluster\nmetadata:\nnamespace: flux-system\nlabels:\nenvironment: dev\n
apiVersion: v1\nkind: Secret\nmetadata:\nname: my-dev-secret\nnamespace: flux-system\ntype: Opaque\n
Info
Some restrictions apply to the current version: - Resources should be in the same namespace - Leaf cluster nodes should be annotated with node-role.kubernetes.io/control-plane
apiVersion: capi.weave.works/v1alpha1\nkind: SecretSync\nmetadata:\nname: my-dev-secret-syncer\nnamespace: default\nspec:\nclusterSelector:\nmatchLabels:\nenvironment: dev\nsecretRef:\nname: my-dev-secret\ntargetNamespace: my-namespace\n
Check-in to your configuration repo within your management cluster
Create a PR, review and merge
See the progress
Once reconciled, the status section would show created secret resource version
status:\n versions:\n leaf-cluster-1: \"211496\"\n
Your secret has been created in the target leaf cluster
\u279c kubectl get secret -n default\nNAME TYPE DATA\nmy-dev-secret Opaque 1\n
"},{"location":"secrets/getting-started/","title":"Getting started with secrets management ENTERPRISE","text":"Warning
This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments.
This guide shows you a basic experience to get started with Weave Gitops Secrets. It covers the scenario of setting up the capability in a test environment and how to use it for your applications.
"},{"location":"secrets/getting-started/#requirements","title":"Requirements","text":"In order to be able to manage external secrets stores and secrets, add external-secrets
application from weaveworks-charts
profiles repository.
Include via values.yaml
the configuration to deploy the SecretStore connecting to AWS Secrets Manager.
values:\nsecretStores:\nenabled: true\npath: ./clusters/bases/secrets\nsourceRef:\nkind: GitRepository\nname: flux-system\nnamespace: flux-system\n
This example points to the path clusters/bases/secrets
in our configuration repo where a kustomization exists apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n- aws-secrets-manager.yaml\n
With the AWS Secrets Manager secret store
apiVersion: external-secrets.io/v1beta1\nkind: SecretStore\nmetadata:\nname: aws-secrets-manager\nnamespace: flux-system\nspec:\nprovider:\naws:\nauth:\nsecretRef:\naccessKeyIDSecretRef:\nkey: access-key\nname: awssm-secret\nsecretAccessKeySecretRef:\nkey: secret-access-key\nname: awssm-secret\nregion: eu-north-1\nservice: SecretsManager\n
Review and merge the PR and see it available in your cluster
"},{"location":"secrets/getting-started/#create-the-secret","title":"Create the secret","text":"Given you have a secret in AWS Secrets Manager for example test/search/db
.
Create the External Secret manifest via Secrets UI to pull the secret from your store into your environment.
See it available in your cluster.
"},{"location":"secrets/getting-started/#use-the-secret","title":"Use the secret","text":"At this stage you have everything you need for your application to consume the secret. Add it to your application as usual.
Expand to see exampleapiVersion: v1\nkind: Pod\nmetadata:\nname: secret-dotfiles-pod\nspec:\nvolumes:\n- name: database-secrets\nsecret:\nsecretName: search-database\ncontainers:\n- name: dotfile-test-container\nimage: registry.k8s.io/busybox\ncommand:\n- ls\n- \"-l\"\n- \"/etc/database-secrets\"\nvolumeMounts:\n- name: database-secrets\nreadOnly: true\nmountPath: \"/etc/database-secrets\"\n
You could see the expected secret available
kubectl logs -f secret-dotfiles-pod\n\ntotal 0\nlrwxrwxrwx 1 root root 15 Apr 5 17:26 password -> ..data/password\n
"},{"location":"secrets/getting-started/#next-steps","title":"Next steps?","text":"At Weave GitOps Enterprise (WGE), we support two approaches for creating and managing secrets: External Secrets Operator and Mozilla SOPS. In this guide, we will provide an overview of both approaches and explain how to use the UI to create and manage secrets.
Clicking on the Secrets under the Platform section in the left hand menu will bring you to the secrets page where you can create external secrets, sops secrets, and view the external secrets list.
"},{"location":"secrets/manage-secrets-ui/#external-secrets","title":"External Secrets","text":""},{"location":"secrets/manage-secrets-ui/#prerequisites","title":"Prerequisites","text":"Setup the External Secrets Operator by following this guide.
"},{"location":"secrets/manage-secrets-ui/#create-external-secret-cr","title":"Create External Secret CR","text":"To create a new ExternalSecret
CR, start by clicking on to the Create External Secret
button to navigate to the creation page.
Here, you will be prompted to enter the External Secret Name
and the Target K8s Secret Name
. Once you choose the Target Cluster
, you will find a new list of all the Secret Stores
on this cluster to choose from.
It's important to note that the chosen SecretStore
may be a cluster-scoped SecretStore
ie: ClusterSecretStore
or a namespace-scoped SecretStore
.
If you choose a namespace scoped SecretStore
, the new secret will be created on the same namespace as the SecretStore
.
If you choose a cluster-scoped ClusterSecretStore
, the new secret will be created in a namespace of your choice.
Then you need to add the SecretPath
, which is the path to the external secret within the secret store.
After you have chosen your desired SecretStore
& SecretPath
the UI allows you to add secret properties in two different scenarios:
The first scenario allows you to add specific property fields. Each added property
also has an optional SecretKey
field. Here's how to do it:
In the Properties
section, click the Add
button to create a new property field.
Enter the name of the property
you want to create. You can add as many properties as you need.
If you wish to specify a SecretKey
for the property, enter it in the SecretKey
field. If this field is left blank, the property
name will be used as the SecretKey
.
To remove a property, click the Remove
sign next to the property you wish to delete.
Remember, this option allows you to have fine-grained control over which properties are included in your ExternalSecret
.
The second scenario is to include all properties in your ExternalSecret
. If the Include all properties
checkbox is checked, all property inputs will be disabled and ignored, and all secrets including all keys under the specified SecretPath
will be added. Here's how:
Check the Include all properties
checkbox. This will automatically disable the property input fields.
Using this option allows you to quickly create an ExternalSecret
that includes all secrets under a specific SecretPath
, without the need to specify each one individually.
Warning
Remember to use this option with caution. You may not need to expose all your secret properties to be on the cluster.
This process allows you to easily create new ExternalSecret
CRs without needing to manually create them through YAML files or command line tools.
The ExternalSecrets List section of the UI allows you to view all the external secrets that are currently stored in your Kubernetes clusters. This section provides an overview of each external secret, including its name, namespace, cluster, k8s-secret, secret-store and the age. From this page, you can also navigate to the details page to view more information about a specific secret.
"},{"location":"secrets/manage-secrets-ui/#external-secret-details","title":"External Secret Details","text":"The details page displays the details of a specific external secret, including its name, namespace, data, and creation date. Below are the details that you can expect to see on this page:
Version: This shows the version of the external secret, which may be blank if no version has been specified.
Based on the configuration of the external secret, this section will vary:
If the \"Include all properties\" option was selected during the creation of the external secret, this section will display the text \"All properties are included\".
If specific properties were manually added during creation, this section will display a table with two columns: \"Property\" and \"SecretKey\". This table lists all the property and secret key pairs added to the external secret.
Understanding the information provided on the details page can help you to manage and troubleshoot your external secrets as needed.
"},{"location":"secrets/manage-secrets-ui/#properties","title":"Properties","text":""},{"location":"secrets/manage-secrets-ui/#understanding-events","title":"Understanding Events","text":"The following events can be expected when using the UI to manage external secrets:
Understanding these events can help you to troubleshoot issues that may arise when managing external secrets using the UI. In particular, if you encounter a Not Ready
event, you may need to check your secret store credentials and ensure that the secret store is operational before proceeding with any further actions.
Creating a SOPS secret involves using the SOPS tool to encrypt a file containing sensitive information, such as credentials or API keys. This encrypted file can then be stored securely in version control or another location, with only authorized users able to decrypt it using their own private key. This adds an additional layer of security to sensitive data, reducing the risk of unauthorized access or accidental exposure.
"},{"location":"secrets/manage-secrets-ui/#prerequisites_1","title":"Prerequisites","text":"For more information about how to generate OpenPGP/age keys and configure your cluster to work with Weave GitOps Enterprise secrets management follow this guide.
"},{"location":"secrets/manage-secrets-ui/#create-sops-secret","title":"Create SOPS Secret","text":"To create a new SOPS secret, start by clicking on the Create Sops Secret
button.
This will open the create form where you can specify the details of your new secret. First, choose the Cluster
where you want to create the secret. Then, enter a name
for your secret and select the namespace
where it will be created.
Next, select the encryption method
that you want to use - currently, only GPG/AGE encryption is supported. Finally, choose the kustomization
that will be used by SOPS to decrypt the secret, as well as having the public key info that will be used to encrypt the secret data. Afterwards, add your key-value
pairs of your secrets. It's important to note that the value
input will be encoded to base64.
The generated secret should be something like below.
After approving the pull request, Flux will reconcile it to your cluster. To verify that the secret has been successfully created, you can use the following command to retrieve it as YAML:
kubectl get secret secretTest-default-sops-secret -n default -o yaml\n
which will give the following output:
apiVersion: v1\ndata:\nsecret-1: dmFsCg==\nkind: Secret\nmetadata:\nname: secretTest-default-sops-secret\nnamespace: default\ntype: Opaque\n
"},{"location":"secrets/setup-eso/","title":"Setup ESO ENTERPRISE","text":"Weave GitOps Enterprise now supports managing secrets using External Secrets Operator from the UI. External Secrets Operator is a Kubernetes operator that allows users to use secrets from external secrets management systems by reading their information using external APIs and injecting their values into Kubernetes secrets. To be able to use this functionality, users need to configure their External Secrets Operator and SecretStores using one of the guides below.
"},{"location":"secrets/setup-eso/#prerequisites","title":"Prerequisites","text":""},{"location":"secrets/setup-eso/#secretstores","title":"SecretStores","text":"You should have your SecretStore CRs defined in a git repository. Those CRs will be installed to your cluster in the following steps and used by the creation UI.
"},{"location":"secrets/setup-eso/#eso-profile","title":"ESO Profile","text":"The ESO profile is packaged with the weaveworks-charts. If you have the usual profiles setup, you will not need to do anything extra. This profile installs the ESO controller, all the required CRDs, and the SecretStore CRs defined in the previous step.
"},{"location":"secrets/setup-eso/#secrets","title":"Secrets","text":"There are several Kubernetes Secrets that need to exist on your management cluster for the whole flow to work.
If your SecretStores repository is private then you'll need a Secret, that contains the repo credentials, to access the repository. This is usually the Secret you created while bootstrapping Flux on the management cluster and is copied to your leaf cluster during creation.
For each SecretStore CR, you'll need to add a Secret, that follows the format expected by this CR, to allow the operator to access the defined External Secret Store.
Follow this guide for bootstrapping those secrets on leaf clusters.
"},{"location":"secrets/setup-eso/#installation-steps","title":"Installation Steps","text":""},{"location":"secrets/setup-eso/#install-eso-on-management-cluster-or-existing-leaf-cluster","title":"Install ESO on management cluster or existing leaf cluster","text":"To install the ESO profile on an exisitng cluster, use Add an application
from the Applications
page and select external-secrets
from weaveworks-charts
. Check the Profile values section for more info about configuring the values.yaml
.
To bootstrap the ESO profile on a leaf cluster, select external-secrets
from the profiles dropdown in the Create Cluster
page. Check the Profile values section for more info about configuring the values.yaml
.
You should then configure the values.yaml
to install the SecretStores
on the cluster from a GitRepository
. This is done by configuring the secretStores
section.
secretStores:\nenabled: true\nurl: ssh://git@github.com/github-owner/repo-name # url for the git repository that contains the SecretStores\ntag: v1.0.0\npath: ./ # could be a path to the secrets dir or a kustomization.yaml file for the SecretStore in the GitRepository\nsecretRef: my-pat # the name of the Secret containing the repo credentials for private repositories\n
Expand to see an example that uses an existing git source secretStores:\nenabled: true\nsourceRef: # Specify the name for an existing GitSource reference\nkind: GitRepository\nname: flux-system\nnamespace: flux-system\n
"},{"location":"secrets/setup-sops/","title":"Setup SOPS","text":"import CodeBlock from \"@theme/CodeBlock\";
import SopsBootstrapJob from \"!!raw-loader!./assets/sops-bootstrap-job.yaml\"; import TemplateParams from \"!!raw-loader!./assets/template-params.yaml\"; import TemplateAnnotations from \"!!raw-loader!./assets/template-annotations.yaml\";
"},{"location":"secrets/setup-sops/#setup-sops-enterprise","title":"Setup SOPS ENTERPRISE","text":"Weave GitOps Enterprise now supports managing secrets using SOPS, a tool that encrypts and decrypts secrets using various key management services, from the UI. To be able to use this functionality, users need to configure their private and public key-pairs using one of the guides below.
"},{"location":"secrets/setup-sops/#setup-sops-on-management-cluster-or-existing-leaf-cluster","title":"Setup SOPS on management cluster or existing leaf cluster","text":"In this section, we will cover the prerequisites for using SOPS with Weave GitOps Enterprise, and how to configure SOPS for your existing Kubernetes cluster to work with GPG and age keys.
For a more advanced setup for SOPS with flux, please refer to this guide.
"},{"location":"secrets/setup-sops/#encrypting-secrets-using-gpgopenpgp","title":"Encrypting secrets using GPG/OpenPGP","text":"OpenPGP is a way of using SOPS to encrypt and decrypt secrets with Weave GitOps Enterprise.
Here are the steps to generate an OpenPGP key and configure your cluster to work with Weave GitOps Enterprise secrets management.
1- Generate a gpg key pairs
Expand for instructionsexport KEY_NAME=\"gpg-key\"\nexport KEY_COMMENT=\"gpg key\"\n\ngpg --batch --full-generate-key <<EOF\n%no-protection\nKey-Type: 1\nKey-Length: 4096\nSubkey-Type: 1\nSubkey-Length: 4096\nExpire-Date: 0\nName-Comment: ${KEY_COMMENT}\nName-Real: ${KEY_NAME}\nEOF\n
2- Export the key pairs fingerprint in the shell
gpg --list-secret-keys \"${KEY_NAME}\"\n\nsec rsa4096 2020-09-06 [SC]\n710DC0DB6C1662F707095FC30233CB21E656A3CB\n\nexport KEY_FP=\"710DC0DB6C1662F707095FC30233CB21E656A3CB\"\n
3- Export the generated private key to a kubernetes secret sops-gpg-private-key
which will be used by flux's kustomize-controller to decrypt the secrets using sops.
gpg --export-secret-keys --armor \"${KEY_FP}\" |\nkubectl create secret generic sops-gpg-private-key \\\n--namespace=flux-system \\\n--from-file=sops.asc=/dev/stdin\n
4- Export the generated public key to a kubernetes secret sops-gpg-public-key
which will be used by Weave GitOps Enterprise to encrypt the secrets created from the UI.
gpg --export --armor \"${KEY_FP}\" |\nkubectl create secret generic sops-gpg-public-key \\\n--namespace=flux-system \\\n--from-file=sops.asc=/dev/stdin\n
Tip
It's recommended to remove the secret from your machine
gpg --delete-secret-keys \"${KEY_FP}\"\n
5- Create a kustomization for reconciling the secrets on the cluster and set the --decryption-secret
flag to the name of the private key created in step 3.
flux create kustomization gpg-secrets \\\n--source=secrets \\ # the git source to reconcile the secrets from\n--path=./secrets/gpg \\\n--prune=true \\\n--interval=10m \\\n--decryption-provider=sops \\\n--decryption-secret=sops-gpg-private-key\n
6- Annotate the kustomization object created in the previous step with the name and namespace of the public key created in step 4.
kubectl annotate kustomization gpg-secrets \\\nsops-public-key/name=sops-gpg-public-key \\\nsops-public-key/namespace=flux-system \\\n-n flux-system\n
Expand to see the expected kustomization object apiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nkind: Kustomization\nmetadata:\nname: gpg-secrets\nnamespace: flux-system\nannotations:\nsops-public-key/name: sops-gpg-public-key\nsops-public-key/namespace: flux-system\nspec:\ninterval: 10m\nsourceRef:\nkind: GitRepository\nname: secrets\npath: ./secrets/gpg\ndecryption:\nprovider: sops\nsecretRef:\nname: sops-gpg-private-key\nprune: true\nvalidation: server\n
Note
This is an essential step in order to allow other operators and developers to utilize WeaveGitOps UI to encrypt SOPS secrets using the public key secret in the cluster.
"},{"location":"secrets/setup-sops/#encrypting-secrets-using-age","title":"Encrypting secrets using age","text":"age is a simple, modern and secure file encryption tool, that can be used to encrypt secrets using Weave GitOps Enterprise.
Here are the steps to generate an age key and configure your cluster to work with Weave GitOps Enterprise secrets management.
1- Generate an age key with age-keygen
age-keygen -o age.agekey\n\nPublic key: <public key>\n
2- Export the generated private key to a kubernetes secret sops-age-private-key
which will be used by flux's kustomize-controller to decrypt the secrets using sops.
cat age.agekey |\nkubectl create secret generic sops-age-private-key \\\n--namespace=flux-system \\\n--from-file=age.agekey=/dev/stdin\n
3- Export the generated public key to a kubernetes secret sops-age-public-key
which will be used by Weave GitOps Enterprise to encrypt the secrets created from the UI.
echo \"<public key>\" |\nkubectl create secret generic sops-age-public-key \\\n--namespace=flux-system \\\n--from-file=age.agekey=/dev/stdin\n
Tip
It's recommended to remove the secret from your machine
rm -f age.agekey\n
4- Create a kustomization for reconciling the secrets on the cluster and set the --decryption-secret
flag to the name of the private key created in step 2.
flux create kustomization age-secrets \\\n--source=secrets \\ # the git source to reconcile the secrets from\n--path=./secrets/age \\\n--prune=true \\\n--interval=10m \\\n--decryption-provider=sops \\\n--decryption-secret=sops-age-private-key\n
5- Annotate the kustomization object created in the previous step with the name and namespace of the public key created in step 3.
kubectl annotate kustomization age-secrets \\\nsops-public-key/name=sops-age-public-key \\\nsops-public-key/namespace=flux-system \\\n-n flux-system\n
Expand to see the expected kustomization object apiVersion: kustomize.toolkit.fluxcd.io/v1beta2\nkind: Kustomization\nmetadata:\nname: age-secrets\nnamespace: flux-system\nannotations:\nsops-public-key/name: sops-age-public-key\nsops-public-key/namespace: flux-system\nspec:\ninterval: 10m\nsourceRef:\nkind: GitRepository\nname: secrets\npath: ./secrets/age\ndecryption:\nprovider: sops\nsecretRef:\nname: sops-age-private-key\nprune: true\nvalidation: server\n
Note
This is an essential step in order to allow other operators and developers to utilize WeaveGitOps UI to encrypt SOPS secrets using the public key secret in the cluster.
Tip
In case of using OpenPGP and age in the same cluster, you need to make the kustomizations point to different directories. This is because flux's kustomize-controller expects that all the secrets in the kustomization's path are encrypted with the same key.
"},{"location":"secrets/setup-sops/#bootstrapping-sops-to-leaf-clusters","title":"Bootstrapping SOPS to leaf clusters","text":"Bootstrapping SOPS to leaf clusters in WGE can be done by utilizing ClusterBootstrapConfig
job to bootstrap Flux and SOPS. The job is a container which generates SOPS secrets key pair, creates a kubernetes secret with the private key, creates a kubernetes secret with the public key (to be used in self-serve flow) and the proper rbac for it. As well as an option to push the public key to the git repository via a PR (to be distributed).
The following example is using GPG encryption to install SOPS and generate keys when bootstrapping leaf clusters. Create the following ClusterBootstrapConfig
CR and push it to your fleet repo.
<CodeBlock title=\"clusters/management/capi/boostrap/sops-bootstrap-job.yaml\" className=\"language-yaml\"
{SopsBootstrapJob}
"},{"location":"secrets/setup-sops/#cluster-template-updates","title":"Cluster template updates","text":"In order to bootstrap SOPS to leaf clusters, we need some modifications to the cluster template to allow creating a Kustomization for reconciling the secrets on the cluster using SOPS and to run the ClusterBootstrapConfig
job during cluster creation.
The template metadata should have annotation, it will be used by WGE to create the Kustomization with the cluster files.
templates.weave.works/sops-enabled: \"true\"\n
The template should have the following parameters that are needed for the Kustomization
Expand to view<CodeBlock title=\"clusters/management/capi/templates/template.yaml\" className=\"language-yaml\"
{TemplateParams}
The template should have the following annotations under GitOpsCluster
to be used in the bootstrap job
<CodeBlock title=\"clusters/management/capi/templates/template.yaml\" className=\"language-yaml\"
{TemplateAnnotations}
"},{"location":"secrets/setup-sops/#installation-steps","title":"Installation Steps","text":"To bootstrap SOPS on a leaf cluster, create a new cluster using the SOPS template from the Create Cluster
page and fill in the following SOPS-related values in the form:
SOPS_KUSTOMIZATION_NAME
: This Kustomization will be used to decrypt SOPS secrets from this path clusters/default/leaf-cluster/sops/
after reconciling on the cluster. example (my-secrets
)SOPS_SECRET_REF
: The private key secret name that will be generated by SOPS in the bootstrap job. example (sops-gpg
)SOPS_SECRET_REF_NAMESPACE
: The private key secret namespace this secret will be generated by SOPS in the bootstrap job. example (flux-system
)SOPS_KEY_NAME
: SOPS key name. This will be used to generate SOPS keys. example (test.yourdomain.com
)SOPS_KEY_COMMENT
: SOPS key comment. This will be used to generate SOPS keys. example (sops secret comment
)SOPS_PUSH_TO_GIT
: Option to push the public key to the git repository. expected values (true
, false
)sops-gpg
to decrypt secretssops-gpg-pub
to encrypt secretsdecryption
defined in it to SOPS
location in the cluster repo locationAccess to sops decryption secrets should be restricted and allowed only to be read by flux's kustomize controller. This can be done using Kubernetes RBAC.
Here's an example of how you can use RBAC to restrict access to sops decryption secrets:
apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\nname: sops-secrets-role\nrules:\n- apiGroups: [\"\"]\nresources: [\"secrets\"]\nresourceNames: [\"sops-gpg-private-key\", \"sops-age-private-key\"]\nverbs: [\"get\", \"watch\", \"list\"]\n
apiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\nname: sops-secrets-rolebinding\nroleRef:\napiGroup: rbac.authorization.k8s.io\nkind: Role\nname: sops-secrets-role\nsubjects:\n- kind: ServiceAccount\nname: kustomize-controller\n
Warning
You would need to ensure that no other rolebindings or clusterrolebindings would allow reading the decryption secret at any time. This could be achieved by leveraging policy capabilities to detect existing and prevent future creation of roles that would grant read secrets permissions.
"},{"location":"secrets/spec/v1alpha1/secretSync/","title":"SecretSync ENTERPRISE","text":"It provides semantics to sync Kuberentes Secrets from management cluster to leaf clusters.
apiVersion: capi.weave.works/v1alpha1\nkind: SecretSync\nmetadata:\nname: my-dev-secret-syncer\nnamespace: default\nspec:\nclusterSelector:\nmatchLabels:\nenvironment: dev\nsecretRef:\nname: my-dev-secret\ntargetNamespace: my-namespace\n
"},{"location":"secrets/spec/v1alpha1/secretSync/#specification","title":"Specification","text":"The documentation for the api version capi.weave.works/v1alpha1
type SecretSync struct {\nmetav1.TypeMeta `json:\",inline\"`\nmetav1.ObjectMeta `json:\"metadata,omitempty\"`\nSpec SecretSyncSpec `json:\"spec,omitempty\"`\nStatus SecretSyncStatus `json:\"status,omitempty\"`\n}\n\n// SecretSyncSpec\ntype SecretSyncSpec struct {\n// Label selector for Clusters. The Clusters that are\n// selected by this will be the ones affected by this SecretSync.\n// It must match the Cluster labels. This field is immutable.\n// Label selector cannot be empty.\nClusterSelector metav1.LabelSelector `json:\"clusterSelector\"`\n// SecretRef specifies the Secret to be bootstrapped to the matched clusters\n// Secret must be in the same namespace of the SecretSync object\nSecretRef v1.LocalObjectReference `json:\"secretRef\"`\n// TargetNamespace specifies the namespace which the secret should be bootstrapped in\n// The default value is the namespace of the referenced secret\n//+optional\nTargetNamespace string `json:\"targetNamespace,omitempty\"`\n}\n\n// SecretSyncStatus secretsync object status\ntype SecretSyncStatus struct {\n// SecretVersions a map contains the ResourceVersion of the secret of each cluster\n// Cluster name is the key and secret's ResourceVersion is the value\nSecretVersions map[string]string `json:\"versions\"`\n}\n
"},{"location":"terraform/","title":"Overview","text":"Terraform Controller (TF-Controller) is a reliable tool for managing your infrastructure and application resources using the GitOps approach, all at your own pace. An open source project created by Weaveworks, the makers of Flux, TF-Controller follows patterns established by Flux and integrates with Weave GitOps.
TF-Controller makes the following GitOps models available to suit your specific needs:
tfstate
without making any other changes.To get started with TF-controller, simply follow the provided getting started guide. You can also find extensive documentation here\u2014it covers API references, CLI references, and how-to's for common situations.
With Weave GitOps Enterprise, you can manage Terraform
objects the same way you can with Kustomization
and HelmReleases
:
plan
and apply
inside Runner Pods. When specifying .metadata.namespace
and .spec.serviceAccountName
, the Runner Pod uses the specified ServiceAccount and runs inside the specified Namespace. These settings enable the soft multi-tenancy model, usable within the Flux multi-tenancy setup..spec.approvePlan=auto
allows a Terraform
object to be reconciled and act as the representation of your Terraform resources. TF-controller uses the spec of the Terraform
object to plan
and apply
its associated Terraform resources. It then stores the TFSTATE
of the applied resources as a Secret
inside the Kubernetes cluster. After .spec.interval
passes, TF-Controller checks for drift between your live system and your Terraform resources and, if affirmative, automatically generates and applies a plan to correct it.TFSTATE
. You can use the field .spec.disableDriftDetection
to disable this behaviour. Drift detection-only mode, without plan
or apply
steps, allows you to perform read-only drift detection.plan
from the apply
step, just like in the Terraform workflow you are familiar with\u2014but in a GitOps way. When a plan is generated, the controller shows you a message asking if you want to apply it. Optionally create and push the change to a new branch for your team members to review and approve too.Terraform
object in v0.13.0+ allows you to better configure your Terraform resources via YAMLs, but without introducing any extra CRDs to your cluster.spec.cloud
to configure Terraform
objects to use Terraform Cloud as the backend for storing the state.TF-controller has its own versioning system that is separate from the versioning system used by Weave GitOps. This means that you can install and use TF-controller independently of Weave GitOps\u2014it will not be affected by the version of Weave GitOps that you are using.
Here is the dependency matrix:
Version Terraform Source Controller Flux v2 v0.14.0 v1.3.9 v0.35.1 v0.40.x v0.13.1 v1.3.1 v0.31.0 v0.38.x"},{"location":"terraform/get-started-terraform/","title":"Get Started with the Terraform Controller","text":""},{"location":"terraform/get-started-terraform/#preflight-checks","title":"Preflight Checks","text":"To set up the Terraform Controller (TF-Controller), follow the steps in the preflight checks. Here is a summary of what you will need to do:
The exact steps for setting up the TF-controller will depend on the specific environment and infrastructure that you are using. The project's documentation provides additional information to help with setup.
"},{"location":"terraform/get-started-terraform/#setup","title":"Setup","text":"Perform the following actions to set up TF-Controller:
Create a local cluster using a tool such as kind
or minikube
. This will allow you to develop and test TF-Controller in a local environment before deploying it to a production cluster.
kind create cluster --name tf-controller\n
Install the Flux CLI on your local machine. This will allow you to interact with the Flux controllers on your cluster.
brew install fluxcd/tap/flux\n
Prepare a Git repository to store the configuration files and manifests for Flux and TF-controller. For this example we'll use GitHub. To follow along, you'll need a GitHub account and personal access token with repo permissions. You'll also need to properly configure your Git client by setting your username and email address.
Assuming your username is $GITHUB_USER
, you can create a new repository called gitops-tf-controller
using the following command:
export GITHUB_USER=<your github username>\nexport GITHUB_TOKEN=<your github personal access token>\n\ngh repo create $GITHUB_USER/gitops-tf-controller\n
Bootstrap the cluster with Flux v2 (v0.32.0 or later) using the path (for example) ./cluster/my-cluster
. This will install Flux on the cluster and create a Flux system at ./cluster/my-cluster/flux-system
.
git clone git@github.com:$GITHUB_USER/gitops-tf-controller.git\ncd gitops-tf-controller\n\nflux bootstrap github \\\n--owner=$GITHUB_USER \\\n--repository=gitops-tf-controller \\\n--branch=main \\\n--path=./cluster/my-cluster \\\n--personal \\\n--token-auth\n
Create a directory at ./cluster/my-cluster/infra/
:
mkdir -p ./cluster/my-cluster/infra/\n
Download the TF-controller manifest from the release location and save it to ./cluster/my-cluster/infra/tf-controller.yaml
\u2014placing the file tf-controller.yaml
in this directory:
curl -s https://raw.githubusercontent.com/weaveworks/tf-controller/main/docs/release.yaml > ./cluster/my-cluster/infra/tf-controller.yaml\n
Add the manifest file to the Git repository, then push the changes to your repository. kustomization.yaml
file that contains the following:apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n- tf-controller.yaml\n
Add the kustomization.yaml
file to your Git repository, then push the changes to your repository.
If you want to use TF-Controller with the Notification Controller, you will also need to modify the manifest to enable the two controllers to work together. The exact steps for doing this will depend on the specific requirements of your environment and the configuration of the Notification Controller. You may need to refer to the documentation for the TF-Controller and Notification Controller for more information on how to set this up.
"},{"location":"terraform/get-started-terraform/#other-installation-methods","title":"Other Installation Methods","text":"Before using TF-Controller, you must install Flux by using either flux install
or the flux bootstrap
command. Make sure you have the latest version of Flux. After that, you can install TF-controller with Flux HelmRelease with this command:
kubectl apply -f https://raw.githubusercontent.com/weaveworks/tf-controller/main/docs/release.yaml\n
For the most recent TF-Controller release candidate, please use rc.yaml:
kubectl apply -f https://raw.githubusercontent.com/weaveworks/tf-controller/main/docs/rc.yaml\n
or manually with Helm by:
# Add tf-controller helm repository\nhelm repo add tf-controller https://weaveworks.github.io/tf-controller/\n\n# Install tf-controller\nhelm upgrade -i tf-controller tf-controller/tf-controller \\\n--namespace flux-system\n
For details on configurable parameters of the TF-controller chart, please see this chart Readme.
Alternatively, you can install TF-controller via kubectl
:
export TF_CON_VER=v0.14.0\nkubectl apply -f https://github.com/weaveworks/tf-controller/releases/download/${TF_CON_VER}/tf-controller.crds.yaml\nkubectl apply -f https://github.com/weaveworks/tf-controller/releases/download/${TF_CON_VER}/tf-controller.rbac.yaml\nkubectl apply -f https://github.com/weaveworks/tf-controller/releases/download/${TF_CON_VER}/tf-controller.deployment.yaml\n
"},{"location":"terraform/get-started-terraform/#quick-start","title":"Quick Start","text":"Here's a simple example of how to GitOps your Terraform resources with TF-controller and Flux.
"},{"location":"terraform/get-started-terraform/#define-source","title":"Define Source","text":"First, define a Source controller's source (GitRepository
, Bucket
, OCIRepository
)\u2014for example:
apiVersion: source.toolkit.fluxcd.io/v1beta1\nkind: GitRepository\nmetadata:\nname: helloworld\nnamespace: flux-system\nspec:\ninterval: 30s\nurl: https://github.com/tf-controller/helloworld\nref:\nbranch: main\n
"},{"location":"terraform/get-started-terraform/#the-gitops-automation-mode","title":"The GitOps Automation Mode","text":"In this mode, Terraform resources will be planned and automatically applied for you. Enable it by setting .spec.approvePlan=auto
:
apiVersion: infra.contrib.fluxcd.io/v1alpha2\nkind: Terraform\nmetadata:\nname: helloworld\nnamespace: flux-system\nspec:\ninterval: 1m\napprovePlan: auto\npath: ./\nsourceRef:\nkind: GitRepository\nname: helloworld\nnamespace: flux-system\n
For a full list of features and how to use them, please visit the Terraform overview.
"},{"location":"terraform/get-started-terraform/#troubleshooting","title":"Troubleshooting","text":""},{"location":"terraform/get-started-terraform/#getting-a-drift-detected-event-message-when-its-a-change-of-source-that-triggered-the-update","title":"Getting a drift detected
event message when it's a change of source that triggered the update","text":"Whenever you change a source, you will get a new plan. TF-controller picks up the new plan and applies it. Drift happens if, and only if, the live system changes intentionally. Then TF-controller will generate a lengthy message (see an example) stating that a drift has occurred. If there is drift, the icon will be red in the TF Objects > Status column of the WGE UI.
"},{"location":"terraform/get-started-terraform/#other-examples","title":"Other Examples","text":"This guide will show you how to use a template to create a Terraform resource in Weave GitOps Enterprise.
"},{"location":"terraform/using-terraform-templates/#cli-guide","title":"CLI Guide","text":""},{"location":"terraform/using-terraform-templates/#prerequisites","title":"Prerequisites","text":"Add the following template to a path in your Git repository that is synced by Flux. For example, in the Installation guide, we set the path that is synced by Flux to ./clusters/management
.
Commit and push these changes. Once a template is available in the cluster, it can be used to create a resource, which will be shown in the next step.
Expand to see ./clusters/management/tf-template.yaml ./clusters/management/tf-template.yaml---\napiVersion: clustertemplates.weave.works/v1alpha2\nkind: GitOpsTemplate\nmetadata:\nname: tf-template\nnamespace: default\nspec:\ndescription:\nThis is a sample WGE template that will be translated into a tf-controller specific template.\nparams:\n- name: RESOURCE_NAME\ndescription: Resource Name\nresourcetemplates:\n- content:\n- apiVersion: infra.contrib.fluxcd.io/v1alpha1\nkind: Terraform\nmetadata:\nname: ${RESOURCE_NAME}\nnamespace: flux-system\nspec:\ninterval: 1h\npath: ./\napprovePlan: auto\nalwaysCleanupRunnerPod: true\nsourceRef:\nkind: GitRepository\nname: flux-system\nnamespace: flux-system\n
Verify that your template is in the cluster:
kubectl get gitopstemplates.clustertemplates.weave.works -A\nNAME AGE\nsample-wge-tf-controller-template 14m\n
If the template does not appear immediately, reconcile the changes with Flux:
flux reconcile kustomization flux-system\n\u25ba annotating Kustomization flux-system in flux-system namespace\n\u2714 Kustomization annotated\n\u25ce waiting for Kustomization reconciliation\n\u2714 applied revision main/e6f5f0c3925bcfecdb50bceb12af9a87677d2213\n
"},{"location":"terraform/using-terraform-templates/#2-use-the-template-to-create-a-resource","title":"2. Use the template to create a resource","text":"A resource can be created from a template by specifying the template's name and supplying values to it, as well as your Weave GitOps Enterprise username, password, and HTTP API endpoint.
gitops add terraform --from-template sample-wge-tf-controller-template \\\n--set=\"RESOURCE_NAME\"=\"name\" \\\n--username=<username> --password=<password> \\\n--endpoint https://localhost:8000 \\\n--url https://github.com/myawesomeorg/myawesomerepo\n\nCreated pull request: https://github.com/myawesomeorg/myawesomerepo/pull/5\n
This will create a PR in your Git repository with a TF-Controller manifest. Once the PR is merged, TF-Controller will supply the values to the Terraform manifest, apply the Terraform manifest to create the resource, and reconcile any changes that you make to the Terraform manifest!
This template can be used to create multiple resources out of the same Terraform manifest by supplying different values to the template. Any changes to the Terraform manifest will be reconciled automatically to all resources.
"},{"location":"terraform/using-terraform-templates/#3-list-available-templates","title":"3. List available templates","text":"Get a specific template that can be used to create a Terraform resource:
gitops get template terraform sample-wge-tf-controller-template --endpoint https://localhost:8000 --username=<username> --password=<password>\nNAME PROVIDER DESCRIPTION ERROR\nsample-wge-tf-controller-template This is a sample WGE template that will be translated into a tf-controller specific template.\n
List all the templates available on the cluster:
gitops get template terraform --endpoint https://localhost:8000 --username=<username> --password=<password>\nNAME PROVIDER DESCRIPTION ERROR\nsample-aurora-tf-template This is a sample Aurora RDS template.\nsample-wge-tf-controller-template This is a sample WGE template that will be translated into a tf-controller specific template.\n
"},{"location":"terraform/using-terraform-templates/#4-list-the-parameters-of-a-template","title":"4. List the parameters of a template","text":"List all the parameters that can be defined on a specific template:
gitops get template terraform tf-controller-aurora --list-parameters --endpoint https://localhost:8000 --username=<username> --password=<password>\nNAME REQUIRED DESCRIPTION OPTIONS\nRESOURCE_NAME false Resource Name\n
"},{"location":"terraform/using-terraform-templates/#use-case-create-an-aurora-rds-with-wge","title":"Use Case: Create an Aurora RDS with WGE","text":"BONUS
For a more advanced example, here is a template to create an Aurora RDS cluster using WGE with Flux and the TF-Controller.
"},{"location":"terraform/using-terraform-templates/#pre-requisites","title":"Pre-requisites","text":"iam:CreateRole
. More info here.Configure a way to safely store Secrets. One method is to use the Mozilla SOPS CLI, but there are other ways, such as Sealed Secrets or Vaults.
Follow the steps in the Flux docs except for the \"Configure in-cluster secrets decryption\" step! This step looks slightly different for WGE. Instead of re-creating the controllers, you can configure the kustomize-controller
as instructed below.
In your Git repository source, add the following to your kustomize-controller
configuration:
cat <<EOF >> ./clusters/<cluster-name>/flux-system/gotk-sync.yaml\n decryption:\n provider: sops\n secretRef:\n name: sops-gpg\nEOF\n
"},{"location":"terraform/using-terraform-templates/#2-encrypt-and-store-your-credentials-in-your-git-repository","title":"2. Encrypt and store your credentials in your Git repository","text":"Create a Secret to store sensitive values such as the following: - DB username - DB password - AWS Access Key ID - AWS Secret Access Key - AWS Role ARN
Note
If following the Flux guide, this step corresponds to \"Encrypting secrets using OpenPGP\". You can stop following the Flux guide at this step.
For example, here is what you would do if using the SOPS method:
kubectl -n flux-system create secret generic tf-controller-auth \\\n--from-literal=master_username=admin \\\n--from-literal=master_password=change-me \\\n--from-literal=aws_access_key=AKIAIOSFODNN7EXAMPLE \\\n--from-literal=aws_secret_key=\"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\" \\\n--from-literal=aws_role_arn=\"arn:aws:iam::012345678910:role/wge-tf-controller-example\" \\\n--dry-run=client \\\n-o yaml > tf-controller-auth.yaml\n
Then, encrypt the secret:
sops --encrypt --in-place tf-controller-auth.yaml\n
Commit and push your changes. You can now store encrypted secrets to your Git repository.
"},{"location":"terraform/using-terraform-templates/#4-add-the-manifests-to-your-cluster","title":"4. Add the manifests to your cluster","text":"Add the following Terraform manifest to the root of your Git repository.
Expand to see Terraform manifest ./rds.tfterraform {\nrequired_providers {\naws = {\nsource = \"hashicorp/aws\"\nversion = \"~> 3.0\"\n}\n}\n}\n\nvariable \"cluster_identifier\" {}\nvariable \"database_name\" {}\nvariable \"master_username\" {}\nvariable \"master_password\" {}\nvariable \"backup_retention_period\" {}\nvariable \"region\" {}\nvariable \"aws_access_key\" {}\nvariable \"aws_secret_key\" {}\nvariable \"aws_role_arn\" {}\n\nprovider \"aws\" {\nregion = var.region\naccess_key = var.aws_access_key\nsecret_key = var.aws_secret_key\n\nassume_role {\nrole_arn = var.aws_role_arn\n}\n}\n\nlocals {\nengine = \"aurora-mysql\"\nengine_version = \"5.7.mysql_aurora.2.07.5\"\nport = 3306\n}\n\ndata \"aws_availability_zones\" \"available\" {\nstate = \"available\"\n\nfilter {\nname = \"group-name\"\nvalues = [var.region]\n}\n}\n\nresource \"aws_rds_cluster\" \"mycluster\" {\ncluster_identifier = var.cluster_identifier\nengine = local.engine\nengine_version = local.engine_version\nport = local.port\navailability_zones = slice(data.aws_availability_zones.available.names, 0, 3)\ndatabase_name = var.database_name\nmaster_username = var.master_username\nmaster_password = var.master_password\nbackup_retention_period = var.backup_retention_period\nskip_final_snapshot = true\napply_immediately = true\n}\n\nresource \"aws_rds_cluster_instance\" \"cluster_instance\" {\ncount = 1\nidentifier = \"${aws_rds_cluster.mycluster.id}-${count.index}\"\ncluster_identifier = aws_rds_cluster.mycluster.id\ninstance_class = \"db.t3.small\"\nengine = aws_rds_cluster.mycluster.engine\nengine_version = aws_rds_cluster.mycluster.engine_version\n}\n
Add the following template to a path in your Git repository that is synced by Flux. In the quickstart guide, we set this path to ./clusters/management
.
---\napiVersion: clustertemplates.weave.works/v1alpha2\nkind: GitOpsTemplate\nmetadata:\nname: rds-template\nnamespace: default\nspec:\ndescription: This is a sample Aurora RDS template.\nparams:\n- name: RESOURCE_NAME\ndescription: Resource Name\n- name: CLUSTER_IDENTIFIER\ndescription: Cluster Identifier\n- name: DATABASE_NAME\ndescription: Database Name\n- name: BACKUP_RETENTION_PERIOD\ndescription: Backup Retention Period\n- name: REGION\ndescription: Region\nresourcetemplates:\n- contents:\n- apiVersion: infra.contrib.fluxcd.io/v1alpha1\nkind: Terraform\nmetadata:\nname: ${RESOURCE_NAME}\nnamespace: flux-system\nspec:\ninterval: 1h\npath: ./\napprovePlan: auto\nalwaysCleanupRunnerPod: true\nvars:\n- name: cluster_identifier\nvalue: ${CLUSTER_IDENTIFIER}\n- name: database_name\nvalue: ${DATABASE_NAME}\n- name: backup_retention_period\nvalue: ${BACKUP_RETENTION_PERIOD}\n- name: region\nvalue: ${REGION}\nvarsFrom:\n- kind: Secret\nname: tf-controller-auth\nsourceRef:\nkind: GitRepository\nname: flux-system\nnamespace: flux-system\n
Commit and push your changes.
Tip
You can change the location where you keep your Terraform manifests in your Git source (which the TF-Controller will reconcile) by configuring spec.resourcetemplates.spec.path
.
gitops add terraform --from-template rds-template \\\n--username=<username> --password=<password> \\\n--endpoint https://localhost:8000 \\\n--url https://github.com/myawesomeorg/myawesomerepo \\\n--set \"RESOURCE_NAME\"=\"tf-controller-aurora\",\"CLUSTER_IDENTIFIER\"=\"super-awesome-aurora\",\"DATABASE_NAME\"=\"db1\",\"BACKUP_RETENTION_PERIOD\"=5,\"REGION\"=\"us-west-2\"\n\nCreated pull request: https://github.com/myawesomeorg/myawesomerepo/pull/6\n
Merge the PR in your Git repository to add the TF-Controller manifest. TF-Controller will supply the values to the Terraform manifest, apply the Terraform manifest to create the resource, and reconcile any changes that you make to the Terraform manifest.
Any changes to your Terraform manifest will be automatically reconciled by the TF-controller with Flux.
You can re-use this template to create multiple Terraform resources, each with a different set of values!
Make sure to delete the newly created RDS resources to not incur additional costs.
"},{"location":"workspaces/","title":"Introduction ENTERPRISE","text":""},{"location":"workspaces/#workspaces","title":"Workspaces","text":"Organizations working with Kubernetes have a tremendous need to manage tenancy for numerous software delivery teams. Weave GitOps Workspaces offers tenancy management for Kubernetes clusters at scale. It\u2019s built on top of Flux's powerful approach to managing tenancy, and adds policies that will help you to define finer-grain rules on your tenants.
With WGE Workspaces, all it takes for platform operators to create workspaces is a single CLI command that generates:
Multi tenancy provides users with the ability to define boundaries to multiple engineering teams working on a single cluster. Through a simple interface it adds permissions to the necessary Kubernetes resources to make it easy for customers to manage their multiple tenants.
WGE multi tenancy expands on the multi tenancy feature provided by flux
. In addition to creating the necessary Kubernetes tenancy resources that flux
adds, multi tenancy in WGE also adds the following: - Defining tenancy using a single yaml file that serves as a source of truth for the organization - Makes use of WGE policy features to enforce non-Kubernetes-native permissions
gitops
command line toolgitops
command line tool is responsible for creating the multi tenancy resources. The tool is distributed as part of WGE offering. It reads the definitions of a yaml file and can either apply the necessary changes directly to the cluster or output it to stdout so it can be saved into a file and pushed to a repo to be reconciled by flux
.
To make use of the policy features, policy agent needs to be installed in the necessary cluster(s).
"},{"location":"workspaces/multi-tenancy/#tenancy-file","title":"Tenancy file","text":"Below is an example of a tenancy file:
Expand to view tenancy.yaml---\ntenants:\n- name: first-tenant\nnamespaces:\n- first-ns\n- name: second-tenant\nnamespaces:\n- second-test-ns\n- second-dev-ns\nallowedRepositories:\n- kind: GitRepository\nurl: https://github.com/testorg/testrepo\n- kind: GitRepository\nurl: https://github.com/testorg/testinfo\n- kind: Bucket\nurl: minio.example.com\n- kind: HelmRepository\nurl: https://testorg.github.io/testrepo\nallowedClusters:\n- kubeConfig: cluster-1-kubeconfig\n- kubeConfig: cluster-2-kubeconfig\nteamRBAC:\ngroupNames:\n- foo-group\n- bar-group\nrules:\n- apiGroups:\n- ''\nresources:\n- 'namespaces'\n- 'pods'\nverbs:\n- 'list'\n- 'get'\ndeploymentRBAC:\nbindRoles:\n- name: foo-role\nkind: Role\nrules:\n- apiGroups:\n- ''\nresources:\n- 'namespaces'\n- 'pods'\nverbs:\n- 'list'\n- 'get'\nserviceAccount:\nname: \"reconcilerServiceAccount\"\n
The file above defines two tenants: first-tenant
and second-tenant
as follows:
namespaces
: describes which namespaces should be part of the tenant. Meaning that users who are part of the tenant would have access on those namespaces.allowedRepositories
: limits the flux
repositories sources that can be used in the tenant's namespaces. This is done through policies and thus requires policy-agent
to be deployed on the cluster which will stop these sources from being deployed if they aren't allowed as part of the tenant. It consists of:kind
: the flux
source kind. Can be: GitRepository
, Bucket
and HelmRepository
.url
: the URL for that source.allowedClusters
: limits which secrets containing cluster configuration can be used. It stops WGE GitopsCluster
and flux Kustomization
from being deployed if they point to a secret not in the list, essentially giving control on which cluster can be added to a multi-cluster setup. Requires policy-agent
.kubeConfig
: name of the secret that can be used for this tenant.teamRBAC
: Generate Roles and Rolebindings for a list of groupNames
. This allows you to easily give an OIDC group access to a tenant's resources. When the Weave Gitops Enterprise UI is configured with your OIDC provider, tenants can log in and view the status of the resources they have been granted access to.deploymentRBAC
: generate Roles and Rolebindings for a service account. Can additionally bind to an existing Roles/ClusterRoles. Would use the global service account if specified in the tenants file, otherwise it will use the created service account which takes the tenant name. If not specified a Rolebinding would be created that binds to cluster-admin
ClusterRole.Global options:
serviceAccount
: Override the name of the generated ServiceAccount
for all tenants. This allows you to easily use the flux controllers' --default-service-account
feature. Tenants do not need to make sure they correctly specify the serviceAccount
when using Kustomization
or HelmRelease
resources. The kustomization-controller and helm-controller will instead look for the default-service-account
in the namespace being reconciled to and use that. Just configure serviceAccount.name
and --default-service-account
to the same value.The command creates the necessary resources to apply multi tenancy on the user's cluster. To use the command to apply the resources directly the user needs to have the necessary configuration to connect to the desired cluster. The command considers the tenancy file as a source of truth and will change the cluster state to match what is currently described in the file.
For more control over a specific tenant, a tenancy file should be used. The command allows the creation of the base resources that define a tenancy through the arguments:
gitops create tenants --name test-tenant --namespace test-ns1 --namespace test-ns2\n
Expand to view command output namespace/test-ns1 created\ntest-ns1/serviceaccount/test-tenant created\ntest-ns1/rolebinding.rbac.authorization.k8s.io/test-tenant-service-account-cluster-admin created\nnamespace/test-ns2 created\ntest-ns2/serviceaccount/test-tenant created\ntest-ns2/rolebinding.rbac.authorization.k8s.io/test-tenant-service-account-cluster-admin created\npolicy.pac.weave.works/weave.policies.tenancy.test-tenant-allowed-application-deploy created\n
The above will create the namespaces and permissions through a ServiceAccount
with the same name as the tenant, test-tenant
in the case of the above example, in each required namespace. The same can be done through a file as follows:
tenants:\n- name: test-tenant\nnamespaces:\n- test-ns1\n- test-ns2\n
gitops create tenants --from-file tenants.yaml\n
Expand to view command output namespace/test-ns1 created\ntest-ns1/serviceaccount/test-tenant created\ntest-ns1/rolebinding.rbac.authorization.k8s.io/test-tenant-service-account-cluster-admin created\nnamespace/test-ns2 created\ntest-ns2/serviceaccount/test-tenant created\ntest-ns2/rolebinding.rbac.authorization.k8s.io/test-tenant-service-account-cluster-admin created\npolicy.pac.weave.works/weave.policies.tenancy.test-tenant-allowed-application-deploy created\n
To check the resources that would be deployed first use the export
flag:
gitops create tenants --from-file tenants.yaml --export\n
Expand to view command output apiVersion: v1\nkind: Namespace\nmetadata:\ncreationTimestamp: null\nlabels:\n toolkit.fluxcd.io/tenant: test-tenant\nname: test-ns1\nspec: {}\nstatus: {}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\ncreationTimestamp: null\nlabels:\n toolkit.fluxcd.io/tenant: test-tenant\nname: test-tenant\nnamespace: test-ns1\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\ncreationTimestamp: null\nlabels:\n toolkit.fluxcd.io/tenant: test-tenant\nname: test-tenant-service-account-cluster-admin\nnamespace: test-ns1\nroleRef:\napiGroup: rbac.authorization.k8s.io\nkind: ClusterRole\nname: cluster-admin\nsubjects:\n- kind: ServiceAccount\nname: test-tenant\nnamespace: test-ns1\n---\napiVersion: v1\nkind: Namespace\nmetadata:\ncreationTimestamp: null\nlabels:\n toolkit.fluxcd.io/tenant: test-tenant\nname: test-ns2\nspec: {}\nstatus: {}\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\ncreationTimestamp: null\nlabels:\n toolkit.fluxcd.io/tenant: test-tenant\nname: test-tenant\nnamespace: test-ns2\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\ncreationTimestamp: null\nlabels:\n toolkit.fluxcd.io/tenant: test-tenant\nname: test-tenant-service-account-cluster-admin\nnamespace: test-ns2\nroleRef:\napiGroup: rbac.authorization.k8s.io\nkind: ClusterRole\nname: cluster-admin\nsubjects:\n- kind: ServiceAccount\nname: test-tenant\nnamespace: test-ns2\n---\napiVersion: pac.weave.works/v2beta2\nkind: Policy\nmetadata:\ncreationTimestamp: null\nlabels:\n toolkit.fluxcd.io/tenant: test-tenant\nname: weave.policies.tenancy.test-tenant-allowed-application-deploy\nspec:\ncategory: weave.categories.tenancy\ncode: |\npackage weave.tenancy.allowed_application_deploy\n\n controller_input := input.review.object\n violation[result] {\nnamespaces := input.parameters.namespaces\n targetNamespace := controller_input.spec.targetNamespace\n not contains_array(targetNamespace, namespaces)\nresult = {\n\"issue 
detected\": true,\n \"msg\": sprintf(\"using target namespace %v is not allowed\", [targetNamespace]),\n }\n}\nviolation[result] {\nserviceAccountName := controller_input.spec.serviceAccountName\n serviceAccountName != input.parameters.service_account_name\n result = {\n\"issue detected\": true,\n \"msg\": sprintf(\"using service account name %v is not allowed\", [serviceAccountName]),\n }\n}\ncontains_array(item, items) {\nitems[_] = item\n }\ndescription: Determines which helm release and kustomization can be used in a tenant\nhow_to_solve: \"\"\nid: weave.policies.tenancy.test-tenant-allowed-application-deploy\nname: test-tenant allowed application deploy\nparameters:\n- name: namespaces\n required: false\ntype: array\n value:\n - test-ns1\n - test-ns2\n- name: service_account_name\n required: false\ntype: string\n value: test-tenant\nprovider: kubernetes\nseverity: high\nstandards: []\ntags:\n- tenancy\ntargets:\n kinds:\n - HelmRelease\n - Kustomization\n labels: []\nnamespaces:\n - test-ns1\n - test-ns2\nstatus: {}\n---\n
Applying the resources through the command line is not usually recommended. For WGE the recommended way is to commit the result of the create tenants
command to source control and let flux
handle deployment. To achieve that you can save the result of the export
to a file:
gitops create tenants --from-file tenants.yaml --export > clusters/management/tenants.yaml
"},{"location":"workspaces/view-workspaces/","title":"Workspaces List View ENTERPRISE","text":"From the side menu, you can click on the Workspaces tab to go to the workspaces list view.
This view lists workspaces across all clusters. You can filter workspaces by their clusters or their names.
"},{"location":"workspaces/view-workspaces/#workspace-details-view","title":"Workspace Details View","text":"You can go to this view by clicking on the name of the workspace in the Workspaces List View.
In this view you can see all details of the workspace such as its name, namespace, and all resources related to this workspace.
"}]} \ No newline at end of file diff --git a/userdocs/site/secrets/assets/sops-bootstrap-job.yaml b/userdocs/site/secrets/assets/sops-bootstrap-job.yaml new file mode 100644 index 0000000000..255b9525d1 --- /dev/null +++ b/userdocs/site/secrets/assets/sops-bootstrap-job.yaml @@ -0,0 +1,68 @@ +apiVersion: capi.weave.works/v1alpha1 +kind: ClusterBootstrapConfig +metadata: + name: sops-installation + namespace: default +spec: + clusterSelector: + matchLabels: + weave.works/flux: "bootstrap" + jobTemplate: + generateName: "run-gitops-flux-{{ .ObjectMeta.Name }}" + spec: + containers: + - image: ghcr.io/fluxcd/flux-cli:v0.35.0 + imagePullPolicy: Always + name: flux-bootstrap + resources: {} + volumeMounts: + - name: kubeconfig + mountPath: "/etc/gitops" + readOnly: true + args: + [ + "bootstrap", + "github", + "--kubeconfig=/etc/gitops/value", + "--owner=When accessing protected resources there is a need for a client to authenticate before the access is granted and the resource is consumed. For authentication, a client presents credentials that are either created manually or available through infrastructure. A common scenario is to have a secrets store.
Weave Gitops allows you to provision the secret management infrastructure as part of its capabilities. However, in order to provision it, as with any other application that has secrets, we need to create the secret needed for installing it. This is known as a chicken-and-egg scenario that gets addressed by providing an initial secret. We call this secret a bootstrapping secret.
Bootstrapping secrets comes in handy, not only while provisioning your secrets management solution, but also in any platform provisioning task where the existence of the secret is a prerequisite. Another common example could be provisioning platform capabilities via profiles that are stored in private repositories.
Weave Gitops provides SecretSync as a solution to managing your bootstrapping secrets.
Warning
This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments.
SecretSync
is a Kubernetes Custom Resource that provides semantics to sync Kubernetes Secrets from management cluster to leaf clusters.
An example could be seen below:
apiVersion: capi.weave.works/v1alpha1
+kind: SecretSync
+metadata:
+ name: my-dev-secret-syncer
+ namespace: default
+spec:
+ clusterSelector:
+ matchLabels:
+ environment: dev
+ secretRef:
+ name: my-dev-secret
+ targetNamespace: my-namespace
+
1) Select the secret to sync:
secretRef:
+ name: my-dev-secret
+
2) Specify the GitopsClusters that the secret will be synced to via labels:
clusterSelector:
+ matchLabels:
+ environment: dev
+
Secretsync
reconciles secrets on clusters: any cluster at a future time matching the label selector will have the secret reconciled too.
More info about the CRD spec here
apiVersion: gitops.weave.works/v1alpha1
+kind: GitopsCluster
+metadata:
+namespace: flux-system
+labels:
+ environment: dev
+
apiVersion: v1
+kind: Secret
+metadata:
+name: my-dev-secret
+namespace: flux-system
+type: Opaque
+
Info
Some restrictions apply to the current version: - Resources should be in the same namespace - Leaf cluster nodes should be annotated with node-role.kubernetes.io/control-plane
apiVersion: capi.weave.works/v1alpha1
+kind: SecretSync
+metadata:
+ name: my-dev-secret-syncer
+ namespace: default
+spec:
+ clusterSelector:
+ matchLabels:
+ environment: dev
+ secretRef:
+ name: my-dev-secret
+ targetNamespace: my-namespace
+
Check-in to your configuration repo within your management cluster
Create a PR, review and merge
See the progress
Once reconciled, the status section would show created secret resource version
status:
+ versions:
+ leaf-cluster-1: "211496"
+
Your secret has been created in the target leaf cluster
➜ kubectl get secret -n default
+NAME TYPE DATA
+my-dev-secret Opaque 1
+
Warning
This feature is in alpha and certain aspects will change We're very excited for people to use this feature. However, please note that changes in the API, behaviour and security will evolve. The feature is suitable to use in controlled testing environments.
This guide shows you a basic experience to get started with Weave Gitops Secrets. It covers the scenario of setting up the capability in a test environment and how to use it for your applications.
In order to be able to manage external secrets stores and secrets, add external-secrets
application from weaveworks-charts
profiles repository.
Include via values.yaml
the configuration to deploy the SecretStore connecting to AWS Secrets Manager.
values:
+ secretStores:
+ enabled: true
+ path: ./clusters/bases/secrets
+ sourceRef:
+ kind: GitRepository
+ name: flux-system
+ namespace: flux-system
+
clusters/bases/secrets
in our configuration repo where a kustomization exists apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- aws-secrets-manager.yaml
+
With the AWS Secrets Manager secret store
apiVersion: external-secrets.io/v1beta1
+kind: SecretStore
+metadata:
+ name: aws-secrets-manager
+ namespace: flux-system
+spec:
+ provider:
+ aws:
+ auth:
+ secretRef:
+ accessKeyIDSecretRef:
+ key: access-key
+ name: awssm-secret
+ secretAccessKeySecretRef:
+ key: secret-access-key
+ name: awssm-secret
+ region: eu-north-1
+ service: SecretsManager
+
Review and merge the PR and see it available in your cluster
Given you have a secret in AWS Secrets Manager for example test/search/db
.
Create the External Secret manifest via Secrets UI to pull the secret from your store into your environment.
See it available in your cluster.
At this stage you have everything you need for your application to consume the secret. Add it to your application as usual.
apiVersion: v1
+kind: Pod
+metadata:
+name: secret-dotfiles-pod
+spec:
+volumes:
+- name: database-secrets
+ secret:
+ secretName: search-database
+containers:
+- name: dotfile-test-container
+ image: registry.k8s.io/busybox
+ command:
+ - ls
+ - "-l"
+ - "/etc/database-secrets"
+ volumeMounts:
+ - name: database-secrets
+ readOnly: true
+ mountPath: "/etc/database-secrets"
+
You should see the expected secret available
kubectl logs -f secret-dotfiles-pod
+
+total 0
+lrwxrwxrwx 1 root root 15 Apr 5 17:26 password -> ..data/password
+
Secrets are sensitive information such as passwords, access keys, and other credentials that should not be exposed publicly. In cloud-native applications, secrets are often used to authenticate and authorize access to various resources, such as databases, APIs, and other services.
In a GitOps environment, secrets are typically stored either encrypted in Git, or using Custom Resources that reference the secret in an external secret store. Secrets are then synced into the clusters and securely passed to the application containers or workloads.
Effective secrets management in cloud-native applications and GitOps environments is critical for maintaining the security and compliance of the overall system. Best practices include regularly rotating secrets, using strong encryption and access controls, and implementing robust auditing and monitoring processes.
Weave GitOps Secrets Management is a set of features that makes it easier for teams to manage secrets in a GitOps environment across multiple clusters. These features provide an automated way to manage secrets effectively, and make it easier for different personas to work with secrets.
For Developers, they can use Weave GitOps Secrets Management to securely create and track application secrets such as API keys, passwords, and other credentials. They can do that using Weave GitOps UI in a self-serve manner.
For Operation Teams, they can use Weave GitOps Secrets Management to help set up secure and reliable flows for developers to create and consume secrets for their applications.
Weave GitOps Secrets Management supports integrations with SOPS and External Secrets Operator (ESO) to provide a secure and automated way to manage secrets in a GitOps environment, while giving the option for customers to choose any of these secrets operators or working with both of them.
For SOPS and ESO operators, Weave GitOps is providing different ways to do the following: * Setup Secrets Operators (SOPS | ESO) * Bootstrap Secrets into clusters * Manage Secrets through Weave GitOps UI
In order to get started with Weave GitOps Secrets Management, please follow this guide here.
At Weave GitOps Enterprise (WGE), we support two approaches for creating and managing secrets: External Secrets Operator and Mozilla SOPS. In this guide, we will provide an overview of both approaches and explain how to use the UI to create and manage secrets.
Clicking on the Secrets under the Platform section in the left hand menu will bring you to the secrets page where you can create external secrets, sops secrets, and view the external secrets list.
Setup the External Secrets Operator by following this guide.
To create a new ExternalSecret
CR, start by clicking on to the Create External Secret
button to navigate to the creation page.
Here, you will be prompted to enter the External Secret Name
and the Target K8s Secret Name
. Once you choose the Target Cluster
, you will find a new list of all the Secret Stores
on this cluster to choose from.
It's important to note that the chosen SecretStore
may be a cluster-scoped SecretStore
ie: ClusterSecretStore
or a namespace-scoped SecretStore
.
If you choose a namespace scoped SecretStore
, the new secret will be created on the same namespace as the SecretStore
.
If you choose a cluster-scoped ClusterSecretStore
, the new secret will be created in a namespace of your choice.
Then you need to add the SecretPath
, which is the path to the external secret within the secret store.
After you have chosen your desired SecretStore
& SecretPath
the UI allows you to add secret properties in two different scenarios:
The first scenario allows you to add specific property fields. Each added property
also has an optional SecretKey
field. Here's how to do it:
In the Properties
section, click the Add
button to create a new property field.
Enter the name of the property
you want to create. You can add as many properties as you need.
If you wish to specify a SecretKey
for the property, enter it in the SecretKey
field. If this field is left blank, the property
name will be used as the SecretKey
.
To remove a property, click the Remove
sign next to the property you wish to delete.
Remember, this option allows you to have fine-grained control over which properties are included in your ExternalSecret
.
The second scenario is to include all properties in your ExternalSecret
. If the Include all properties
checkbox is checked, all property inputs will be disabled and ignored, and all secrets including all keys under the specified SecretPath
will be added. Here's how:
Check the Include all properties
checkbox. This will automatically disable the property input fields.
Using this option allows you to quickly create an ExternalSecret
that includes all secrets under a specific SecretPath
, without the need to specify each one individually.
Warning
Remember to use this option with caution. You may not need to expose all your secret properties to be on the cluster.
This process allows you to easily create new ExternalSecret
CRs without needing to manually create them through YAML files or command line tools.
The ExternalSecrets List section of the UI allows you to view all the external secrets that are currently stored in your Kubernetes clusters. This section provides an overview of each external secret, including its name, namespace, cluster, k8s-secret, secret-store and the age. From this page, you can also navigate to the details page to view more information about a specific secret.
The details page displays the details of a specific external secret, including its name, namespace, data, and creation date. Below are the details that you can expect to see on this page:
Version: This shows the version of the external secret, which may be blank if no version has been specified.
Based on the configuration of the external secret, this section will vary:
If the "Include all properties" option was selected during the creation of the external secret, this section will display the text "All properties are included".
If specific properties were manually added during creation, this section will display a table with two columns: "Property" and "SecretKey". This table lists all the property and secret key pairs added to the external secret.
Understanding the information provided on the details page can help you to manage and troubleshoot your external secrets as needed.
The following events can be expected when using the UI to manage external secrets:
Understanding these events can help you to troubleshoot issues that may arise when managing external secrets using the UI. In particular, if you encounter a Not Ready
event, you may need to check your secret store credentials and ensure that the secret store is operational before proceeding with any further actions.
Creating a SOPS secret involves using the SOPS tool to encrypt a file containing sensitive information, such as credentials or API keys. This encrypted file can then be stored securely in version control or another location, with only authorized users able to decrypt it using their own private key. This adds an additional layer of security to sensitive data, reducing the risk of unauthorized access or accidental exposure.
For more information about how to generate OpenPGP/age keys and configure your cluster to work with Weave GitOps Enterprise secrets management follow this guide.
To create a new SOPS secret, start by clicking on the Create Sops Secret
button.
This will open the create form where you can specify the details of your new secret. First, choose the Cluster
where you want to create the secret. Then, enter a name
for your secret and select the namespace
where it will be created.
Next, select the encryption method
that you want to use - currently, only GPG/AGE encryption is supported. Finally, choose the kustomization
that will be used by SOPS to decrypt the secret, as well as, having the public key info that will be used to encrypt the secret data. Afterwards, add your key-value
pairs of your secrets. It's important to note that the value
input will be encoded to base64.
The generated secret should be something like below.
After approving the pull request, Flux will reconcile it to your cluster. To verify that the secret has been successfully created, you can use the following command to retrieve it as YAML:
kubectl get secret secretTest-default-sops-secret -n default -o yaml
+
which will give the following output:
apiVersion: v1
+data:
+ secret-1: dmFsCg==
+kind: Secret
+metadata:
+ name: secretTest-default-sops-secret
+ namespace: default
+type: Opaque
+
Weave GitOps Enterprise now supports managing secrets using External Secrets Operator from the UI. External Secrets Operator is a Kubernetes operator that allows users to use secrets from external secrets management systems by reading their information using external APIs and injecting their values into Kubernetes secrets. To be able to use this functionality, users need to configure their External Secrets Operator and SecretStores using one of the guides below.
You should have your SecretStore CRs defined in a git repository. Those CRs will be installed to your cluster in the following steps and used by the creation UI.
The ESO profile is packaged with the weaveworks-charts. If you have the usual profiles setup, you will not need to do anything extra. This profile installs the ESO controller, all the required CRDs, and the SecretStore CRs defined in the previous step.
There are several Kubernetes Secrets that need to exist on your management cluster for the whole flow to work.
If your SecretStores repository is private then you'll need a Secret, that contains the repo credentials, to access the repository. This is usually the Secret you created while bootstrapping Flux on the management cluster and is copied to your leaf cluster during creation.
For each SecretStore CR, you'll need to add a Secret, that follows the format expected by this CR, to allow the operator to access the defined External Secret Store.
Follow this guide for bootstrapping those secrets on leaf clusters.
To install the ESO profile on an existing cluster, use Add an application
from the Applications
page and select external-secrets
from weaveworks-charts
. Check the Profile values section for more info about configuring the values.yaml
.
To bootstrap the ESO profile on a leaf cluster, select external-secrets
from the profiles dropdown in the Create Cluster
page. Check the Profile values section for more info about configuring the values.yaml
.
You should then configure the values.yaml
to install the SecretStores
on the cluster from a GitRepository
. This is done by configuring the secretStores
section.
secretStores:
+enabled: true
+url: ssh://git@github.com/github-owner/repo-name # url for the git repository that contains the SecretStores
+tag: v1.0.0
+path: ./ # could be a path to the secrets dir or a kustomization.yaml file for the SecretStore in the GitRepository
+secretRef: my-pat # the name of the Secret containing the repo credentials for private repositories
+
secretStores:
+enabled: true
+sourceRef: # Specify the name for an existing GitSource reference
+ kind: GitRepository
+ name: flux-system
+ namespace: flux-system
+
import CodeBlock from "@theme/CodeBlock";
import SopsBootstrapJob from "!!raw-loader!./assets/sops-bootstrap-job.yaml"; import TemplateParams from "!!raw-loader!./assets/template-params.yaml"; import TemplateAnnotations from "!!raw-loader!./assets/template-annotations.yaml";
Weave GitOps Enterprise now supports managing secrets using SOPS, a tool that encrypts and decrypts secrets using various key management services, from the UI. To be able to use this functionality, users need to configure their private and public key-pairs using one of the guides below.
In this section, we will cover the prerequisites for using SOPS with Weave GitOps Enterprise, and how to configure SOPS for your existing Kubernetes cluster to work with GPG and age keys.
For a more advanced setup for SOPS with flux, please refer to this guide.
OpenPGP is a way of using SOPS to encrypt and decrypt secrets with Weave GitOps Enterprise.
Here are the steps to generate an OpenPGP key and configure your cluster to work with Weave GitOps Enterprise secrets management.
1- Generate a gpg key pairs
export KEY_NAME="gpg-key"
+export KEY_COMMENT="gpg key"
+
+gpg --batch --full-generate-key <<EOF
+%no-protection
+Key-Type: 1
+Key-Length: 4096
+Subkey-Type: 1
+Subkey-Length: 4096
+Expire-Date: 0
+Name-Comment: ${KEY_COMMENT}
+Name-Real: ${KEY_NAME}
+EOF
+
2- Export the key pairs fingerprint in the shell
gpg --list-secret-keys "${KEY_NAME}"
+
+sec rsa4096 2020-09-06 [SC]
+ 710DC0DB6C1662F707095FC30233CB21E656A3CB
+
+export KEY_FP="710DC0DB6C1662F707095FC30233CB21E656A3CB"
+
3- Export the generated private key to a kubernetes secret sops-gpg-private-key
which will be used by flux's kustomize-controller to decrypt the secrets using sops.
gpg --export-secret-keys --armor "${KEY_FP}" |
+kubectl create secret generic sops-gpg-private-key \
+--namespace=flux-system \
+--from-file=sops.asc=/dev/stdin
+
4- Export the generated public key to a kubernetes secret sops-gpg-public-key
which will be used by Weave GitOps Enterprise to encrypt the secrets created from the UI.
gpg --export --armor "${KEY_FP}" |
+kubectl create secret generic sops-gpg-public-key \
+--namespace=flux-system \
+--from-file=sops.asc=/dev/stdin
+
Tip
It's recommended to remove the secret from your machine
gpg --delete-secret-keys "${KEY_FP}"
+
5- Create a kustomization for reconciling the secrets on the cluster and set the --decryption-secret
flag to the name of the private key created in step 3.
flux create kustomization gpg-secrets \
+--source=secrets \ # the git source to reconcile the secrets from
+--path=./secrets/gpg \
+--prune=true \
+--interval=10m \
+--decryption-provider=sops \
+--decryption-secret=sops-gpg-private-key
+
6- Annotate the kustomization object created in the previous step with the name and namespace of the public key created in step 4.
kubectl annotate kustomization gpg-secrets \
+sops-public-key/name=sops-gpg-public-key \
+sops-public-key/namespace=flux-system \
+-n flux-system
+
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+kind: Kustomization
+metadata:
+name: gpg-secrets
+namespace: flux-system
+annotations:
+ sops-public-key/name: sops-gpg-public-key
+ sops-public-key/namespace: flux-system
+spec:
+interval: 10m
+sourceRef:
+ kind: GitRepository
+ name: secrets
+path: ./secrets/gpg
+decryption:
+ provider: sops
+ secretRef:
+ name: sops-gpg-private-key
+prune: true
+validation: server
+
Note
This is an essential step in order to allow other operators and developers to utilize the Weave GitOps UI to encrypt SOPS secrets using the public key secret in the cluster.
age is a simple, modern and secure file encryption tool, that can be used to encrypt secrets using Weave GitOps Enterprise.
Here are the steps to generate an age key and configure your cluster to work with Weave GitOps Enterprise secrets management.
1- Generate an age key with age-keygen
age-keygen -o age.agekey
+
+Public key: <public key>
+
2- Export the generated private key to a kubernetes secret sops-age-private-key
which will be used by flux's kustomize-controller to decrypt the secrets using sops.
cat age.agekey |
+kubectl create secret generic sops-age-private-key \
+--namespace=flux-system \
+--from-file=age.agekey=/dev/stdin
+
3- Export the generated public key to a kubernetes secret sops-age-public-key
which will be used by Weave GitOps Enterprise to encrypt the secrets created from the UI.
echo "<public key>" |
+kubectl create secret generic sops-age-public-key \
+--namespace=flux-system \
+--from-file=age.agekey=/dev/stdin
+
Tip
It's recommended to remove the secret from your machine
rm -f age.agekey
+
4- Create a kustomization for reconciling the secrets on the cluster and set the --decryption-secret
flag to the name of the private key created in step 2.
flux create kustomization age-secrets \
+--source=secrets \ # the git source to reconcile the secrets from
+--path=./secrets/age \
+--prune=true \
+--interval=10m \
+--decryption-provider=sops \
+--decryption-secret=sops-age-private-key
+
5- Annotate the kustomization object created in the previous step with the name and namespace of the public key created in step 3.
kubectl annotate kustomization age-secrets \
+sops-public-key/name=sops-age-public-key \
+sops-public-key/namespace=flux-system \
+-n flux-system
+
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
+kind: Kustomization
+metadata:
+name: age-secrets
+namespace: flux-system
+annotations:
+ sops-public-key/name: sops-age-public-key
+ sops-public-key/namespace: flux-system
+spec:
+interval: 10m
+sourceRef:
+ kind: GitRepository
+ name: secrets
+path: ./secrets/age
+decryption:
+ provider: sops
+ secretRef:
+ name: sops-age-private-key
+prune: true
+validation: server
+
Note
This is an essential step in order to allow other operators and developers to utilize the Weave GitOps UI to encrypt SOPS secrets using the public key secret in the cluster.
Tip
In case of using OpenPGP and age in the same cluster, you need to make the kustomizations point to different directories. This is because flux's kustomize-controller expects that all the secrets in the kustomization's path are encrypted with the same key.
Bootstrapping SOPS to leaf clusters in WGE can be done by utilizing ClusterBootstrapConfig
job to bootstrap Flux and SOPS. The job is a container which generates SOPS secrets key pair, creates a kubernetes secret with the private key, creates a kubernetes secret with the public key (to be used in self-serve flow) and the proper rbac for it. As well as an option to push the public key to the git repository via a PR (to be distributed).
The following example is using GPG encryption to install SOPS and generate keys when bootstrapping leaf clusters. Create the following ClusterBootstrapConfig
CR and push it to your fleet repo.
<CodeBlock title="clusters/management/capi/boostrap/sops-bootstrap-job.yaml" className="language-yaml"
{SopsBootstrapJob}
In order to bootstrap SOPS to leaf clusters, we need some modifications to the cluster template to allow creating a Kustomization for reconciling the secrets on the cluster using SOPS and to run the ClusterBootstrapConfig
job during cluster creation.
The template metadata should have annotation, it will be used by WGE to create the Kustomization with the cluster files.
templates.weave.works/sops-enabled: "true"
+
The template should have the following parameters that are needed for the Kustomization
<CodeBlock title="clusters/management/capi/templates/template.yaml" className="language-yaml"
{TemplateParams}
The template should have the following annotations under GitOpsCluster
to be used in the bootstrap job
<CodeBlock title="clusters/management/capi/templates/template.yaml" className="language-yaml"
{TemplateAnnotations}
To bootstrap SOPS on a leaf cluster, create a new cluster using the SOPS template from the Create Cluster
page and fill in the following SOPS-related values in the form:
SOPS_KUSTOMIZATION_NAME
: This Kustomization will be used to decrypt SOPS secrets from this path clusters/default/leaf-cluster/sops/
after reconciling on the cluster. example (my-secrets
)SOPS_SECRET_REF
: The private key secret name that will be generated by SOPS in the bootstrap job. example (sops-gpg
)SOPS_SECRET_REF_NAMESPACE
: The private key secret namespace this secret will be generated by SOPS in the bootstrap job. example (flux-system
)SOPS_KEY_NAME
: SOPS key name. This will be used to generate SOPS keys. example (test.yourdomain.com
)SOPS_KEY_COMMENT
: SOPS key comment. This will be used to generate SOPS keys. example (sops secret comment
)SOPS_PUSH_TO_GIT
: Option to push the public key to the git repository. expected values (true
, false
)sops-gpg
to decrypt secretssops-gpg-pub
to encrypt secretsdecryption
defined in it to SOPS
location in the cluster repo locationAccess to sops decryption secrets should be restricted and allowed only to be read by flux's kustomize controller. This can be done using Kubernetes RBAC.
Here's an example of how you can use RBAC to restrict access to sops decryption secrets:
apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: sops-secrets-role
+rules:
+- apiGroups: [""]
+ resources: ["secrets"]
+ resourceNames: ["sops-gpg-private-key", "sops-age-private-key"]
+ verbs: ["get", "watch", "list"]
+
apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: sops-secrets-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: sops-secrets-role
+subjects:
+- kind: ServiceAccount
+ name: kustomize-controller
+
Warning
You would need to ensure that no other rolebindings or clusterrolebindings would allow reading the decryption secret at any time. This could be achieved by leveraging policy capabilities to detect existing and prevent future creation of roles that would grant read secrets permissions.
It provides semantics to sync Kubernetes Secrets from the management cluster to leaf clusters.
apiVersion: capi.weave.works/v1alpha1
+kind: SecretSync
+metadata:
+ name: my-dev-secret-syncer
+ namespace: default
+spec:
+ clusterSelector:
+ matchLabels:
+ environment: dev
+ secretRef:
+ name: my-dev-secret
+ targetNamespace: my-namespace
+
The documentation for the api version capi.weave.works/v1alpha1
type SecretSync struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ Spec SecretSyncSpec `json:"spec,omitempty"`
+ Status SecretSyncStatus `json:"status,omitempty"`
+}
+
+// SecretSyncSpec
+type SecretSyncSpec struct {
+ // Label selector for Clusters. The Clusters that are
+ // selected by this will be the ones affected by this SecretSync.
+ // It must match the Cluster labels. This field is immutable.
+ // Label selector cannot be empty.
+ ClusterSelector metav1.LabelSelector `json:"clusterSelector"`
+ // SecretRef specifies the Secret to be bootstrapped to the matched clusters
+ // Secret must be in the same namespace of the SecretSync object
+ SecretRef v1.LocalObjectReference `json:"secretRef"`
+ // TargetNamespace specifies the namespace which the secret should be bootstrapped in
+ // The default value is the namespace of the referenced secret
+ //+optional
+ TargetNamespace string `json:"targetNamespace,omitempty"`
+}
+
+// SecretSyncStatus secretsync object status
+type SecretSyncStatus struct {
+ // SecretVersions a map contains the ResourceVersion of the secret of each cluster
+ // Cluster name is the key and secret's ResourceVersion is the value
+ SecretVersions map[string]string `json:"versions"`
+}
+
This document defines security reporting, handling, disclosure, and audit information for Weave Gitops.
Vulnerability disclosures announced publicly. Disclosures will contain an overview, details about the vulnerability, a fix that will typically be an update, and optionally a workaround if one is available.
We will coordinate publishing disclosures and security releases in a way that is realistic and necessary for end users. We prefer to fully disclose the vulnerability as soon as possible once a user mitigation is available. Disclosures will always be published in a timely manner after a release is published that fixes the vulnerability.
Here is an overview of all our published security advisories.
Date | CVE | Security Advisory | Severity | Affected version(s) |
---|---|---|---|---|
2022-06-23 | CVE-2022-31098 | Weave GitOps leaked cluster credentials into logs on connection errors | Critical | <= 0.8.1-rc.5 |
Date | CVE | Security Advisory | Severity | Affected version(s) |
---|---|---|---|---|
2022-08-27 | CVE-2022-38790 | Malicious links can be crafted by users and shown in the UI | Critical | < v0.9.0-rc.5 |
To set up the Terraform Controller (TF-Controller), follow the steps in the preflight checks. Here is a summary of what you will need to do:
The exact steps for setting up the TF-controller will depend on the specific environment and infrastructure that you are using. The project's documentation provides additional information to help with setup.
Perform the following actions to set up TF-Controller:
Create a local cluster using a tool such as kind
or minikube
. This will allow you to develop and test TF-Controller in a local environment before deploying it to a production cluster.
kind create cluster --name tf-controller
+
Install the Flux CLI on your local machine. This will allow you to interact with the Flux controllers on your cluster.
brew install fluxcd/tap/flux
+
Prepare a Git repository to store the configuration files and manifests for Flux and TF-controller. For this example we'll use GitHub. To follow along, you'll need a GitHub account and personal access token with repo permissions. You'll also need to properly configure your Git client by setting your username and email address.
Assuming your username is $GITHUB_USER
, you can create a new repository called gitops-tf-controller
using the following command:
export GITHUB_USER=<your github username>
+export GITHUB_TOKEN=<your github personal access token>
+
+gh repo create $GITHUB_USER/gitops-tf-controller
+
Bootstrap the cluster with Flux v2 (v0.32.0 or later) using the path (for example) ./cluster/my-cluster
. This will install Flux on the cluster and create a Flux system at ./cluster/my-cluster/flux-system
.
git clone git@github.com:$GITHUB_USER/gitops-tf-controller.git
+cd gitops-tf-controller
+
+flux bootstrap github \
+ --owner=$GITHUB_USER \
+ --repository=gitops-tf-controller \
+ --branch=main \
+ --path=./cluster/my-cluster \
+ --personal \
+ --token-auth
+
Create a directory at ./cluster/my-cluster/infra/
:
mkdir -p ./cluster/my-cluster/infra/
+
Download the TF-controller manifest from the release location and save it to ./cluster/my-cluster/infra/tf-controller.yaml
—placing the file tf-controller.yaml
in this directory:
curl -s https://raw.githubusercontent.com/weaveworks/tf-controller/main/docs/release.yaml > ./cluster/my-cluster/infra/tf-controller.yaml
+
kustomization.yaml
file that contains the following:apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+ - tf-controller.yaml
+
Add the kustomization.yaml
file to your Git repository, then push the changes to your repository.
If you want to use TF-Controller with the Notification Controller, you will also need to modify the manifest to enable the two controllers to work together. The exact steps for doing this will depend on the specific requirements of your environment and the configuration of the Notification Controller. You may need to refer to the documentation for the TF-Controller and Notification Controller for more information on how to set this up.
Before using TF-Controller, you must install Flux by using either flux install
or the flux bootstrap
command. Make sure you have the latest version of Flux. After that, you can install TF-controller with Flux HelmRelease with this command:
kubectl apply -f https://raw.githubusercontent.com/weaveworks/tf-controller/main/docs/release.yaml
+
For the most recent TF-Controller release candidate, please use rc.yaml:
kubectl apply -f https://raw.githubusercontent.com/weaveworks/tf-controller/main/docs/rc.yaml
+
or manually with Helm by:
# Add tf-controller helm repository
+helm repo add tf-controller https://weaveworks.github.io/tf-controller/
+
+# Install tf-controller
+helm upgrade -i tf-controller tf-controller/tf-controller \
+ --namespace flux-system
+
For details on configurable parameters of the TF-controller chart, please see this chart Readme.
Alternatively, you can install TF-controller via kubectl
:
export TF_CON_VER=v0.14.0
+kubectl apply -f https://github.com/weaveworks/tf-controller/releases/download/${TF_CON_VER}/tf-controller.crds.yaml
+kubectl apply -f https://github.com/weaveworks/tf-controller/releases/download/${TF_CON_VER}/tf-controller.rbac.yaml
+kubectl apply -f https://github.com/weaveworks/tf-controller/releases/download/${TF_CON_VER}/tf-controller.deployment.yaml
+
Here's a simple example of how to GitOps your Terraform resources with TF-controller and Flux.
First, define a Source controller's source (GitRepository
, Bucket
, OCIRepository
)—for example:
apiVersion: source.toolkit.fluxcd.io/v1beta1
+kind: GitRepository
+metadata:
+ name: helloworld
+ namespace: flux-system
+spec:
+ interval: 30s
+ url: https://github.com/tf-controller/helloworld
+ ref:
+ branch: main
+
In this mode, Terraform resources will be planned and automatically applied for you. Enable it by setting .spec.approvePlan=auto
:
apiVersion: infra.contrib.fluxcd.io/v1alpha2
+kind: Terraform
+metadata:
+ name: helloworld
+ namespace: flux-system
+spec:
+ interval: 1m
+ approvePlan: auto
+ path: ./
+ sourceRef:
+ kind: GitRepository
+ name: helloworld
+ namespace: flux-system
+
For a full list of features and how to use them, please visit the Terraform overview.
drift detected
event message when it's a change of source that triggered the update¶Whenever you change a source, you will get a new plan. TF-controller picks up the new plan and applies it. Drift happens if, and only if, the live system changes intentionally. Then TF-controller will generate a lengthy message see an example stating that a drift has occurred. If there is drift, the icon will be red in the TF Objects > Status column of the WGE UI.
Terraform Controller (TF-Controller) is a reliable tool for managing your infrastructure and application resources using the GitOps approach, all at your own pace. An open source project created by Weaveworks, the makers of Flux, TF-Controller follows patterns established by Flux and integrates with Weave GitOps.
TF-Controller makes the following GitOps models available to suit your specific needs:
tfstate
without making any other changes.To get started with TF-controller, simply follow the provided getting started guide. You can also find extensive documentation here—it covers API references, CLI references, and how-to's for common situations.
With Weave GitOps Enterprise, you can manage Terraform
objects the same way you can with Kustomization
and HelmReleases
:
plan
and apply
inside Runner Pods. When specifying .metadata.namespace
and .spec.serviceAccountName
, the Runner Pod uses the specified ServiceAccount and runs inside the specified Namespace. These settings enable the soft multi-tenancy model, usable within the Flux multi-tenancy setup..spec.approvePlan=auto
allows a Terraform
object to be reconciled and act as the representation of your Terraform resources. TF-controller uses the spec of the Terraform
object to plan
and apply
its associated Terraform resources. It then stores the TFSTATE
of the applied resources as a Secret
inside the Kubernetes cluster. After .spec.interval
passes, TF-Controller checks for drift between your live system and your Terraform resources and, if affirmative, automatically generates and applies a plan to correct it.TFSTATE
. You can use the field .spec.disableDriftDetection
to disable this behaviour. Drift detection-only mode, without plan
or apply
steps, allows you to perform read-only drift detection.plan
from the apply
step, just like in the Terraform workflow you are familiar with—but in a GitOps way. When a plan is generated, the controller shows you a message asking if you want to apply it. Optionally create and push the change to a new branch for your team members to review and approve too.Terraform
object in v0.13.0+ allows you to better configure your Terraform resources via YAMLs, but without introducing any extra CRDs to your cluster.spec.cloud
to configure Terraform
objects to use Terraform Cloud as the backend for storing the state. TF-controller has its own versioning system that is separate from the versioning system used by Weave GitOps. This means that you can install and use TF-controller independently of Weave GitOps—it will not be affected by the version of Weave GitOps that you are using.
Here is the dependency matrix:
Version | Terraform | Source Controller | Flux v2 |
---|---|---|---|
v0.14.0 | v1.3.9 | v0.35.1 | v0.40.x |
v0.13.1 | v1.3.1 | v0.31.0 | v0.38.x |
This guide will show you how to use a template to create a Terraform resource in Weave GitOps Enterprise.
Add the following template to a path in your Git repository that is synced by Flux. For example, in the Installation guide, we set the path that is synced by Flux to ./clusters/management
.
Commit and push these changes. Once a template is available in the cluster, it can be used to create a resource, which will be shown in the next step.
---
+apiVersion: clustertemplates.weave.works/v1alpha2
+kind: GitOpsTemplate
+metadata:
+name: tf-template
+namespace: default
+spec:
+description:
+ This is a sample WGE template that will be translated into a tf-controller specific template.
+params:
+ - name: RESOURCE_NAME
+ description: Resource Name
+resourcetemplates:
+ - content:
+ - apiVersion: infra.contrib.fluxcd.io/v1alpha1
+ kind: Terraform
+ metadata:
+ name: ${RESOURCE_NAME}
+ namespace: flux-system
+ spec:
+ interval: 1h
+ path: ./
+ approvePlan: auto
+ alwaysCleanupRunnerPod: true
+ sourceRef:
+ kind: GitRepository
+ name: flux-system
+ namespace: flux-system
+
Verify that your template is in the cluster:
kubectl get gitopstemplates.clustertemplates.weave.works -A
+NAME AGE
+sample-wge-tf-controller-template 14m
+
If the template does not appear immediately, reconcile the changes with Flux:
flux reconcile kustomization flux-system
+► annotating Kustomization flux-system in flux-system namespace
+✔ Kustomization annotated
+◎ waiting for Kustomization reconciliation
+✔ applied revision main/e6f5f0c3925bcfecdb50bceb12af9a87677d2213
+
A resource can be created from a template by specifying the template's name and supplying values to it, as well as your Weave GitOps Enterprise username, password, and HTTP API endpoint.
gitops add terraform --from-template sample-wge-tf-controller-template \
+--set="RESOURCE_NAME"="name" \
+--username=<username> --password=<password> \
+--endpoint https://localhost:8000 \
+--url https://github.com/myawesomeorg/myawesomerepo
+
+Created pull request: https://github.com/myawesomeorg/myawesomerepo/pull/5
+
This will create a PR in your Git repository with a TF-Controller manifest. Once the PR is merged, TF-Controller will supply the values to the Terraform manifest, apply the Terraform manifest to create the resource, and reconcile any changes that you make to the Terraform manifest!
This template can be used to create multiple resources out of the same Terraform manifest by supplying different values to the template. Any changes to the Terraform manifest will be reconciled automatically to all resources.
Get a specific template that can be used to create a Terraform resource:
gitops get template terraform sample-wge-tf-controller-template --endpoint https://localhost:8000 --username=<username> --password=<password>
+NAME PROVIDER DESCRIPTION ERROR
+sample-wge-tf-controller-template This is a sample WGE template that will be translated into a tf-controller specific template.
+
List all the templates available on the cluster:
gitops get template terraform --endpoint https://localhost:8000 --username=<username> --password=<password>
+NAME PROVIDER DESCRIPTION ERROR
+sample-aurora-tf-template This is a sample Aurora RDS template.
+sample-wge-tf-controller-template This is a sample WGE template that will be translated into a tf-controller specific template.
+
List all the parameters that can be defined on a specific template:
gitops get template terraform tf-controller-aurora --list-parameters --endpoint https://localhost:8000 --username=<username> --password=<password>
+NAME REQUIRED DESCRIPTION OPTIONS
+RESOURCE_NAME false Resource Name
+
BONUS
For a more advanced example, here is a template to create an Aurora RDS cluster using WGE with Flux and the TF-Controller.
iam:CreateRole
. More info here.Configure a way to safely store Secrets. One method is to use the Mozilla SOPS CLI, but there are other ways, such as Sealed Secrets or Vaults.
Follow the steps in the Flux docs except for the "Configure in-cluster secrets decryption" step! This step looks slightly different for WGE. Instead of re-creating the controllers, you can configure the kustomize-controller
as instructed below.
In your Git repository source, add the following to your kustomize-controller
configuration:
cat <<EOF >> ./clusters/<cluster-name>/flux-system/gotk-sync.yaml
+ decryption:
+ provider: sops
+ secretRef:
+ name: sops-gpg
+EOF
+
Create a Secret to store sensitive values such as the following: - DB username - DB password - AWS Access Key ID - AWS Secret Access Key - AWS Role ARN
Note
If following the Flux guide, this step corresponds to "Encrypting secrets using OpenPGP". You can stop following the Flux guide at this step.
For example, here is what you would do if using the SOPS method:
kubectl -n flux-system create secret generic tf-controller-auth \
+--from-literal=master_username=admin \
+--from-literal=master_password=change-me \
+--from-literal=aws_access_key=AKIAIOSFODNN7EXAMPLE \
+--from-literal=aws_secret_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \
+--from-literal=aws_role_arn="arn:aws:iam::012345678910:role/wge-tf-controller-example" \
+--dry-run=client \
+-o yaml > tf-controller-auth.yaml
+
Then, encrypt the secret:
sops --encrypt --in-place tf-controller-auth.yaml
+
Commit and push your changes. You can now store encrypted secrets to your Git repository.
Add the following Terraform manifest to the root of your Git repository.
terraform {
+required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = "~> 3.0"
+ }
+}
+}
+
+variable "cluster_identifier" {}
+variable "database_name" {}
+variable "master_username" {}
+variable "master_password" {}
+variable "backup_retention_period" {}
+variable "region" {}
+variable "aws_access_key" {}
+variable "aws_secret_key" {}
+variable "aws_role_arn" {}
+
+provider "aws" {
+region = var.region
+access_key = var.aws_access_key
+secret_key = var.aws_secret_key
+
+assume_role {
+ role_arn = var.aws_role_arn
+}
+}
+
+locals {
+engine = "aurora-mysql"
+engine_version = "5.7.mysql_aurora.2.07.5"
+port = 3306
+}
+
+data "aws_availability_zones" "available" {
+state = "available"
+
+filter {
+ name = "group-name"
+ values = [var.region]
+}
+}
+
+resource "aws_rds_cluster" "mycluster" {
+cluster_identifier = var.cluster_identifier
+engine = local.engine
+engine_version = local.engine_version
+port = local.port
+availability_zones = slice(data.aws_availability_zones.available.names, 0, 3)
+database_name = var.database_name
+master_username = var.master_username
+master_password = var.master_password
+backup_retention_period = var.backup_retention_period
+skip_final_snapshot = true
+apply_immediately = true
+}
+
+resource "aws_rds_cluster_instance" "cluster_instance" {
+count = 1
+identifier = "${aws_rds_cluster.mycluster.id}-${count.index}"
+cluster_identifier = aws_rds_cluster.mycluster.id
+instance_class = "db.t3.small"
+engine = aws_rds_cluster.mycluster.engine
+engine_version = aws_rds_cluster.mycluster.engine_version
+}
+
Add the following template to a path in your Git repository that is synced by Flux. In the quickstart guide, we set this path to ./clusters/management
.
---
+apiVersion: clustertemplates.weave.works/v1alpha2
+kind: GitOpsTemplate
+metadata:
+name: rds-template
+namespace: default
+spec:
+description: This is a sample Aurora RDS template.
+params:
+ - name: RESOURCE_NAME
+ description: Resource Name
+ - name: CLUSTER_IDENTIFIER
+ description: Cluster Identifier
+ - name: DATABASE_NAME
+ description: Database Name
+ - name: BACKUP_RETENTION_PERIOD
+ description: Backup Retention Period
+ - name: REGION
+ description: Region
+resourcetemplates:
+ - contents:
+ - apiVersion: infra.contrib.fluxcd.io/v1alpha1
+ kind: Terraform
+ metadata:
+ name: ${RESOURCE_NAME}
+ namespace: flux-system
+ spec:
+ interval: 1h
+ path: ./
+ approvePlan: auto
+ alwaysCleanupRunnerPod: true
+ vars:
+ - name: cluster_identifier
+ value: ${CLUSTER_IDENTIFIER}
+ - name: database_name
+ value: ${DATABASE_NAME}
+ - name: backup_retention_period
+ value: ${BACKUP_RETENTION_PERIOD}
+ - name: region
+ value: ${REGION}
+ varsFrom:
+ - kind: Secret
+ name: tf-controller-auth
+ sourceRef:
+ kind: GitRepository
+ name: flux-system
+ namespace: flux-system
+
Commit and push your changes.
Tip
You can change the location where you keep your Terraform manifests in your Git source (which the TF-Controller will reconcile) by configuring spec.resourcetemplates.spec.path
.
gitops add terraform --from-template rds-template \
+--username=<username> --password=<password> \
+--endpoint https://localhost:8000 \
+--url https://github.com/myawesomeorg/myawesomerepo \
+--set "RESOURCE_NAME"="tf-controller-aurora","CLUSTER_IDENTIFIER"="super-awesome-aurora","DATABASE_NAME"="db1","BACKUP_RETENTION_PERIOD"=5,"REGION"="us-west-2"
+
+Created pull request: https://github.com/myawesomeorg/myawesomerepo/pull/6
+
Merge the PR in your Git repository to add the TF-Controller manifest. TF-Controller will supply the values to the Terraform manifest, apply the Terraform manifest to create the resource, and reconcile any changes that you make to the Terraform manifest.
Any changes to your Terraform manifest will be automatically reconciled by the TF-controller with Flux.
You can re-use this template to create multiple Terraform resources, each with a different set of values!
Make sure to delete the newly created RDS resources to not incur additional costs.
Organizations working with Kubernetes have a tremendous need to manage tenancy for numerous software delivery teams. Weave GitOps Workspaces offers tenancy management for Kubernetes clusters at scale. It’s built on top of Flux's powerful approach to managing tenancy, and adds policies that will help you to define finer-grain rules on your tenants.
With WGE Workspaces, all it takes for platform operators to create workspaces is a single CLI command that generates:
Multi tenancy provides users with the ability to define boundaries to multiple engineering teams working on a single cluster. Through a simple interface it adds permissions to the necessary Kubernetes resources to make it easy for customers to manage their multiple tenants.
WGE multi tenancy expands on the multi tenancy feature provided by flux
. In addition to creating the necessary Kubernetes tenancy resources that flux
adds, multi tenancy in WGE also adds the following: - Defining tenancy using a single yaml file that serves as a source of truth for the organization - Makes use of WGE policy features to enforce non-Kubernetes-native permissions
gitops
command line toolgitops
command line tool is responsible for creating the multi tenancy resources. The tool is distributed as part of WGE offering. It reads the definitions of a yaml file and can either apply the necessary changes directly to the cluster or output it to stdout so it can be saved into a file and pushed to a repo to be reconciled by flux
.
To make use of the policy features, policy agent needs to be installed in the necessary cluster(s).
Below is an example of a tenancy file:
---
+tenants:
+- name: first-tenant
+ namespaces:
+ - first-ns
+- name: second-tenant
+ namespaces:
+ - second-test-ns
+ - second-dev-ns
+ allowedRepositories:
+ - kind: GitRepository
+ url: https://github.com/testorg/testrepo
+ - kind: GitRepository
+ url: https://github.com/testorg/testinfo
+ - kind: Bucket
+ url: minio.example.com
+ - kind: HelmRepository
+ url: https://testorg.github.io/testrepo
+ allowedClusters:
+ - kubeConfig: cluster-1-kubeconfig
+ - kubeConfig: cluster-2-kubeconfig
+ teamRBAC:
+ groupNames:
+ - foo-group
+ - bar-group
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - 'namespaces'
+ - 'pods'
+ verbs:
+ - 'list'
+ - 'get'
+ deploymentRBAC:
+ bindRoles:
+ - name: foo-role
+ kind: Role
+ rules:
+ - apiGroups:
+ - ''
+ resources:
+ - 'namespaces'
+ - 'pods'
+ verbs:
+ - 'list'
+ - 'get'
+serviceAccount:
+name: "reconcilerServiceAccount"
+
The file above defines two tenants: first-tenant
and second-tenant
as follows:
namespaces
: describes which namespaces should be part of the tenant. Meaning that users who are part of the tenant would have access on those namespaces.allowedRepositories
: limits the flux
repositories sources that can be used in the tenant's namespaces. This is done through policies and thus requires policy-agent
to be deployed on the cluster which will stop these sources from being deployed if they aren't allowed as part of the tenant. It consists of:kind
: the flux
source kind. Can be: GitRepository
, Bucket
and HelmRepository
.url
: the URL for that source.allowedClusters
: limits which secrets containing cluster configuration can be used. It stops WGE GitopsCluster
and flux Kustomization
from being deployed if they point to a secret not in the list, essentially giving control on which cluster can be added to a multi-cluster setup. Requires policy-agent
.kubeConfig
: name of the secret that can be used for this tenant.teamRBAC
: Generate Roles and Rolebindings for a list of groupNames
. This allows you to easily give an OIDC group access to a tenant's resources. When the Weave Gitops Enterprise UI is configured with your OIDC provider, tenants can log in and view the status of the resources they have been granted access to.deploymentRBAC
: generate Roles and Rolebindings for a service account. Can additionally bind to an existing Roles/ClusterRoles. Would use the global service account if specified in the tenants file, otherwise it will use the created service account which takes the tenant name. If not specified a Rolebinding would be created that binds to cluster-admin
ClusterRole.Global options:
serviceAccount
: Override the name of the generated ServiceAccount
for all tenants. This allows you to easily use the flux controllers' --default-service-account
feature. Tenants do not need to make sure they correctly specify the serviceAccount
when using Kustomization
or HelmRelease
resources. The kustomization-controller and helm-controller will instead look for the default-service-account
in the namespace being reconciled to and use that. Just configure serviceAccount.name
and --default-service-account
to the same value.The command creates the necessary resources to apply multi tenancy on the user's cluster. To use the command to apply the resources directly the user needs to have the necessary configuration to connect to the desired cluster. The command considers the tenancy file as a source of truth and will change the cluster state to match what is currently described in the file.
For more control over a specific tenant, a tenancy file should be used. The command allows the creation of the base resources that define a tenancy through the arguments:
gitops create tenants --name test-tenant --namespace test-ns1 --namespace test-ns2
+
namespace/test-ns1 created
+test-ns1/serviceaccount/test-tenant created
+test-ns1/rolebinding.rbac.authorization.k8s.io/test-tenant-service-account-cluster-admin created
+namespace/test-ns2 created
+test-ns2/serviceaccount/test-tenant created
+test-ns2/rolebinding.rbac.authorization.k8s.io/test-tenant-service-account-cluster-admin created
+policy.pac.weave.works/weave.policies.tenancy.test-tenant-allowed-application-deploy created
+
The above will create the namespaces and permissions through a ServiceAccount
with the same name as the tenant, test-tenant
in the case of the above example, in each required namespace. The same can be done through a file as follows:
tenants:
+ - name: test-tenant
+ namespaces:
+ - test-ns1
+ - test-ns2
+
gitops create tenants --from-file tenants.yaml
+
namespace/test-ns1 created
+test-ns1/serviceaccount/test-tenant created
+test-ns1/rolebinding.rbac.authorization.k8s.io/test-tenant-service-account-cluster-admin created
+namespace/test-ns2 created
+test-ns2/serviceaccount/test-tenant created
+test-ns2/rolebinding.rbac.authorization.k8s.io/test-tenant-service-account-cluster-admin created
+policy.pac.weave.works/weave.policies.tenancy.test-tenant-allowed-application-deploy created
+
To check the resources that would be deployed first use the export
flag:
gitops create tenants --from-file tenants.yaml --export
+
apiVersion: v1
+kind: Namespace
+metadata:
+creationTimestamp: null
+labels:
+ toolkit.fluxcd.io/tenant: test-tenant
+name: test-ns1
+spec: {}
+status: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+creationTimestamp: null
+labels:
+ toolkit.fluxcd.io/tenant: test-tenant
+name: test-tenant
+namespace: test-ns1
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+creationTimestamp: null
+labels:
+ toolkit.fluxcd.io/tenant: test-tenant
+name: test-tenant-service-account-cluster-admin
+namespace: test-ns1
+roleRef:
+apiGroup: rbac.authorization.k8s.io
+kind: ClusterRole
+name: cluster-admin
+subjects:
+- kind: ServiceAccount
+name: test-tenant
+namespace: test-ns1
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+creationTimestamp: null
+labels:
+ toolkit.fluxcd.io/tenant: test-tenant
+name: test-ns2
+spec: {}
+status: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+creationTimestamp: null
+labels:
+ toolkit.fluxcd.io/tenant: test-tenant
+name: test-tenant
+namespace: test-ns2
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+creationTimestamp: null
+labels:
+ toolkit.fluxcd.io/tenant: test-tenant
+name: test-tenant-service-account-cluster-admin
+namespace: test-ns2
+roleRef:
+apiGroup: rbac.authorization.k8s.io
+kind: ClusterRole
+name: cluster-admin
+subjects:
+- kind: ServiceAccount
+name: test-tenant
+namespace: test-ns2
+---
+apiVersion: pac.weave.works/v2beta2
+kind: Policy
+metadata:
+creationTimestamp: null
+labels:
+ toolkit.fluxcd.io/tenant: test-tenant
+name: weave.policies.tenancy.test-tenant-allowed-application-deploy
+spec:
+category: weave.categories.tenancy
+code: |
+ package weave.tenancy.allowed_application_deploy
+
+ controller_input := input.review.object
+ violation[result] {
+ namespaces := input.parameters.namespaces
+ targetNamespace := controller_input.spec.targetNamespace
+ not contains_array(targetNamespace, namespaces)
+ result = {
+ "issue detected": true,
+ "msg": sprintf("using target namespace %v is not allowed", [targetNamespace]),
+ }
+ }
+ violation[result] {
+ serviceAccountName := controller_input.spec.serviceAccountName
+ serviceAccountName != input.parameters.service_account_name
+ result = {
+ "issue detected": true,
+ "msg": sprintf("using service account name %v is not allowed", [serviceAccountName]),
+ }
+ }
+ contains_array(item, items) {
+ items[_] = item
+ }
+description: Determines which helm release and kustomization can be used in a tenant
+how_to_solve: ""
+id: weave.policies.tenancy.test-tenant-allowed-application-deploy
+name: test-tenant allowed application deploy
+parameters:
+- name: namespaces
+ required: false
+ type: array
+ value:
+ - test-ns1
+ - test-ns2
+- name: service_account_name
+ required: false
+ type: string
+ value: test-tenant
+provider: kubernetes
+severity: high
+standards: []
+tags:
+- tenancy
+targets:
+ kinds:
+ - HelmRelease
+ - Kustomization
+ labels: []
+ namespaces:
+ - test-ns1
+ - test-ns2
+status: {}
+---
+
Applying the resources through the command line is not usually recommended. For WGE the recommended way is to commit the result of the create tenants
command to source control and let flux
handle deployment. To achieve that you can save the result of the export
to a file:
gitops create tenants --from-file tenants.yaml --export > clusters/management/tenants.yaml
+
From the side menu, you can click on the Workspaces tab to go to the workspaces list view.
This view lists workspaces across all clusters. You can filter workspaces by their clusters or their names.
You can go to this view by clicking on the name of the workspace in the Workspaces List View.
In this view you can see all details of the workspace such as its name, namespace, and all resources related to this workspace.
Packages:
+ +Package v1alpha1 contains API Schema definitions for the gitopssets v1alpha1 API group
+Resource Types: +GitOpsSet is the Schema for the gitopssets API
+Field | +Description | +||||||||
---|---|---|---|---|---|---|---|---|---|
+apiVersion +string |
+
+templates.weave.works/v1alpha1
+ |
+||||||||
+kind +string + |
+
+GitOpsSet
+ |
+||||||||
+metadata + + +Kubernetes meta/v1.ObjectMeta + + + |
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+ |
+||||||||
+spec + + +GitOpsSetSpec + + + |
+
+ + +
|
+||||||||
+status + + +GitOpsSetStatus + + + |
++ | +
+(Appears on: +GitOpsSetGenerator, +GitOpsSetNestedGenerator) +
+APIClientGenerator defines a generator that queries an API endpoint and uses +that to generate data.
+Field | +Description | +
---|---|
+interval + + +Kubernetes meta/v1.Duration + + + |
+
+ The interval at which to poll the API endpoint. + |
+
+endpoint + +string + + |
+
+(Optional)
+ This is the API endpoint to use. + |
+
+method + +string + + |
+
+ Method defines the HTTP method to use to talk to the endpoint. + |
+
+jsonPath + +string + + |
+
+ JSONPath is string that is used to modify the result of the API +call. +This can be used to extract a repeating element from a response. +https://kubernetes.io/docs/reference/kubectl/jsonpath/ + |
+
+headersRef + + +HeadersReference + + + |
+
+(Optional)
+ HeadersRef allows optional configuration of a Secret or ConfigMap to add +additional headers to an outgoing request. +For example, a Secret with a key Authorization: Bearer abc123 could be +used to configure an authorization header. + |
+
+body + + +Kubernetes pkg/apis/apiextensions/v1.JSON + + + |
+
+(Optional)
+ Body is set as the body in a POST request. +If set, this will configure the Method to be POST automatically. + |
+
+singleElement + +bool + + |
+
+(Optional)
+ SingleElement means generate a single element with the result of the API +call. +When true, the response must be a JSON object and will be returned as a +single element, i.e. only one element will be generated containing the +entire object. + |
+
+secretRef + + +Kubernetes core/v1.LocalObjectReference + + + |
+
+ Reference to Secret in same namespace with a field “caFile” which +provides the Certificate Authority to trust when making API calls. + |
+
+(Appears on: +GitOpsSetGenerator, +GitOpsSetNestedGenerator) +
+ClusterGenerator defines a generator that queries the cluster API for +relevant clusters.
+Field | +Description | +
---|---|
+selector + + +Kubernetes meta/v1.LabelSelector + + + |
+
+(Optional)
+ Selector is used to filter the clusters that you want to target. +If no selector is provided, no clusters will be matched. + |
+
+(Appears on: +GitOpsSetGenerator, +GitOpsSetNestedGenerator) +
+ConfigGenerator loads a referenced ConfigMap or +Secret from the Cluster and makes it available as a resource.
+Field | +Description | +
---|---|
+kind + +string + + |
+
+ Kind of the referent. + |
+
+name + +string + + |
+
+ Name of the referent. + |
+
+(Appears on: +GitOpsSetSpec) +
+GitOpsSetGenerator is the top-level set of generators for this GitOpsSet.
+Field | +Description | +
---|---|
+list + + +ListGenerator + + + |
++ | +
+pullRequests + + +PullRequestGenerator + + + |
++ | +
+gitRepository + + +GitRepositoryGenerator + + + |
++ | +
+ociRepository + + +OCIRepositoryGenerator + + + |
++ | +
+matrix + + +MatrixGenerator + + + |
++ | +
+cluster + + +ClusterGenerator + + + |
++ | +
+apiClient + + +APIClientGenerator + + + |
++ | +
+imagePolicy + + +ImagePolicyGenerator + + + |
++ | +
+config + + +ConfigGenerator + + + |
++ | +
+(Appears on: +MatrixGenerator) +
+GitOpsSetNestedGenerator describes the generators usable by the MatrixGenerator. +This is a subset of the generators allowed by the GitOpsSetGenerator because the CRD format doesn’t support recursive declarations.
+Field | +Description | +
---|---|
+name + +string + + |
+
+(Optional)
+ Name is an optional field that will be used to prefix the values generated +by the nested generators, this allows multiple generators of the same +type in a single Matrix generator. + |
+
+list + + +ListGenerator + + + |
++ | +
+gitRepository + + +GitRepositoryGenerator + + + |
++ | +
+ociRepository + + +OCIRepositoryGenerator + + + |
++ | +
+pullRequests + + +PullRequestGenerator + + + |
++ | +
+cluster + + +ClusterGenerator + + + |
++ | +
+apiClient + + +APIClientGenerator + + + |
++ | +
+imagePolicy + + +ImagePolicyGenerator + + + |
++ | +
+config + + +ConfigGenerator + + + |
++ | +
+(Appears on: +GitOpsSet) +
+GitOpsSetSpec defines the desired state of GitOpsSet
+Field | +Description | +
---|---|
+suspend + +bool + + |
+
+(Optional)
+ Suspend tells the controller to suspend the reconciliation of this +GitOpsSet. + |
+
+generators + + +[]GitOpsSetGenerator + + + |
+
+ Generators generate the data to be inserted into the provided templates. + |
+
+templates + + +[]GitOpsSetTemplate + + + |
+
+ Templates are a set of YAML templates that are rendered into resources +from the data supplied by the generators. + |
+
+serviceAccountName + +string + + |
+
+(Optional)
+ The name of the Kubernetes service account to impersonate +when reconciling this Kustomization. + |
+
+(Appears on: +GitOpsSet) +
+GitOpsSetStatus defines the observed state of GitOpsSet
+Field | +Description | +
---|---|
+ReconcileRequestStatus + + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + + |
+
+
+(Members of |
+
+observedGeneration + +int64 + + |
+
+(Optional)
+ ObservedGeneration is the last observed generation of the HelmRepository +object. + |
+
+conditions + + +[]Kubernetes meta/v1.Condition + + + |
+
+(Optional)
+ Conditions holds the conditions for the GitOpsSet + |
+
+inventory + + +ResourceInventory + + + |
+
+(Optional)
+ Inventory contains the list of Kubernetes resource object references that +have been successfully applied + |
+
+(Appears on: +GitOpsSetSpec) +
+GitOpsSetTemplate describes a resource to create
+Field | +Description | +
---|---|
+repeat + +string + + |
+
+ Repeat is a JSONPath string defining that the template content should be +repeated for each of the matching elements in the JSONPath expression. +https://kubernetes.io/docs/reference/kubectl/jsonpath/ + |
+
+content + + +k8s.io/apimachinery/pkg/runtime.RawExtension + + + |
+
+ Content is the YAML to be templated and generated. + |
+
+(Appears on: +GitOpsSetGenerator, +GitOpsSetNestedGenerator) +
+GitRepositoryGenerator generates from files in a Flux GitRepository resource.
+Field | +Description | +
---|---|
+repositoryRef + +string + + |
+
+ RepositoryRef is the name of a GitRepository resource to be generated from. + |
+
+files + + +[]RepositoryGeneratorFileItem + + + |
+
+ Files is a set of rules for identifying files to be parsed. + |
+
+directories + + +[]RepositoryGeneratorDirectoryItem + + + |
+
+ Directories is a set of rules for identifying directories to be +generated. + |
+
+(Appears on: +APIClientGenerator) +
+HeadersReference references either a Secret or ConfigMap to be used for +additional request headers.
+Field | +Description | +
---|---|
+kind + +string + + |
+
+ The resource kind to get headers from. + |
+
+name + +string + + |
+
+ Name of the resource in the same namespace to apply headers from. + |
+
+(Appears on: +GitOpsSetGenerator, +GitOpsSetNestedGenerator) +
+ImagePolicyGenerator generates from the ImagePolicy.
+Field | +Description | +
---|---|
+policyRef + +string + + |
+
+ PolicyRef is the name of a ImagePolicy resource to be generated from. + |
+
+(Appears on: +GitOpsSetGenerator, +GitOpsSetNestedGenerator) +
+ListGenerator generates from a hard-coded list.
+Field | +Description | +
---|---|
+elements + + +[]Kubernetes pkg/apis/apiextensions/v1.JSON + + + |
++ | +
+(Appears on: +GitOpsSetGenerator) +
+MatrixGenerator defines a matrix that combines generators. +The matrix is a cartesian product of the generators.
+Field | +Description | +
---|---|
+generators + + +[]GitOpsSetNestedGenerator + + + |
+
+ Generators is a list of generators to be combined. + |
+
+singleElement + +bool + + |
+
+(Optional)
+ SingleElement means generate a single element with the result of the +merged generator elements. +When true, the matrix elements will be merged to a single element, with +whatever prefixes they have. +It’s recommended that you use the Name field to separate out elements. + |
+
+(Appears on: +GitOpsSetGenerator, +GitOpsSetNestedGenerator) +
+OCIRepositoryGenerator generates from files in a Flux OCIRepository resource.
+Field | +Description | +
---|---|
+repositoryRef + +string + + |
+
+ RepositoryRef is the name of a OCIRepository resource to be generated from. + |
+
+files + + +[]RepositoryGeneratorFileItem + + + |
+
+ Files is a set of rules for identifying files to be parsed. + |
+
+directories + + +[]RepositoryGeneratorDirectoryItem + + + |
+
+ Directories is a set of rules for identifying directories to be +generated. + |
+
+(Appears on: +GitOpsSetGenerator, +GitOpsSetNestedGenerator) +
+PullRequestGenerator defines a generator that queries a Git hosting service +for relevant PRs.
+Field | +Description | +
---|---|
+interval + + +Kubernetes meta/v1.Duration + + + |
+
+ The interval at which to check for repository updates. + |
+
+driver + +string + + |
+
+ Determines which git-api protocol to use. + |
+
+serverURL + +string + + |
+
+(Optional)
+ This is the API endpoint to use. + |
+
+repo + +string + + |
+
+ This should be the Repo you want to query. +e.g. my-org/my-repo + |
+
+secretRef + + +Kubernetes core/v1.LocalObjectReference + + + |
+
+ Reference to Secret in same namespace with a field “password” which is an +auth token that can query the Git Provider API. + |
+
+labels + +[]string + + |
+
+(Optional)
+ Labels is used to filter the PRs that you want to target. +This may be applied on the server. + |
+
+forks + +bool + + |
+
+(Optional)
+ Fork is used to filter out forks from the target PRs if false, +or to include forks if true + |
+
+(Appears on: +GitRepositoryGenerator, +OCIRepositoryGenerator) +
+RepositoryGeneratorDirectoryItem stores the information about a specific +directory to be generated from.
+Field | +Description | +
---|---|
+path + +string + + |
++ | +
+exclude + +bool + + |
++ | +
+(Appears on: +GitRepositoryGenerator, +OCIRepositoryGenerator) +
+RepositoryGeneratorFileItem defines a path to a file to be parsed when generating.
+Field | +Description | +
---|---|
+path + +string + + |
+
+ Path is the name of a file to read and generate from can be JSON or YAML. + |
+
+(Appears on: +GitOpsSetStatus) +
+ResourceInventory contains a list of Kubernetes resource object references that have been applied by a Kustomization.
+Field | +Description | +
---|---|
+entries + + +[]ResourceRef + + + |
+
+ Entries of Kubernetes resource object references. + |
+
+(Appears on: +ResourceInventory) +
+ResourceRef contains the information necessary to locate a resource within a cluster.
+Field | +Description | +
---|---|
+id + +string + + |
+
+ ID is the string representation of the Kubernetes resource object’s metadata, +in the format ‘namespace_name_group_kind’. + |
+
+v + +string + + |
+
+ Version is the API version of the Kubernetes resource object’s kind. + |
+
This page was automatically generated with gen-crd-api-reference-docs
Site powered by Netlify ©
+Copyright © {{ build_date_utc.strftime('%Y') }} Weaveworks
+
+ Made with
+
+ Material for MkDocs
+
+