From 7a3efce1b130065233ec5d1f1d10e918deaead7f Mon Sep 17 00:00:00 2001 From: Anoop Gopalakrishnan Date: Fri, 12 Jan 2024 21:57:36 -0800 Subject: [PATCH] feat: Add kubevela support doc - Gives example of using with kubevela --- docs/kubevela/README.md | 104 ++++++++++ docs/kubevela/cnpg-cluster.cue | 337 +++++++++++++++++++++++++++++++++ docs/kubevela/gateway.cue | 197 +++++++++++++++++++ docs/kubevela/vela.yaml | 66 +++++++ main.go | 2 +- pkg/db/db.go | 20 +- 6 files changed, 721 insertions(+), 5 deletions(-) create mode 100644 docs/kubevela/README.md create mode 100644 docs/kubevela/cnpg-cluster.cue create mode 100644 docs/kubevela/gateway.cue create mode 100644 docs/kubevela/vela.yaml diff --git a/docs/kubevela/README.md b/docs/kubevela/README.md new file mode 100644 index 0000000..0e02502 --- /dev/null +++ b/docs/kubevela/README.md @@ -0,0 +1,104 @@ + +# Deploying Fern Application on k3d with KubeVela and Cloud Native PostgreSQL + +This README provides detailed instructions on deploying the Fern application in a Kubernetes environment using k3d, KubeVela, and Cloud Native PostgreSQL (cnpg). The process includes setting up a k3d cluster, installing KubeVela, deploying Cloud Native PostgreSQL using Helm, and finally, deploying the Fern application as defined in `vela.yaml`. Additionally, this guide covers the installation of custom ComponentDefinitions for an enhanced gateway trait and Cloud Native PostgreSQL. + +## Prerequisites + +Before starting, ensure you have the following tools installed on your system: + +- Docker +- k3d +- kubectl +- Helm +- KubeVela CLI + +## Step 1: Create a k3d Cluster + +Create a new k3d cluster with the following command: + +```bash +k3d cluster create my-k3d-cluster --port "8080:8080@loadbalancer" --agents 3 +``` + +This command will set up a new Kubernetes cluster named `my-k3d-cluster` running in Docker. 
+ +## Step 2: Install KubeVela + +Install KubeVela in your k3d cluster using Helm: + +```bash +helm repo add kubevela https://charts.kubevela.net/core +helm repo update +helm install --create-namespace -n vela-system kubevela kubevela/vela-core +``` + +Confirm the installation by checking the deployed pods: + +```bash +kubectl get pods -n vela-system +``` + +## Step 3: Install Cloud Native PostgreSQL + +Add the Cloud Native PostgreSQL Helm repository and install it: + +```bash +helm repo add cnpg https://cloudnative-pg.github.io/charts +helm repo update +helm install cnpg cnpg/cloud-native-pg +``` + +## Step 4: Install Custom ComponentDefinitions + +Before deploying the Fern application, add the following custom ComponentDefinitions: + +1. **Gateway Component (gateway.cue):** + + This updates the existing gateway trait to support the service type LoadBalancer. + + ```bash + vela def apply gateway.cue + ``` + +2. **Cloud Native PostgreSQL Component (cnpg-cluster.cue):** + + Introduces a new component definition for Cloud Native PostgreSQL. + + ```bash + vela def apply cnpg-cluster.cue + ``` + +## Step 5: Deploy the Fern Application + +Deploy your application using the provided `vela.yaml` in a namespace called fern: + +```bash +kubectl create ns fern +kubectl apply -f vela.yaml +``` + +## Verifying the Deployment + +To check the status of your deployment, use: + +```bash +kubectl get all -n fern +``` + +## Additional Notes + +- Ensure Docker is running prior to initiating the k3d cluster. +- Customize `gateway.cue` and `cnpg-cluster.cue` according to your specific needs. +- Adjust `vela.yaml` to fit the configuration of your Fern application. + +## Contributing + +We welcome your contributions! Feel free to submit pull requests or open issues to enhance the documentation or deployment procedures. + +--- + +For questions or feedback, please create an issue in this repository. + +Thank you for using or contributing to this project! 
+ diff --git a/docs/kubevela/cnpg-cluster.cue b/docs/kubevela/cnpg-cluster.cue new file mode 100644 index 0000000..d610315 --- /dev/null +++ b/docs/kubevela/cnpg-cluster.cue @@ -0,0 +1,337 @@ +"cloud-native-postgres": { + alias: "cnpg" + annotations: {} + description: "Cloud Native Postgres" + labels: { + } + type: "component" +} + +template: { + output: { + apiVersion: "postgresql.cnpg.io/v1" + kind: "Cluster" + metadata: { + name: parameter.name + } + spec: { + + if parameter.description != "" { + description: parameter.description + } + + if parameter.imageName == "" { + imageName: "ghcr.io/cloudnative-pg/postgresql:13.13-13" + } + + if parameter.instances > 0 { + instances: parameter.instances + } + if parameter.startDelay > 0 { + startDelay: parameter.startDelay + } + + if parameter.stopDelay > 0 { + stopDelay: parameter.stopDelay + } + if parameter.primaryUpdateStrategy != "" { + primaryUpdateStrategy: parameter.primaryUpdateStrategy + } + + // postgresql: { + // parameters: { + // if parameter.sharedBuffers != "" { + // "shared_buffers": parameter.sharedBuffers + // } + // if parameter.maxStatStatements != "" { + // "pg_stat_statements.max": parameter.maxStatStatements + // } + // if parameter.trackStatStatements != "" { + // "pg_stat_statements.track": parameter.trackStatStatements + // } + // if parameter.logMinDuration != "" { + // "auto_explain.log_min_duration": parameter.logMinDuration + // } + // } + // if len(parameter.pgHba) > 0 { + // pg_hba: parameter.pgHba + // } + // } + bootstrap: { + initdb: { + if parameter.initDatabase != "" { + database: parameter.initDatabase + } + if parameter.initOwner != "" { + owner: parameter.initOwner + } + + if parameter.initSecretName != "" { + secret: { + name: parameter.initSecretName + } + } + } + } + if parameter.enableSuperuser { + enableSuperuserAccess: parameter.enableSuperuser + superuserSecret: { + if parameter.superuserSecretName != "" { + name: parameter.superuserSecretName + } + } + } + storage: { + if 
parameter.storageClass != "" { + storageClass: parameter.storageClass + } + if parameter.storageSize != "" { + size: parameter.storageSize + } + } + + if parameter.backupPath != "" { + backup: { + barmanObjectStore: { + if parameter.backupPath != "" && parameter.backupEndpointURL != "" { + destinationPath: parameter.backupPath + endpointURL: parameter.backupEndpointURL + s3Credentials: { + if parameter.backupAccessKeyID != "" && parameter.backupAccessKeyName != "" { + accessKeyId: { + name: parameter.backupAccessKeyName + key: parameter.backupAccessKeyID + } + } + if parameter.backupSecretKey != "" && parameter.backupSecretKeyName != "" { + secretAccessKey: { + name: parameter.backupSecretKeyName + key: parameter.backupSecretKey + } + } + } + wal: { + if parameter.walCompression != "" { + compression: parameter.walCompression + } + if parameter.walEncryption != "" { + encryption: parameter.walEncryption + } + } + data: { + if parameter.dataCompression != "" { + compression: parameter.dataCompression + } + if parameter.dataEncryption != "" { + encryption: parameter.dataEncryption + } + immediateCheckpoint: parameter.immediateCheckpoint + if parameter.backupJobs > 0 { + jobs: parameter.backupJobs + } + } + } + if parameter.retentionPolicy != "" { + retentionPolicy: parameter.retentionPolicy + } + } + } + } + + resources: { + requests: { + if parameter.requestMemory != "" { + memory: parameter.requestMemory + } + if parameter.requestCPU != "" { + cpu: parameter.requestCPU + } + } + limits: { + if parameter.limitMemory != "" { + memory: parameter.limitMemory + } + if parameter.limitCPU != "" { + cpu: parameter.limitCPU + } + } + } + // if parameter.enablePodAntiAffinity && parameter.topologyKey != "" { + // affinity: { + // enablePodAntiAffinity: parameter.enablePodAntiAffinity + // topologyKey: parameter.topologyKey + // } + // } + // nodeMaintenanceWindow: { + // inProgress: parameter.inProgress + // reusePVC: parameter.reusePVC + // } + } + } + + outputs: { + if 
parameter.initSecretName != "" { + secret: { + apiVersion: "v1" + kind: "Secret" + metadata: { + name: parameter.initSecretName + namespace: context.namespace + } + type: "Opaque" + stringData: { + username: parameter.initOwner + password: parameter.initPassword + database: parameter.initDatabase + } + } + } + } + + parameter: { + + // +usage=Specify the name of the cluster + name: string + + // +usage=Provide a description for the cluster + description: string | *"" // Default to empty string + + // +usage=Specify the image name for PostgreSQL + imageName: string | *"" // Default to empty string + + // +usage=Set the number of instances + instances: int | *1 // Default to 1 + + // +usage=Specify the start delay in seconds + startDelay: int | *0 // Default to 0 + + // +usage=Specify the stop delay in seconds + stopDelay: int | *0 // Default to 0 + + // +usage=Set the primary update strategy + primaryUpdateStrategy: string | *"" // Default to empty string + + // +usage=Specify the shared buffers size + sharedBuffers: string | *"" // Default to empty string + + // +usage=Set the maximum number of statements for pg_stat + maxStatStatements: string | *"" // Default to empty string + + // +usage=Set the tracking level for pg_stat statements + trackStatStatements: string | *"" // Default to empty string + + // +usage=Set the minimum log duration for auto explain + logMinDuration: string | *"" // Default to empty string + + // +usage=Define host-based authentication rules + pgHba: [{ + type: string + database: string + user: string + address: string + method: string + }] | *[] // Default to empty list + + // +usage=Specify the initial database name + initDatabase: string | *"" // Default to empty string + + // +usage=Specify the owner of the initial database + initOwner: string | *"" // Default to empty string + + // +usage=Specify the name of the secret for the initial database + initSecretName: string | *"" // Default to empty string + + // +usage=Specify the password 
for the initial database + initPassword: string | *"" // Default to empty string + + // +usage=Enable or disable superuser access + enableSuperuser: bool | *false // Default to false + + // +usage=Specify the name of the superuser secret + superuserSecretName: string | *"" // Default to empty string + + // +usage=Set the storage class + storageClass: string | *"" // Default to empty string + + // +usage=Define the storage size + storageSize: string | *"" // Default to empty string + + // +usage=Specify the backup destination path + backupPath: string | *"" // Default to empty string + + // +usage=Set the backup endpoint URL + backupEndpointURL: string | *"" // Default to empty string + + // +usage=Specify the access key ID for backup + backupAccessKeyID: string | *"" // Default to empty string + + // +usage=Specify the name of the secret containing the access key ID for backup + backupAccessKeyName: string | *"" // Default to empty string + + // +usage=Specify the secret access key for backup + backupSecretKey: string | *"" // Default to empty string + + // +usage=Specify the name of the secret containing the secret access key for backup + backupSecretKeyName: string | *"" // Default to empty string + + // +usage=Set the compression method for WAL + walCompression: string | *"" // Default to empty string + + // +usage=Set the encryption method for WAL + walEncryption: string | *"" // Default to empty string + + // +usage=Set the compression method for data + dataCompression: string | *"" // Default to empty string + + // +usage=Set the encryption method for data + dataEncryption: string | *"" // Default to empty string + + // +usage=Specify whether to perform an immediate checkpoint + immediateCheckpoint: bool | *false // Default to false + + // +usage=Set the number of backup jobs + backupJobs: int | *0 // Default to 0 + + // +usage=Specify the retention policy for backup + retentionPolicy: string | *"" // Default to empty string + + // +usage=Set the requested 
memory + requestMemory: string | *"" // Default to empty string + + // +usage=Set the requested CPU + requestCPU: string | *"" // Default to empty string + + // +usage=Set the memory limit + limitMemory: string | *"" // Default to empty string + + // +usage=Set the CPU limit + limitCPU: string | *"" // Default to empty string + + // +usage=Enable or disable pod anti-affinity + enablePodAntiAffinity: bool | *false // Default to false + + // +usage=Specify the topology key for pod anti-affinity + topologyKey: string | *"" // Default to empty string + + // +usage=Set the node maintenance window in progress state + inProgress: bool | *false // Default to false + + // +usage=Specify whether to reuse PVC during maintenance + reusePVC: bool | *false // Default to false + } + // parameter: { + // // enableSuperuser: true + // // superuserSecretName: postgres-super-app + // name: "postgres" + // namespace: "fern" + // initDatabase: "fern" + // initOwner: "fern" + // initSecretName: "fern-secret" + // instances: 2 + // storageSize: "0.5Gi" + // } + // + // context: { + // namespace: "fern" + // } +} diff --git a/docs/kubevela/gateway.cue b/docs/kubevela/gateway.cue new file mode 100644 index 0000000..e649adc --- /dev/null +++ b/docs/kubevela/gateway.cue @@ -0,0 +1,197 @@ +import "strconv" + +gateway: { + alias: "" + annotations: {} + attributes: { + appliesToWorkloads: ["deployments.apps", "statefulsets.apps"] + podDisruptive: false + status: { + customStatus: """ + let nameSuffix = { + if parameter.name != _|_ { "-" + parameter.name } + if parameter.name == _|_ { "" } + } + let ingressMetaName = context.name + nameSuffix + let ig = [for i in context.outputs if (i.kind == "Ingress") && (i.metadata.name == ingressMetaName) {i}][0] + igs: *null | string + if ig != _|_ if ig.status != _|_ if ig.status.loadBalancer != _|_ { + igs: ig.status.loadBalancer.ingress[0] + } + igr: *null | string + if ig != _|_ if ig.spec != _|_ { + igr: ig.spec.rules[0] + } + if igs == _|_ { + 
message: "No loadBalancer found, visiting by using 'vela port-forward " + context.appName + "'\\n" + } + if igs != _|_ { + if igs.ip != _|_ { + if igr.host != _|_ { + message: "Visiting URL: " + igr.host + ", IP: " + igs.ip + "\\n" + } + if igr.host == _|_ { + message: "Host not specified, visit the cluster or load balancer in front of the cluster, IP: " + igs.ip + "\\n" + } + } + if igs.ip == _|_ { + if igr.host != _|_ { + message: "Visiting URL: " + igr.host + "\\n" + } + if igr.host == _|_ { + message: "Host not specified, visit the cluster or load balancer in front of the cluster\\n" + } + } + } + """ + healthPolicy: """ + let nameSuffix = { + if parameter.name != _|_ { "-" + parameter.name } + if parameter.name == _|_ { "" } + } + let ingressMetaName = context.name + nameSuffix + let igstat = len([for i in context.outputs if (i.kind == "Ingress") && (i.metadata.name == ingressMetaName) {i}]) > 0 + isHealth: igstat + """ + } + } + description: "Enable public web traffic for the component, the ingress API matches K8s v1.20+." 
+ labels: {} + type: "trait" +} + +template: { + let nameSuffix = { + if parameter.name != _|_ {"-" + parameter.name} + if parameter.name == _|_ {""} + } + let serviceOutputName = "service" + nameSuffix + let serviceMetaName = context.name + nameSuffix + + outputs: (serviceOutputName): { + apiVersion: "v1" + kind: "Service" + metadata: name: "\(serviceMetaName)" + spec: { + if parameter.exposeType != _|_ { + type: parameter.exposeType + } + selector: "app.oam.dev/component": context.name + ports: [ + for k, v in parameter.http { + name: "port-" + strconv.FormatInt(v, 10) + port: v + targetPort: v + }, + ] + } + } + + let ingressOutputName = "ingress" + nameSuffix + let ingressMetaName = context.name + nameSuffix + legacyAPI: context.clusterVersion.minor < 19 + + outputs: (ingressOutputName): { + if legacyAPI { + apiVersion: "networking.k8s.io/v1beta1" + } + if !legacyAPI { + apiVersion: "networking.k8s.io/v1" + } + kind: "Ingress" + metadata: { + name: "\(ingressMetaName)" + annotations: { + if !parameter.classInSpec { + "kubernetes.io/ingress.class": parameter.class + } + if parameter.gatewayHost != _|_ { + "ingress.controller/host": parameter.gatewayHost + } + if parameter.annotations != _|_ { + for key, value in parameter.annotations { + "\(key)": "\(value)" + } + } + } + labels: { + if parameter.labels != _|_ { + for key, value in parameter.labels { + "\(key)": "\(value)" + } + } + } + } + spec: { + if parameter.classInSpec { + ingressClassName: parameter.class + } + if parameter.secretName != _|_ { + tls: [{ + hosts: [ + parameter.domain, + ] + secretName: parameter.secretName + }] + } + rules: [{ + if parameter.domain != _|_ { + host: parameter.domain + } + http: paths: [ + for k, v in parameter.http { + path: k + pathType: parameter.pathType + backend: { + if legacyAPI { + serviceName: serviceMetaName + servicePort: v + } + if !legacyAPI { + service: { + name: serviceMetaName + port: number: v + } + } + } + }, + ] + }] + } + } + + parameter: { + // 
+usage=Specify the domain you want to expose + domain?: string + + // +usage=Specify the mapping relationship between the http path and the workload port + http: [string]: int + + // +usage=Specify what kind of Service you want. options: "ClusterIP", "NodePort", "LoadBalancer" + exposeType: *"ClusterIP" | "NodePort" | "LoadBalancer" + + // +usage=Specify the class of ingress to use + class: *"nginx" | string + + // +usage=Set ingress class in '.spec.ingressClassName' instead of 'kubernetes.io/ingress.class' annotation. + classInSpec: *false | bool + + // +usage=Specify the secret name you want to quote to use tls. + secretName?: string + + // +usage=Specify the host of the ingress gateway, which is used to generate the endpoints when the host is empty. + gatewayHost?: string + + // +usage=Specify a unique name for this gateway, required to support multiple gateway traits on a component + name?: string + + // +usage=Specify a pathType for the ingress rules, defaults to "ImplementationSpecific" + pathType: *"ImplementationSpecific" | "Prefix" | "Exact" + + // +usage=Specify the annotations to be added to the ingress + annotations?: [string]: string + + // +usage=Specify the labels to be added to the ingress + labels?: [string]: string + } +} diff --git a/docs/kubevela/vela.yaml b/docs/kubevela/vela.yaml new file mode 100644 index 0000000..ed23d6a --- /dev/null +++ b/docs/kubevela/vela.yaml @@ -0,0 +1,66 @@ +apiVersion: core.oam.dev/v1beta1 +kind: Application +metadata: + name: fern-reporter-server + namespace: fern +spec: + components: + - name: postgres + type: cloud-native-postgres + properties: + name: postgres + namespace: fern + instances: 1 + storageSize: 0.5Gi + - name: fern-server + type: webservice + properties: + image: anoop2811/fern-reporter:pr7 + imagePullPolicy: Always + traits: + - type: gateway + properties: + exposeType: LoadBalancer + class: traefik + http: + "/": 8080 + - type: service-binding + properties: + envMappings: + FERN_USERNAME: + secret: 
postgres-app + key: username + FERN_PASSWORD: + secret: postgres-app + key: password + FERN_HOST: + secret: postgres-app + key: host + FERN_PORT: + secret: postgres-app + key: port + FERN_DATABASE: + secret: postgres-app + key: dbname + policies: + - name: debug + type: debug + workflow: + steps: + - name: deploy-db + type: apply-component + properties: + component: postgres + namespace: fern + - name: suspend + type: suspend + properties: + duration: 30s + - name: deploy-fern-server + type: apply-component + dependsOn: + - deploy-db + properties: + component: fern-server + namespace: fern + cpu: 0.5 diff --git a/main.go b/main.go index 5e72e1b..ecde612 100644 --- a/main.go +++ b/main.go @@ -32,7 +32,7 @@ func initConfig() { } func initDb() { - db.Init() + db.Initialize() } func initServer() { diff --git a/pkg/db/db.go b/pkg/db/db.go index a5098ef..f2e1e36 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -22,7 +22,7 @@ var gdb *gorm.DB //go:embed migrations var migrations embed.FS -func Init() { +func Initialize() { pkger.Include("/pkg/db") dbUrl := fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=disable", config.GetDb().Username, @@ -32,9 +32,21 @@ func Init() { config.GetDb().Database, ) - pdb, _ := sql.Open("postgres", dbUrl) - driver, _ := p.WithInstance(pdb, &p.Config{}) - source, _ := iofs.New(migrations, "migrations") + pdb, err := sql.Open("postgres", dbUrl) + if err != nil { + log.Fatalln(err) + } + + driver, err := p.WithInstance(pdb, &p.Config{}) + if err != nil { + log.Fatalln(err) + } + + source, err := iofs.New(migrations, "migrations") + if err != nil { + log.Fatalln(err) + } + m, err := migrate.NewWithInstance("iofs", source, "postgres", driver) if err != nil { log.Fatalln(err)