diff --git a/.github/README.md b/.github/README.md new file mode 100644 index 000000000..5afcb5b07 --- /dev/null +++ b/.github/README.md @@ -0,0 +1,66 @@ +# Workflow Overview + +This repository is equipped with automated workflows that streamline key processes for PRs, changes to the `main` branch, and releases. These workflows ensure smooth development, testing, and deployment cycles. + +## PR Opened 🚦 +When a developer opens a PR, several automated checks are triggered to validate the changes: + +- **[Build the Project](workflows/paladin-PR-build.yml):** + Runs essential tasks to ensure code quality and reliability: + - **Build and Test:** Compiles the code and runs tests for all subdirectories. + - **[Build Docker Images](workflows/build-image.yaml):** Builds Docker images based on PR changes for local validation. + > **Note:** These images are **not published** to a registry. + - **[Template the Helm Chart](workflows/build-chart.yaml):** Rebuilds and validates Helm charts for correctness. + > **Note:** Charts are **not published** but tested locally. + + +## Changes Pushed to Main 🌟 +Once changes are merged into the `main` branch, workflows prepare the project for production: + +- **[Build the Project](workflows/paladin-PR-build.yml):** + Similar to PR checks, this ensures the integrity of the main branch: + - **Build and Test:** Compiles code and runs tests for all subdirectories. + +- **[Publish Docker Images](workflows/cross-build-images.yaml):** + Produces production-grade, cross-platform Docker images and publishes them to the container registry: + - **Registry:** `ghcr.io/` + - **Tagging:** Images are tagged with `main`. + +- **[Update Documentation](workflows/docs.yaml):** + Detects documentation updates and publishes the latest content to the documentation site. 
+ + +## Release Time 🚀 +Releases deliver artifacts and resources to users and deployment targets through these workflows: + +- **[Release Orchestrator](workflows/release.yaml):** + Triggered by a version tag (e.g., `v1.2.3`), this workflow coordinates all release activities: + - **[Release Docker Images](workflows/release-images.yaml):** + Builds and **publishes Docker images** tagged with the release version (e.g., `v1.2.3`) and `latest`. + - **Registries:** + - `ghcr.io/`. + - `docker.io/`. + > `latest` is configurable + - **[Release Helm Chart](workflows/release-charts.yaml):** + Packages and **publishes Helm charts** to the chart repository tagged with the release version (e.g., `v1.2.3`) and `latest`. + > `latest` is configurable + - **[Release TypeScript SDK](workflows/release-typescript-sdk.yaml):** + Updates and **publishes the TypeScript SDK** to its registry: + - **Version:** Defined in [package.json](../sdk/typescript/package.json). + +### Releasing Options: +1. **Automatic:** Push a Git tag in the format `vX.Y.Z` (e.g., `v1.2.3`), and the workflows handle the release, marking it as the latest. +2. **Manual:** Trigger the [release workflow](https://github.com/LF-Decentralized-Trust-labs/paladin/actions/workflows/release.yaml) via the GitHub Actions interface, specifying the version and selecting the "latest" option if needed. + + +## Manual Actions 🛠️ +Workflows can also be triggered manually when needed. 
Available options include: + +- **[Release Orchestrator](workflows/release.yaml)** +- **[Release Docker Images](workflows/release-images.yaml)** +- **[Release Helm Chart](workflows/release-charts.yaml)** +- **[Release TypeScript SDK](workflows/release-typescript-sdk.yaml)** +- **[Build Helm Chart](workflows/build-chart.yaml)** +- **[Build Docker Images](workflows/build-image.yaml)** +- **[Cross-Platform Docker Image Build](workflows/cross-build-images.yaml)** + \ No newline at end of file diff --git a/.github/workflows/release-charts.yaml b/.github/workflows/release-charts.yaml index 7aa4ca14d..584fc8c60 100644 --- a/.github/workflows/release-charts.yaml +++ b/.github/workflows/release-charts.yaml @@ -1,5 +1,10 @@ name: Helm Chart release +permissions: + contents: write + packages: write + id-token: write + on: workflow_call: inputs: @@ -135,8 +140,23 @@ jobs: - name: Run chart-releaser uses: helm/chart-releaser-action@v1.6.0 with: - mark_as_latest: ${{ inputs.latest }} + mark_as_latest: false # the release is marked as latest in the next step charts_dir: "operator/charts" skip_existing: true env: CR_TOKEN: "${{ env.CR_TOKEN }}" + + - name: Prepare CRs artifacts + run: ./gradlew prepareArtifacts -PartifactDir=${{ github.workspace }}/artifacts + - name: Release + uses: softprops/action-gh-release@v2 + with: + tag_name: ${{ inputs.chart_tag }} + body: "Release ${{ inputs.chart_tag }}" + generate_release_notes: true + make_latest: ${{ inputs.latest }} + files: | + ${{ github.workspace }}/artifacts/basenet.yaml + ${{ github.workspace }}/artifacts/devnet.yaml + ${{ github.workspace }}/artifacts/artifacts.tar.gz + \ No newline at end of file diff --git a/.github/workflows/release-typescript-sdk.yaml b/.github/workflows/release-typescript-sdk.yaml index e2e6fc9df..06ab9a67e 100644 --- a/.github/workflows/release-typescript-sdk.yaml +++ b/.github/workflows/release-typescript-sdk.yaml @@ -30,6 +30,7 @@ jobs: uses: ./.github/actions/setup - name: Publish to npm + 
continue-on-error: true # this can fail if the version is already published working-directory: sdk/typescript shell: bash run: | diff --git a/operator/.gitignore b/operator/.gitignore index 36c897468..be86f558d 100644 --- a/operator/.gitignore +++ b/operator/.gitignore @@ -21,7 +21,6 @@ go.work # editor and IDE paraphernalia .idea -.vscode *.swp *.swo *~ @@ -37,4 +36,8 @@ manager # Generated by gradle **/charts/paladin-operator/templates/samples/*.yaml -**/charts/paladin-operator-crd/templates/*.yaml \ No newline at end of file +**/charts/paladin-operator-crd/templates/*.yaml + +**/artifacts/** + +**/__snapshot__/** \ No newline at end of file diff --git a/operator/.golangci.yml b/operator/.golangci.yml index 754f58ca1..11cccee33 100644 --- a/operator/.golangci.yml +++ b/operator/.golangci.yml @@ -15,6 +15,12 @@ issues: - path: "internal/*" linters: - dupl + - path: ".*_test.go" + linters: + - errcheck + - path: "contractpkg/*" + linters: + - errcheck linters: disable-all: true enable: @@ -32,7 +38,6 @@ linters: - misspell - nakedret - prealloc - - staticcheck - typecheck - unconvert - unparam diff --git a/operator/.vscode/launch.json b/operator/.vscode/launch.json new file mode 100644 index 000000000..b8513b397 --- /dev/null +++ b/operator/.vscode/launch.json @@ -0,0 +1,55 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Run Controller", + "type": "go", + "preLaunchTask": "check-running-on-kind", + "request": "launch", + "mode": "debug", + "program": "${workspaceFolder}/cmd/main.go", + "env": { + "KUBE_LOCAL": "true", + "WATCH_NAMESPACE": "default" + }, + "presentation": { + "hidden": false + } + }, + { + "name": "Run Contract Generator", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${workspaceFolder}/contractpkg", + "args": [ + "generate", + "contract_map.json" + ] + }, + { + "name": "Run Template Generator", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${workspaceFolder}/contractpkg", + "args": 
[ + "template", + "../config/samples", + "../charts/paladin-operator/templates/samples" + ] + }, + { + "name": "Run Artifacts Generator", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${workspaceFolder}/contractpkg", + "args": [ + "combine", + "../config/samples", + "../artifacts" + ] + } + ] + } \ No newline at end of file diff --git a/operator/Makefile b/operator/Makefile index 269dd9c32..c2ffb1f56 100644 --- a/operator/Makefile +++ b/operator/Makefile @@ -125,7 +125,7 @@ kind-delete: ## Delete the Kind cluster. ##@ Build .PHONY: build -build: manifests generate fmt vet ## Build manager binary. +build: manifests generate fmt vet test ## Build manager binary. go build -o bin/manager cmd/main.go .PHONY: run diff --git a/operator/api/v1alpha1/paladin_types.go b/operator/api/v1alpha1/paladin_types.go index 28cdb1766..7d798c287 100644 --- a/operator/api/v1alpha1/paladin_types.go +++ b/operator/api/v1alpha1/paladin_types.go @@ -33,13 +33,20 @@ type PaladinSpec struct { // Adds signing modules that load their key materials from a k8s secret SecretBackedSigners []SecretBackedSigner `json:"secretBackedSigners,omitempty"` - // Optionally bind to a local besu node deployed with this operator - // (vs. configuring a connection to a production blockchain network) + // Deprecated: Use 'baseLedgerEndpoint' instead. Example: + // { "baseLedgerEndpoint": {"type": "local", "local": {"nodeName": "node-name"}} } + // + // Optionally bind to a local Besu node deployed with this operator + // (vs. configuring a connection to a production blockchain network). + // +optional BesuNode string `json:"besuNode,omitempty"` - // AuthConfig is used to provide authentication details for blockchain connections - // If this is set, it will override the auth details in the config - AuthConfig *AuthConfig `json:"authConfig,omitempty"` + // Deprecated: Use 'baseLedgerEndpoint' instead. 
Example: + // { "baseLedgerEndpoint": {"type": "network", "endpoint": {"auth": {}}} } + AuthConfig *Auth `json:"authConfig,omitempty"` + + // BaseLedgerEndpoint specifies the base endpoint for the ledger + BaseLedgerEndpoint *BaseLedgerEndpoint `json:"baseLedgerEndpoint,omitempty"` // Optionally tune the service definition. // We merge any configuration you add (such as node ports) for the following services: @@ -56,6 +63,39 @@ type PaladinSpec struct { // Transports are configured individually on each node, as they reference security details specific to that node Transports []TransportConfig `json:"transports"` } +type BaseLedgerEndpointType string + +const ( + EndpointTypeLocal BaseLedgerEndpointType = "local" + EndpointTypeNetwork BaseLedgerEndpointType = "network" +) + +type BaseLedgerEndpoint struct { + // Type specifies the type of the endpoint. + // +kubebuilder:validation:Enum=local;network + Type BaseLedgerEndpointType `json:"type"` + + // Local specifies the configuration when the type is 'local'. + // +optional + Local *LocalLedgerEndpoint `json:"local,omitempty"` + + // Network specifies the configuration when the type is 'network'. + // +optional + Endpoint *NetworkLedgerEndpoint `json:"endpoint,omitempty"` +} + +// LocalLedgerEndpoint defines the configuration for local endpoints. +type LocalLedgerEndpoint struct { + // NodeName specifies the name of the local node. + NodeName string `json:"nodeName"` +} + +// NetworkLedgerEndpoint defines the configuration for network endpoints. +type NetworkLedgerEndpoint struct { + JSONRPC string `json:"jsonrpc"` + WS string `json:"ws"` + Auth *Auth `json:"auth,omitempty"` +} type LabelReference struct { // Label selectors provide a flexible many-to-many mapping between nodes and domains in a namespace. 
@@ -143,17 +183,21 @@ type SecretBackedSigner struct { KeySelector string `json:"keySelector"` } -type AuthMethod string +type AuthType string -const AuthMethodSecret AuthMethod = "secret" +const ( + // AuthTypeSecret is used to authenticate with a secret + // The secret must contain keys "username" and "password" + AuthTypeSecret AuthType = "secret" +) -type AuthConfig struct { +type Auth struct { // auth method to use for the connection // +kubebuilder:validation:Enum=secret - AuthMethod AuthMethod `json:"authMethod"` + Type AuthType `json:"type"` - // SecretAuth is used to provide the name of the secret to use for authentication - AuthSecret *AuthSecret `json:"authSecret,omitempty"` + // Secret is used to provide the name of the secret to use for authentication + Secret *AuthSecret `json:"secretRef,omitempty"` } type AuthSecret struct { diff --git a/operator/api/v1alpha1/zz_generated.deepcopy.go b/operator/api/v1alpha1/zz_generated.deepcopy.go index 4d8fbb2e6..4fc83d2fd 100644 --- a/operator/api/v1alpha1/zz_generated.deepcopy.go +++ b/operator/api/v1alpha1/zz_generated.deepcopy.go @@ -27,21 +27,21 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuthConfig) DeepCopyInto(out *AuthConfig) { +func (in *Auth) DeepCopyInto(out *Auth) { *out = *in - if in.AuthSecret != nil { - in, out := &in.AuthSecret, &out.AuthSecret + if in.Secret != nil { + in, out := &in.Secret, &out.Secret *out = new(AuthSecret) **out = **in } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthConfig. -func (in *AuthConfig) DeepCopy() *AuthConfig { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Auth. 
+func (in *Auth) DeepCopy() *Auth { if in == nil { return nil } - out := new(AuthConfig) + out := new(Auth) in.DeepCopyInto(out) return out } @@ -61,6 +61,31 @@ func (in *AuthSecret) DeepCopy() *AuthSecret { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BaseLedgerEndpoint) DeepCopyInto(out *BaseLedgerEndpoint) { + *out = *in + if in.Local != nil { + in, out := &in.Local, &out.Local + *out = new(LocalLedgerEndpoint) + **out = **in + } + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(NetworkLedgerEndpoint) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseLedgerEndpoint. +func (in *BaseLedgerEndpoint) DeepCopy() *BaseLedgerEndpoint { + if in == nil { + return nil + } + out := new(BaseLedgerEndpoint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Besu) DeepCopyInto(out *Besu) { *out = *in @@ -311,6 +336,41 @@ func (in *LabelReference) DeepCopy() *LabelReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalLedgerEndpoint) DeepCopyInto(out *LocalLedgerEndpoint) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalLedgerEndpoint. +func (in *LocalLedgerEndpoint) DeepCopy() *LocalLedgerEndpoint { + if in == nil { + return nil + } + out := new(LocalLedgerEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkLedgerEndpoint) DeepCopyInto(out *NetworkLedgerEndpoint) { + *out = *in + if in.Auth != nil { + in, out := &in.Auth, &out.Auth + *out = new(Auth) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkLedgerEndpoint. +func (in *NetworkLedgerEndpoint) DeepCopy() *NetworkLedgerEndpoint { + if in == nil { + return nil + } + out := new(NetworkLedgerEndpoint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Paladin) DeepCopyInto(out *Paladin) { *out = *in @@ -670,7 +730,12 @@ func (in *PaladinSpec) DeepCopyInto(out *PaladinSpec) { } if in.AuthConfig != nil { in, out := &in.AuthConfig, &out.AuthConfig - *out = new(AuthConfig) + *out = new(Auth) + (*in).DeepCopyInto(*out) + } + if in.BaseLedgerEndpoint != nil { + in, out := &in.BaseLedgerEndpoint, &out.BaseLedgerEndpoint + *out = new(BaseLedgerEndpoint) (*in).DeepCopyInto(*out) } in.Service.DeepCopyInto(&out.Service) diff --git a/operator/build.gradle b/operator/build.gradle index 83ad813ae..0a6c28fd4 100644 --- a/operator/build.gradle +++ b/operator/build.gradle @@ -61,6 +61,8 @@ ext { operatorImageTag = project.hasProperty('operatorImageTag') ? project.operatorImageTag : 'test' paladinImageName = project.hasProperty('paladinImageName') ? project.paladinImageName : 'paladin' paladinImageTag = project.hasProperty('paladinImageTag') ? project.paladinImageTag : 'test' + + artifactDir = project.hasProperty('artifactDir') ? 
project.artifactDir : 'artifacts' } def printClusterStatus(String namespace) { @@ -158,7 +160,7 @@ task copyZetoSolidity(type: Copy) { task buildContractSamples(type: Exec, dependsOn: [copySolidity, copyZetoSolidity]) { commandLine "go", "run", "./contractpkg", "generate", "./contractpkg/contract_map.json" } - + // Task to build the Paladin image from the parent project task buildPaladinImage { dependsOn ':docker' // Builds the Paladin image in the parent project @@ -216,6 +218,10 @@ task prepareOperatorChart(type: Exec, dependsOn: [buildContractSamples]) { commandLine "go", "run", "./contractpkg", "template", "config/samples", "charts/paladin-operator/templates/samples" } +task prepareArtifacts(type: Exec, dependsOn: [buildContractSamples]) { + commandLine "go", "run", "./contractpkg", "artifacts", "config/samples", "${artifactDir}" +} + // Task to install the operator using Helm task installOperator(type: Exec, dependsOn: [installCrds, promoteKindImages, prepareOperatorChart]) { executable 'make' diff --git a/operator/charts/paladin-operator-crd/Chart.yaml b/operator/charts/paladin-operator-crd/Chart.yaml index 00b5cb88c..64325520c 100644 --- a/operator/charts/paladin-operator-crd/Chart.yaml +++ b/operator/charts/paladin-operator-crd/Chart.yaml @@ -1,21 +1,13 @@ apiVersion: v2 name: paladin-operator-crd description: A Helm chart for Kubernetes -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. + type: application -# This is the chart version. 
This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.1 -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "0.0.1" + +# The chart is a dependency of the paladin-operator chart, +# please make sure to update the paladin-operator chart dependencies if you update this chart version. +# +# This chart version is NOT overwritten at the release, if you want to release a new CRD chart you must bump the version in this file and run the release pipeline +version: 0.0.2 + +appVersion: "0.0.2" diff --git a/operator/charts/paladin-operator/.helmignore b/operator/charts/paladin-operator/.helmignore index 0e8a0eb36..260a32bbb 100644 --- a/operator/charts/paladin-operator/.helmignore +++ b/operator/charts/paladin-operator/.helmignore @@ -21,3 +21,7 @@ .idea/ *.tmproj .vscode/ + +# Ignore snapshots +*/tests/ +*.snap* diff --git a/operator/charts/paladin-operator/Chart.yaml b/operator/charts/paladin-operator/Chart.yaml index 3c654fcfe..326c7eb0d 100644 --- a/operator/charts/paladin-operator/Chart.yaml +++ b/operator/charts/paladin-operator/Chart.yaml @@ -2,30 +2,17 @@ apiVersion: v2 name: paladin-operator description: A Helm chart for Kubernetes -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. 
Library charts do not define any templates and therefore cannot be deployed. type: application -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.0.2 +# The chart version is overwritten at the release +version: 0.0.0 -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "0.0.2" +# The app version is overwritten at the release +appVersion: "0.0.0" dependencies: - name: paladin-operator-crd - version: 0.0.1 + version: 0.0.2 repository: "file://../paladin-operator-crd/" condition: installCRDs diff --git a/operator/charts/paladin-operator/tests/snapshot_test.yaml b/operator/charts/paladin-operator/tests/snapshot_test.yaml index c0c83d923..945b0e7e9 100644 --- a/operator/charts/paladin-operator/tests/snapshot_test.yaml +++ b/operator/charts/paladin-operator/tests/snapshot_test.yaml @@ -1,16 +1,33 @@ suite: Snapshot tests tests: - it: Basic snapshot test - when: snapshot asserts: - matchSnapshot: {} set: unittest: true - it: Change namespace - when: snapshot asserts: - matchSnapshot: {} set: unittest: true operator.namespace: test + - it: Mode none + asserts: + - matchSnapshot: {} + set: + unittest: true + mode: none + - it: Mode basenet + asserts: + - matchSnapshot: {} + set: + unittest: true + mode: basenet + - it: Mode devnet + asserts: + - matchSnapshot: {} + set: + unittest: true + mode: devnet + diff --git a/operator/charts/paladin-operator/values.yaml b/operator/charts/paladin-operator/values.yaml index 61605ccdf..f0a5992b4 100644 --- a/operator/charts/paladin-operator/values.yaml +++ 
b/operator/charts/paladin-operator/values.yaml @@ -1,7 +1,7 @@ # Installation mode. This setting determines which Custom Resources (CRs) will be installed by default when deploying this chart. # Supported modes: -# - devnet: Installs a default Paladin network (3 nodes) along with the related Smart Contracts. -# - smartcontractdeployment: Deploys the Smart Contracts without installing the Paladin network. +# - devnet: Installs a default Paladin network (3 nodes) along with the related Domains and Smart Contracts CRs. +# - basenet: Deploys the Domains and Smart Contracts CRs without installing the Paladin network. # - none (or left empty): Only the operator will be installed. mode: devnet diff --git a/operator/config/crd/bases/core.paladin.io_paladins.yaml b/operator/config/crd/bases/core.paladin.io_paladins.yaml index 7f5ac99ed..0d878db77 100644 --- a/operator/config/crd/bases/core.paladin.io_paladins.yaml +++ b/operator/config/crd/bases/core.paladin.io_paladins.yaml @@ -47,16 +47,11 @@ spec: properties: authConfig: description: |- - AuthConfig is used to provide authentication details for blockchain connections - If this is set, it will override the auth details in the config + Deprecated: Use 'baseLedgerEndpoint' instead. 
Example: + { "baseLedgerEndpoint": {"type": "network", "network": {"auth": {}}} } properties: - authMethod: - description: auth method to use for the connection - enum: - - secret - type: string - authSecret: - description: SecretAuth is used to provide the name of the secret + secretRef: + description: Secret is used to provide the name of the secret to use for authentication properties: name: @@ -65,13 +60,76 @@ spec: required: - name type: object + type: + description: auth method to use for the connection + enum: + - secret + type: string required: - - authMethod + - type + type: object + baseLedgerEndpoint: + description: BaseLedgerEndpoint specifies the base endpoint for the + ledger + properties: + endpoint: + description: Network specifies the configuration when the type + is 'network'. + properties: + auth: + properties: + secretRef: + description: Secret is used to provide the name of the + secret to use for authentication + properties: + name: + description: The name of the secret to use for authentication + type: string + required: + - name + type: object + type: + description: auth method to use for the connection + enum: + - secret + type: string + required: + - type + type: object + jsonrpc: + type: string + ws: + type: string + required: + - jsonrpc + - ws + type: object + local: + description: Local specifies the configuration when the type is + 'local'. + properties: + nodeName: + description: NodeName specifies the name of the local node. + type: string + required: + - nodeName + type: object + type: + description: Type specifies the type of the endpoint. + enum: + - local + - network + type: string + required: + - type type: object besuNode: description: |- - Optionally bind to a local besu node deployed with this operator - (vs. configuring a connection to a production blockchain network) + Deprecated: Use 'baseLedgerEndpoint' instead. 
Example: + { "baseLedgerEndpoint": {"type": "local", "local": {"nodeName": "node-name"}} } + + Optionally bind to a local Besu node deployed with this operator + (vs. configuring a connection to a production blockchain network). type: string config: description: Settings from this config will be loaded as YAML and diff --git a/operator/config/samples/core_v1alpha1_paladin_node1.yaml b/operator/config/samples/core_v1alpha1_paladin_node1.yaml index cb1827b84..238ec3be3 100644 --- a/operator/config/samples/core_v1alpha1_paladin_node1.yaml +++ b/operator/config/samples/core_v1alpha1_paladin_node1.yaml @@ -19,7 +19,10 @@ spec: database: mode: sidecarPostgres migrationMode: auto - besuNode: node1 + baseLedgerEndpoint: + type: local + local: + nodeName: node1 secretBackedSigners: - name: signer-1 secret: node1.keys diff --git a/operator/config/samples/core_v1alpha1_paladin_node2.yaml b/operator/config/samples/core_v1alpha1_paladin_node2.yaml index 5b6bdf87f..7f022ffbf 100644 --- a/operator/config/samples/core_v1alpha1_paladin_node2.yaml +++ b/operator/config/samples/core_v1alpha1_paladin_node2.yaml @@ -15,7 +15,10 @@ spec: database: mode: sidecarPostgres migrationMode: auto - besuNode: node2 + baseLedgerEndpoint: + type: local + local: + nodeName: node2 secretBackedSigners: - name: signer-1 secret: node2.keys diff --git a/operator/config/samples/core_v1alpha1_paladin_node3.yaml b/operator/config/samples/core_v1alpha1_paladin_node3.yaml index 429ff09a7..73955eb26 100644 --- a/operator/config/samples/core_v1alpha1_paladin_node3.yaml +++ b/operator/config/samples/core_v1alpha1_paladin_node3.yaml @@ -15,7 +15,10 @@ spec: database: mode: sidecarPostgres migrationMode: auto - besuNode: node3 + baseLedgerEndpoint: + type: local + local: + nodeName: node3 secretBackedSigners: - name: signer-1 secret: node3.keys diff --git a/operator/contractpkg/main.go b/operator/contractpkg/main.go index d26064cc1..bcc5751ae 100644 --- a/operator/contractpkg/main.go +++ 
b/operator/contractpkg/main.go @@ -17,6 +17,8 @@ limitations under the License. package main import ( + "archive/tar" + "compress/gzip" "encoding/json" "fmt" "io" @@ -34,29 +36,15 @@ import ( "sigs.k8s.io/yaml" ) -func main() { +// file names that are basenet specific +var basenet = []string{"issuer", "paladindomain", "paladinregistry", "smartcontractdeployment", "transactioninvoke"} - if len(os.Args) < 2 { - fmt.Fprintln(os.Stderr, fmt.Errorf("usage: go run ./contractpkg generate|template [ARGS]")) - os.Exit(1) - return - } - switch os.Args[1] { - case "generate": - if err := generateSmartContracts(); err != nil { - fmt.Fprintln(os.Stderr, err.Error()) - os.Exit(1) - } - case "template": - if err := template(); err != nil { - fmt.Fprintln(os.Stderr, err.Error()) - os.Exit(1) - } - default: - fmt.Fprintln(os.Stderr, fmt.Errorf("usage: go run ./contractpkg generate|template [ARGS]")) - os.Exit(1) - } - os.Exit(0) +// file names that are devnet specific +var devnet = []string{"besu_node", "paladin_node", "genesis", "paladinregistration"} + +var scope = map[string][]string{ + "basenet": basenet, + "devnet": append(devnet, basenet...), } type ContractMap map[string]*ContractMapBuild @@ -67,9 +55,15 @@ type ContractMapBuild struct { Params any `json:"params"` } +var cmd = map[string]func() error{ + "generate": generateSmartContracts, + "template": template, + "artifacts": generateArtifacts, +} + func generateSmartContracts() error { if len(os.Args) < 3 { - return fmt.Errorf("usage: go run ./contractpkg generate [path/to/contractMap.json]") + return fmt.Errorf("usage: go run ./%s %s [path/to/contractMap.json]", filepath.Base(os.Args[0]), os.Args[1]) } var buildMap ContractMap @@ -197,7 +191,7 @@ func (m *ContractMap) process(name string, b *ContractMapBuild) error { // adjust all .yaml files in the directory to use the new template syntax func template() error { if len(os.Args) < 4 { - return fmt.Errorf("usage: go run ./contractpkg template [src] [dist]") + return 
fmt.Errorf("usage: go run ./%s %s [src] [dist]", filepath.Base(os.Args[0]), os.Args[1]) } srcDir := os.Args[2] destDir := os.Args[3] @@ -258,25 +252,26 @@ func template() error { newContent := pattern.ReplaceAllString(string(content), "{{ `{{${1}}}` }}") // Add conditional wrapper around the content - conditions := []string{"(eq .Values.mode \"devnet\")"} - - if strings.Contains(file, "smartcontractdeployment") { - // Include additional condition if file contains "smartcontractdeployment" - conditions = append(conditions, "(eq .Values.mode \"smartcontractdeployment\")") - } - - // Build the condition string for the template + vScopes := scopes(file) + conditions := []string{} var condition string - if len(conditions) == 1 { - // Single condition doesn't need 'or' - condition = conditions[0] - } else { - // Multiple conditions use 'or' to combine them - condition = fmt.Sprintf("(or %s)", strings.Join(conditions, " ")) + for _, s := range vScopes { + conditions = append(conditions, fmt.Sprintf("(eq .Values.mode \"%s\")", s)) + + // Build the condition string for the template + if len(conditions) == 1 { + // Single condition doesn't need 'or' + condition = conditions[0] + } else { + // Multiple conditions use 'or' to combine them + condition = fmt.Sprintf("(or %s)", strings.Join(conditions, " ")) + } } // Wrap newContent with the conditional template - newContent = fmt.Sprintf("{{- if %s }}\n\n%s\n{{- end }}", condition, newContent) + if len(condition) != 0 { + newContent = fmt.Sprintf("{{- if %s }}\n\n%s\n{{- end }}", condition, newContent) + } // Write the modified content back to the same file err = os.WriteFile(file, []byte(newContent), fs.FileMode(0644)) @@ -290,6 +285,150 @@ func template() error { return nil } +func generateArtifacts() error { + if len(os.Args) < 4 { + return fmt.Errorf("usage: go run ./%s %s [srcDir] [outDir]", filepath.Base(os.Args[0]), os.Args[1]) + } + srcDir := os.Args[2] + outDir := os.Args[3] + + // Create the output directory if it 
doesn't exist + err := os.MkdirAll(outDir, 0755) + if err != nil { + return fmt.Errorf("Error creating directory %s: %v", outDir, err) + } + + // For each scope, combine the YAML files + for scopeName := range scope { + combinedContent := "" + // Collect all files that match the scope + files, err := filepath.Glob(filepath.Join(srcDir, "*.yaml")) + if err != nil { + return fmt.Errorf("Error finding YAML files in %s: %v", srcDir, err) + } + + for _, file := range files { + filename := filepath.Base(file) + // Check if the file belongs to the current scope + if fileBelongsToScope(filename, scopeName) { + content, err := os.ReadFile(file) + if err != nil { + return fmt.Errorf("Error reading file %s: %v", file, err) + } + // Add a YAML document separator if needed + if len(combinedContent) > 0 { + combinedContent += "\n---\n" + } + combinedContent += string(content) + } + } + + // Write the combined content to a file + if combinedContent != "" { + outFile := filepath.Join(outDir, fmt.Sprintf("%s.yaml", scopeName)) + err = os.WriteFile(outFile, []byte(combinedContent), 0644) + if err != nil { + return fmt.Errorf("Error writing combined YAML file %s: %v", outFile, err) + } + fmt.Printf("Combined YAML for scope '%s' written to %s\n", scopeName, outFile) + } else { + fmt.Printf("No YAML files found for scope '%s'\n", scopeName) + } + } + + // Create a .tar.gz archive for all YAML files in the source directory + err = createTarGz(srcDir, filepath.Join(outDir, "artifacts.tar.gz")) + if err != nil { + return fmt.Errorf("Error creating tar.gz archive: %v", err) + } + + fmt.Printf("Tar.gz archive created at %s\n", filepath.Join(outDir, "artifacts.tar.gz")) + return nil +} + +// createTarGz compresses all YAML files in the source directory into a .tar.gz archive +func createTarGz(srcDir, destFile string) error { + // Create the output file + outFile, err := os.Create(destFile) + if err != nil { + return fmt.Errorf("Error creating tar.gz file %s: %v", destFile, err) + } + defer 
outFile.Close() + + // Create a gzip writer + gw := gzip.NewWriter(outFile) + defer gw.Close() + + // Create a tar writer + tw := tar.NewWriter(gw) + defer tw.Close() + + // Walk through the source directory and add .yaml files to the archive + err = filepath.Walk(srcDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip directories + if info.IsDir() { + return nil + } + + // Only add YAML files + if filepath.Ext(path) == ".yaml" { + file, err := os.Open(path) + if err != nil { + return fmt.Errorf("Error opening file %s: %v", path, err) + } + defer file.Close() + + // Create a tar header for the file + header := &tar.Header{ + Name: filepath.Base(path), + Size: info.Size(), + Mode: int64(info.Mode()), + ModTime: info.ModTime(), + } + if err := tw.WriteHeader(header); err != nil { + return fmt.Errorf("Error writing tar header for file %s: %v", path, err) + } + + // Copy the file content to the tar writer + _, err = io.Copy(tw, file) + if err != nil { + return fmt.Errorf("Error writing file %s to tar: %v", path, err) + } + } + return nil + }) + if err != nil { + return fmt.Errorf("Error walking the directory %s: %v", srcDir, err) + } + + return nil +} +func fileBelongsToScope(filename, scopeName string) bool { + for _, s := range scopes(filename) { + if s == scopeName { + return true + } + } + return false +} + +func scopes(filename string) []string { + var s []string + for k, v := range scope { + for _, f := range v { + if strings.Contains(filename, f) { + s = append(s, k) + break + } + } + } + return s +} + // Helper function to copy a file from src to dst func copyFile(src, dst string) error { // Open the source file @@ -320,3 +459,28 @@ func copyFile(src, dst string) error { return nil } + +func usageMessage() string { + commands := []string{} + for k := range cmd { + commands = append(commands, k) + } + return fmt.Sprintf("usage: go run ./%s %s [ARGS]", filepath.Base(os.Args[0]), strings.Join(commands, "|")) +} 
+ +func main() { + + if len(os.Args) < 2 { + fmt.Fprintln(os.Stderr, fmt.Errorf(usageMessage())) + os.Exit(1) + } + if f, ok := cmd[os.Args[1]]; ok { + if err := f(); err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(1) + } + return + } + fmt.Fprintln(os.Stderr, fmt.Errorf(usageMessage())) + os.Exit(1) +} diff --git a/operator/internal/controller/besu_controller_test.go b/operator/internal/controller/besu_controller_test.go index ae9b19385..d07fc59c1 100644 --- a/operator/internal/controller/besu_controller_test.go +++ b/operator/internal/controller/besu_controller_test.go @@ -99,7 +99,7 @@ var _ = Describe("Besu Controller", func() { func TestBesu_GetLabels(t *testing.T) { // Mock configuration config := config.Config{ - Paladin: config.Template{ + Besu: config.Template{ Labels: map[string]string{ "env": "production", "tier": "backend", @@ -128,11 +128,14 @@ func TestBesu_GetLabels(t *testing.T) { // Assertions expectedLabels := map[string]string{ - "app": "besu-test-node", - "env": "production", - "tier": "backend", - "version": "v1", + "env": "production", + "tier": "backend", + "version": "v1", + "app.kubernetes.io/instance": "test-node", + "app.kubernetes.io/name": "besu-test-node", + "app.kubernetes.io/part-of": "paladin", } + assert.Equal(t, len(expectedLabels), len(labels), "labels should have the same length") assert.Equal(t, expectedLabels, labels, "labels should match expected labels") } diff --git a/operator/internal/controller/common_test.go b/operator/internal/controller/common_test.go index 060bb1d82..178b110b4 100644 --- a/operator/internal/controller/common_test.go +++ b/operator/internal/controller/common_test.go @@ -2,10 +2,193 @@ package controller import ( "testing" + "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "context" + "sort" + + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + corev1alpha1 
"github.com/kaleido-io/paladin/operator/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" ) +// Test mergeServicePorts +func TestMergeServicePorts(t *testing.T) { + svcSpec := &corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Name: "http", Port: 80, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(80)}, + {Name: "https", Port: 443, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(443)}, + }, + } + + requiredPorts := []corev1.ServicePort{ + {Name: "http", Port: 8080, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(8080)}, + {Name: "metrics", Port: 9090, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(9090)}, + } + + expectedPorts := []corev1.ServicePort{ + {Name: "http", Port: 80, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(8080)}, + {Name: "https", Port: 443, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(443)}, + {Name: "metrics", Port: 9090, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(9090)}, + } + + mergeServicePorts(svcSpec, requiredPorts) + + sort.Slice(svcSpec.Ports, func(i, j int) bool { + return svcSpec.Ports[i].Name < svcSpec.Ports[j].Name + }) + + assert.Equal(t, 3, len(svcSpec.Ports), "Expected 3 ports") + assert.Equal(t, expectedPorts, svcSpec.Ports) +} + +// Test deDupAndSortInLocalNS +func TestDeDupAndSortInLocalNS(t *testing.T) { + var podCRMap = CRMap[corev1.Pod, *corev1.Pod, *corev1.PodList]{ + NewList: func() *corev1.PodList { return new(corev1.PodList) }, + ItemsFor: func(list *corev1.PodList) []corev1.Pod { return list.Items }, + AsObject: func(item *corev1.Pod) *corev1.Pod { return item }, + } + + mockList := &corev1.PodList{ + Items: []corev1.Pod{ + {ObjectMeta: metav1.ObjectMeta{Name: "pod-a"}}, + {ObjectMeta: 
metav1.ObjectMeta{Name: "pod-b"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "pod-a"}}, // Duplicate + }, + } + + sorted := deDupAndSortInLocalNS(podCRMap, mockList) + + expectedNames := []string{"pod-a", "pod-b"} + actualNames := make([]string, len(sorted)) + for i, pod := range sorted { + actualNames[i] = pod.GetName() + } + + assert.Equal(t, 2, len(sorted), "Expected 2 pods") + assert.Equal(t, expectedNames, actualNames, "Expected pod names to match") +} + +// Test setCondition +func TestSetCondition(t *testing.T) { + var conditions []metav1.Condition + + setCondition(&conditions, corev1alpha1.ConditionType("Ready"), metav1.ConditionTrue, corev1alpha1.ConditionReason("DeploymentSucceeded"), "Deployment successful") + + require.Equal(t, 1, len(conditions), "Expected 1 condition") + + condition := conditions[0] + assert.False(t, condition.Type != "Ready" || condition.Status != metav1.ConditionTrue || condition.Reason != "DeploymentSucceeded", "Condition type should be Ready") +} + +// Mock client for reconcileAll +type mockClient struct{} + +func (m *mockClient) List(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error { + podList := obj.(*corev1.PodList) + *podList = corev1.PodList{ + Items: []corev1.Pod{ + {ObjectMeta: metav1.ObjectMeta{Name: "pod-1", Namespace: "test-ns"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "pod-2", Namespace: "test-ns"}}, + }, + } + return nil +} + +// MockRateLimitingQueue is a simple implementation of the workqueue.RateLimitingInterface for testing purposes. 
+type MockRateLimitingQueue struct { + items []reconcile.Request +} + +func (q *MockRateLimitingQueue) Add(item interface{}) { + req, ok := item.(reconcile.Request) + if ok { + q.items = append(q.items, req) + } +} +func (q *MockRateLimitingQueue) Len() int { + return len(q.items) +} +func (q *MockRateLimitingQueue) Get() (item interface{}, shutdown bool) { + if len(q.items) == 0 { + return nil, true + } + item, q.items = q.items[0], q.items[1:] + return item, false +} +func (q *MockRateLimitingQueue) Done(item interface{}) {} +func (q *MockRateLimitingQueue) ShutDown() {} +func (q *MockRateLimitingQueue) ShuttingDown() bool { return false } +func (q *MockRateLimitingQueue) ShutDownWithDrain() {} +func (q *MockRateLimitingQueue) AddRateLimited(item interface{}) {} +func (q *MockRateLimitingQueue) Forget(item interface{}) {} +func (q *MockRateLimitingQueue) NumRequeues(item interface{}) int { return 0 } +func (q *MockRateLimitingQueue) AddAfter(item interface{}, duration time.Duration) {} + +func TestReconcileAll(t *testing.T) { + // Define CRMap + var podCRMap = CRMap[corev1.Pod, *corev1.Pod, *corev1.PodList]{ + NewList: func() *corev1.PodList { + return &corev1.PodList{} + }, + ItemsFor: func(list *corev1.PodList) []corev1.Pod { + return list.Items + }, + AsObject: func(item *corev1.Pod) *corev1.Pod { + return item + }, + } + + // Scheme setup + scheme := runtime.NewScheme() + _ = corev1.AddToScheme(scheme) + + // Fake client setup + client := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects( + &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-1", Namespace: "test-ns"}}, + &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-2", Namespace: "test-ns"}}, + ). 
+ Build() + + // Mock queue to capture reconcile requests + mockQueue := &MockRateLimitingQueue{} + + // Create the handler + handler := reconcileAll(podCRMap, client) + + // Simulate a create event for a new pod + p := corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-3", Namespace: "test-ns"}} + e := event.CreateEvent{Object: &p} + handler.Create(context.TODO(), e, mockQueue) + + // Verify the captured requests + expectedRequests := sets.NewString( + "pod-1/test-ns", + "pod-2/test-ns", + ) + + actualRequests := sets.NewString() + for _, req := range mockQueue.items { + actualRequests.Insert(req.Name + "/" + req.Namespace) + } + + assert.Equal(t, expectedRequests, actualRequests, "Expected reconcile requests to match") +} + func TestMapToStruct(t *testing.T) { type example struct { diff --git a/operator/internal/controller/paladin_controller.go b/operator/internal/controller/paladin_controller.go index 25b1d7a4d..05d74f8e0 100644 --- a/operator/internal/controller/paladin_controller.go +++ b/operator/internal/controller/paladin_controller.go @@ -707,7 +707,7 @@ func (r *PaladinReconciler) generatePaladinConfig(ctx context.Context, node *cor } // Override the default config with the user provided config - if err := r.generatePaladinAuthConfig(ctx, node, &pldConf); err != nil { + if err := r.generatePaladinBlockchainConfig(ctx, node, &pldConf); err != nil { return "", nil, err } @@ -736,29 +736,64 @@ func (r *PaladinReconciler) generatePaladinConfig(ctx context.Context, node *cor return "", nil, err } - // Bind to the a local besu node if we've been configured with one - if node.Spec.BesuNode != "" { - pldConf.Blockchain.HTTP.URL = fmt.Sprintf("http://%s:8545", generateBesuServiceHostname(node.Spec.BesuNode, node.Namespace)) - pldConf.Blockchain.WS.URL = fmt.Sprintf("ws://%s:8546", generateBesuServiceHostname(node.Spec.BesuNode, node.Namespace)) - } b, err := yaml.Marshal(&pldConf) return string(b), tlsSecrets, err } +func (r *PaladinReconciler) 
generatePaladinBlockchainConfig(ctx context.Context, node *corev1alpha1.Paladin, pldConf *pldconf.PaladinConfig) error { + if node.Spec.BaseLedgerEndpoint == nil { + // Alternatively, the config can be provided in the spec.config + + // fallback: check the deprecated fields + if node.Spec.BesuNode != "" { + pldConf.Blockchain.HTTP.URL = fmt.Sprintf("http://%s:8545", generateBesuServiceHostname(node.Spec.BesuNode, node.Namespace)) + pldConf.Blockchain.WS.URL = fmt.Sprintf("ws://%s:8546", generateBesuServiceHostname(node.Spec.BesuNode, node.Namespace)) + } else { + if err := r.generatePaladinAuthConfig(ctx, node, node.Spec.AuthConfig, pldConf); err != nil { + return err + } + } -func (r *PaladinReconciler) generatePaladinAuthConfig(ctx context.Context, node *corev1alpha1.Paladin, pldConf *pldconf.PaladinConfig) error { - // generate the Paladin auth config - if node.Spec.AuthConfig == nil { return nil } + endpoint := node.Spec.BaseLedgerEndpoint + switch endpoint.Type { + case corev1alpha1.EndpointTypeLocal: + lEndpoint := endpoint.Local + if lEndpoint == nil { + return fmt.Errorf("local endpoint is nil") + } + pldConf.Blockchain.HTTP.URL = fmt.Sprintf("http://%s:8545", generateBesuServiceHostname(lEndpoint.NodeName, node.Namespace)) + pldConf.Blockchain.WS.URL = fmt.Sprintf("ws://%s:8546", generateBesuServiceHostname(lEndpoint.NodeName, node.Namespace)) + case corev1alpha1.EndpointTypeNetwork: + nEndpoint := endpoint.Endpoint + if nEndpoint == nil { + return fmt.Errorf("network endpoint is nil") + } + pldConf.Blockchain.HTTP.URL = nEndpoint.JSONRPC + pldConf.Blockchain.WS.URL = nEndpoint.WS + if err := r.generatePaladinAuthConfig(ctx, node, nEndpoint.Auth, pldConf); err != nil { + return err + } + default: + return fmt.Errorf("unsupported endpoint type '%s'", endpoint.Type) + } - switch node.Spec.AuthConfig.AuthMethod { - case corev1alpha1.AuthMethodSecret: - if node.Spec.AuthConfig.AuthSecret == nil { - return fmt.Errorf("AuthSecret must be provided when using 
AuthMethodSecret") + return nil +} +func (r *PaladinReconciler) generatePaladinAuthConfig(ctx context.Context, node *corev1alpha1.Paladin, authConfig *corev1alpha1.Auth, pldConf *pldconf.PaladinConfig) error { + + if authConfig == nil { + return nil + } + + switch authConfig.Type { + case corev1alpha1.AuthTypeSecret: + if authConfig.Secret == nil { + return fmt.Errorf("AuthSecret must be provided when using AuthTypeSecret") } - secretName := node.Spec.AuthConfig.AuthSecret.Name + secretName := authConfig.Secret.Name if secretName == "" { - return fmt.Errorf("AuthSecret must be provided when using AuthMethodSecret") + return fmt.Errorf("AuthSecret must be provided when using AuthTypeSecret") } sec := &corev1.Secret{} if err := r.Client.Get(ctx, types.NamespacedName{Name: secretName, Namespace: node.Namespace}, sec); err != nil { @@ -767,8 +802,12 @@ func (r *PaladinReconciler) generatePaladinAuthConfig(ctx context.Context, node if sec.Data == nil { return fmt.Errorf("Secret %s has no data", secretName) } - mapToStruct(sec.Data, &pldConf.Blockchain.HTTP.Auth) - mapToStruct(sec.Data, &pldConf.Blockchain.WS.Auth) + if err := mapToStruct(sec.Data, &pldConf.Blockchain.HTTP.Auth); err != nil { + return err + } + if err := mapToStruct(sec.Data, &pldConf.Blockchain.WS.Auth); err != nil { + return err + } } return nil } diff --git a/operator/internal/controller/paladin_controller_test.go b/operator/internal/controller/paladin_controller_test.go index cf51e1b01..aeec3189b 100644 --- a/operator/internal/controller/paladin_controller_test.go +++ b/operator/internal/controller/paladin_controller_test.go @@ -189,31 +189,17 @@ func TestPaladin_GetLabels(t *testing.T) { // Assertions expectedLabels := map[string]string{ - "app": "paladin-test-node", - "env": "production", - "tier": "backend", - "version": "v1", + "env": "production", + "tier": "backend", + "version": "v1", + "app.kubernetes.io/instance": "test-node", + "app.kubernetes.io/name": "paladin-test-node", + 
"app.kubernetes.io/part-of": "paladin", } assert.Equal(t, expectedLabels, labels, "labels should match expected labels") } -// package controllers - -// import ( -// "context" -// "fmt" -// "testing" - -// "github.com/stretchr/testify/assert" -// corev1 "k8s.io/api/core/v1" -// "k8s.io/apimachinery/pkg/types" -// "sigs.k8s.io/controller-runtime/pkg/client/fake" - -// corev1alpha1 "path/to/your/api/v1alpha1" -// "path/to/your/pldconf" -// ) - func TestGeneratePaladinAuthConfig(t *testing.T) { tests := []struct { name string @@ -230,9 +216,14 @@ func TestGeneratePaladinAuthConfig(t *testing.T) { Namespace: "default", }, Spec: corev1alpha1.PaladinSpec{ - AuthConfig: &corev1alpha1.AuthConfig{ - AuthMethod: corev1alpha1.AuthMethodSecret, - AuthSecret: &corev1alpha1.AuthSecret{Name: "test-secret"}, + BaseLedgerEndpoint: &corev1alpha1.BaseLedgerEndpoint{ + Type: corev1alpha1.EndpointTypeNetwork, + Endpoint: &corev1alpha1.NetworkLedgerEndpoint{ + Auth: &corev1alpha1.Auth{ + Type: corev1alpha1.AuthTypeSecret, + Secret: &corev1alpha1.AuthSecret{Name: "test-secret"}, + }, + }, }, }, }, @@ -274,9 +265,16 @@ func TestGeneratePaladinAuthConfig(t *testing.T) { Namespace: "default", }, Spec: corev1alpha1.PaladinSpec{ - AuthConfig: &corev1alpha1.AuthConfig{ - AuthMethod: corev1alpha1.AuthMethodSecret, - AuthSecret: &corev1alpha1.AuthSecret{Name: "test-secret"}, + BaseLedgerEndpoint: &corev1alpha1.BaseLedgerEndpoint{ + Type: corev1alpha1.EndpointTypeNetwork, + Endpoint: &corev1alpha1.NetworkLedgerEndpoint{ + JSONRPC: "https://besu.node", + WS: "wss://besu.mode", + Auth: &corev1alpha1.Auth{ + Type: corev1alpha1.AuthTypeSecret, + Secret: &corev1alpha1.AuthSecret{Name: "test-secret"}, + }, + }, }, }, }, @@ -291,8 +289,13 @@ func TestGeneratePaladinAuthConfig(t *testing.T) { Namespace: "default", }, Spec: corev1alpha1.PaladinSpec{ - AuthConfig: &corev1alpha1.AuthConfig{ - AuthMethod: corev1alpha1.AuthMethodSecret, + BaseLedgerEndpoint: &corev1alpha1.BaseLedgerEndpoint{ + Type: 
corev1alpha1.EndpointTypeNetwork, + Endpoint: &corev1alpha1.NetworkLedgerEndpoint{ + Auth: &corev1alpha1.Auth{ + Type: corev1alpha1.AuthTypeSecret, + }, + }, }, }, }, @@ -303,9 +306,14 @@ func TestGeneratePaladinAuthConfig(t *testing.T) { name: "Secret with no data", node: &corev1alpha1.Paladin{ Spec: corev1alpha1.PaladinSpec{ - AuthConfig: &corev1alpha1.AuthConfig{ - AuthMethod: corev1alpha1.AuthMethodSecret, - AuthSecret: &corev1alpha1.AuthSecret{Name: "empty-secret"}, + BaseLedgerEndpoint: &corev1alpha1.BaseLedgerEndpoint{ + Type: corev1alpha1.EndpointTypeNetwork, + Endpoint: &corev1alpha1.NetworkLedgerEndpoint{ + Auth: &corev1alpha1.Auth{ + Type: corev1alpha1.AuthTypeSecret, + Secret: &corev1alpha1.AuthSecret{Name: "empty-secret"}, + }, + }, }, }, }, @@ -339,7 +347,8 @@ func TestGeneratePaladinAuthConfig(t *testing.T) { // Call the method under test pldConf := &pldconf.PaladinConfig{} - err := reconciler.generatePaladinAuthConfig(ctx, tt.node, pldConf) + + err := reconciler.generatePaladinAuthConfig(ctx, tt.node, tt.node.Spec.BaseLedgerEndpoint.Endpoint.Auth, pldConf) // Verify the results if tt.wantErr { diff --git a/operator/internal/controller/smartcontractdeployment_controller.go b/operator/internal/controller/smartcontractdeployment_controller.go index 239a99644..0f6bd00d0 100644 --- a/operator/internal/controller/smartcontractdeployment_controller.go +++ b/operator/internal/controller/smartcontractdeployment_controller.go @@ -203,14 +203,16 @@ func (r *SmartContractDeploymentReconciler) reconcilePaladin(ctx context.Context } scds := &corev1alpha1.SmartContractDeploymentList{} - r.Client.List(ctx, scds, client.InNamespace(paladin.Namespace)) - reqs := make([]ctrl.Request, 0, len(scds.Items)) + reqs := []ctrl.Request{} - for _, scd := range scds.Items { - if scd.Spec.Node == paladin.Name { - reqs = append(reqs, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(&scd)}) + if err := r.Client.List(ctx, scds, client.InNamespace(paladin.Namespace)); err == 
nil { + for _, scd := range scds.Items { + if scd.Spec.Node == paladin.Name { + reqs = append(reqs, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(&scd)}) + } } } + return reqs } diff --git a/operator/internal/controller/transactioninvoke_controller.go b/operator/internal/controller/transactioninvoke_controller.go index 98a1e64d4..ec4e0b12d 100644 --- a/operator/internal/controller/transactioninvoke_controller.go +++ b/operator/internal/controller/transactioninvoke_controller.go @@ -230,14 +230,16 @@ func (r *TransactionInvokeReconciler) reconcilePaladin(ctx context.Context, obj } tis := &corev1alpha1.TransactionInvokeList{} - r.Client.List(ctx, tis, client.InNamespace(paladin.Namespace)) - reqs := make([]ctrl.Request, 0, len(tis.Items)) + reqs := []ctrl.Request{} - for _, ti := range tis.Items { - if ti.Spec.Node == paladin.Name { - reqs = append(reqs, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(&ti)}) + if err := r.Client.List(ctx, tis, client.InNamespace(paladin.Namespace)); err == nil { + for _, ti := range tis.Items { + if ti.Spec.Node == paladin.Name { + reqs = append(reqs, ctrl.Request{NamespacedName: client.ObjectKeyFromObject(&ti)}) + } } } + return reqs } diff --git a/operator/internal/controller/transactioninvoke_controller_test.go b/operator/internal/controller/transactioninvoke_controller_test.go index a086e4c77..621bf7283 100644 --- a/operator/internal/controller/transactioninvoke_controller_test.go +++ b/operator/internal/controller/transactioninvoke_controller_test.go @@ -47,11 +47,17 @@ var _ = Describe("TransactionInvoke Controller", func() { err := k8sClient.Get(ctx, typeNamespacedName, transactioninvoke) if err != nil && errors.IsNotFound(err) { resource := &corev1alpha1.TransactionInvoke{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "core.paladin.io/v1alpha1", + Kind: "TransactionInvoke", + }, ObjectMeta: metav1.ObjectMeta{ Name: resourceName, Namespace: "default", }, - // TODO(user): Specify other spec details if needed. 
+ Spec: corev1alpha1.TransactionInvokeSpec{ + TxType: "public", + }, } Expect(k8sClient.Create(ctx, resource)).To(Succeed()) } diff --git a/operator/utils/consts.go b/operator/utils/consts.go index b87526a72..39dda3073 100644 --- a/operator/utils/consts.go +++ b/operator/utils/consts.go @@ -15,3 +15,5 @@ */ package utils + +const ApplicationName = "paladin"