From 2728fd22c0d9d17cd240783b1754ee99ccca2a0a Mon Sep 17 00:00:00 2001 From: Vladimir <48120135+izmalk@users.noreply.github.com> Date: Fri, 29 Nov 2024 14:25:25 +0000 Subject: [PATCH] Update additional cases (without Charmed) --- CONTRIBUTING.md | 4 +- README.md | 16 ++--- actions.yaml | 2 +- config.yaml | 6 +- docs/explanation/e-cluster-configuration.md | 14 ++--- docs/explanation/e-hardening.md | 10 ++-- docs/explanation/e-security.md | 59 +++++++++++-------- docs/how-to/h-cluster-migration.md | 17 ++++-- .../h-create-mtls-client-credentials.md | 7 ++- docs/how-to/h-deploy.md | 24 +++++--- docs/how-to/h-enable-encryption.md | 14 +++-- docs/how-to/h-enable-monitoring.md | 6 +- docs/how-to/h-enable-oauth.md | 8 +-- docs/how-to/h-integrate-alerts-dashboards.md | 2 +- docs/how-to/h-manage-units.md | 30 +++++----- docs/how-to/h-upgrade.md | 54 +++++++++-------- docs/index.md | 12 ++-- docs/reference/r-file-system-paths.md | 22 +++---- docs/reference/r-listeners.md | 8 ++- docs/reference/r-performance-tuning.md | 6 +- docs/reference/r-releases/r-rev156_126.md | 18 +++--- docs/reference/r-releases/r-rev156_136.md | 14 ++--- docs/reference/r-requirements.md | 2 +- docs/reference/r-snap-entrypoints.md | 6 +- docs/reference/r-statuses.md | 38 ++++++------ docs/tutorial/t-cleanup-environment.md | 6 +- docs/tutorial/t-deploy.md | 22 +++---- docs/tutorial/t-enable-encryption.md | 12 ++-- docs/tutorial/t-manage-passwords.md | 20 ++++--- docs/tutorial/t-overview.md | 10 ++-- docs/tutorial/t-relate-kafka.md | 26 ++++---- docs/tutorial/t-setup-environment.md | 2 +- metadata.yaml | 4 +- .../prometheus/kafka_metrics.rules | 4 +- src/grafana_dashboards/kafka-metrics.json | 10 ++-- tests/integration/app-charm/actions.yaml | 6 +- tests/integration/app-charm/config.yaml | 2 +- tests/integration/app-charm/metadata.yaml | 2 +- 38 files changed, 287 insertions(+), 238 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 85ae2ec1..39d12afc 100644 --- a/CONTRIBUTING.md +++ 
b/CONTRIBUTING.md @@ -40,13 +40,13 @@ juju model-config logging-config="=INFO;unit=DEBUG" # Build the charm locally charmcraft pack -# Deploy the latest ZooKeeper release +# Deploy the latest Apache ZooKeeper release juju deploy zookeeper --channel edge -n 3 # Deploy the charm juju deploy ./*.charm -n 3 -# After ZooKeeper has initialised, relate the applications +# After Apache ZooKeeper has initialised, relate the applications juju relate kafka zookeeper ``` diff --git a/README.md b/README.md index 8c520aa4..b806b8cf 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ The Charmed Operator can be found on [Charmhub](https://charmhub.io/kafka) and i - SASL/SCRAM auth for Broker-Broker and Client-Broker authentication enabled by default. - Access control management supported with user-provided ACL lists. -As currently Kafka requires a paired ZooKeeper deployment in production, this operator makes use of the [ZooKeeper Operator](https://github.com/canonical/zookeeper-operator) for various essential functions. +As currently Apache Kafka requires a paired Apache ZooKeeper deployment in production, this operator makes use of the [Apache ZooKeeper Operator](https://github.com/canonical/zookeeper-operator) for various essential functions. ### Features checklist @@ -33,7 +33,7 @@ The following are some of the most important planned features and their implemen ## Requirements -For production environments, it is recommended to deploy at least 5 nodes for Zookeeper and 3 for Kafka. +For production environments, it is recommended to deploy at least 5 nodes for Apache ZooKeeper and 3 for Apache Kafka. 
The following requirements are meant to be for production environment: @@ -51,7 +51,7 @@ For more information on how to perform typical tasks, see the How to guides sect ### Deployment -The Kafka and ZooKeeper operators can both be deployed as follows: +The Apache Kafka and Apache ZooKeeper operators can both be deployed as follows: ```shell $ juju deploy zookeeper -n 5 @@ -70,18 +70,18 @@ To watch the process, the `juju status` command can be used. Once all the units juju run-action kafka/leader get-admin-credentials --wait ``` -Apache Kafka ships with `bin/*.sh` commands to do various administrative tasks, e.g `bin/kafka-config.sh` to update cluster configuration, `bin/kafka-topics.sh` for topic management, and many more! The Kafka Charmed Operator provides these commands for administrators to run their desired cluster configurations securely with SASL authentication, either from within the cluster or as an external client. +Apache Kafka ships with `bin/*.sh` commands to do various administrative tasks, e.g `bin/kafka-config.sh` to update cluster configuration, `bin/kafka-topics.sh` for topic management, and many more! The Charmed Apache Kafka Operator provides these commands for administrators to run their desired cluster configurations securely with SASL authentication, either from within the cluster or as an external client. 
-For example, to list the current topics on the Kafka cluster, run the following command: +For example, to list the current topics on the Apache Kafka cluster, run the following command: ```shell BOOTSTRAP_SERVERS=$(juju run-action kafka/leader get-admin-credentials --wait | grep "bootstrap.servers" | cut -d "=" -f 2) juju ssh kafka/leader 'charmed-kafka.topics --bootstrap-server $BOOTSTRAP_SERVERS --list --command-config /var/snap/charmed-kafka/common/client.properties' ``` -Note that Charmed Apache Kafka cluster is secure-by-default: when no other application is related to Kafka, listeners are disabled, thus preventing any incoming connection. However, even for running the commands above, listeners must be enabled. If there are no other applications, you can deploy a `data-integrator` charm and relate it to Kafka to enable listeners. +Note that Charmed Apache Kafka cluster is secure-by-default: when no other application is related to Apache Kafka, listeners are disabled, thus preventing any incoming connection. However, even for running the commands above, listeners must be enabled. If there are no other applications, you can deploy a `data-integrator` charm and relate it to Apache Kafka to enable listeners. -Available Kafka bin commands can be found with: +Available Apache Kafka bin commands can be found with: ``` snap info charmed-kafka @@ -119,7 +119,7 @@ Use the same action without a password parameter to randomly generate a password Currently, the Charmed Apache Kafka Operator supports 1 or more storage volumes. A 10G storage volume will be installed by default for `log.dirs`. This is used for logs storage, mounted on `/var/snap/kafka/common` -When storage is added or removed, the Kafka service will restart to ensure it uses the new volumes. Additionally, log + charm status messages will prompt users to manually reassign partitions so that the new storage volumes are populated. 
By default, Kafka will not assign partitions to new directories/units until existing topic partitions are assigned to it, or a new topic is created. +When storage is added or removed, the Apache Kafka service will restart to ensure it uses the new volumes. Additionally, log + charm status messages will prompt users to manually reassign partitions so that the new storage volumes are populated. By default, Apache Kafka will not assign partitions to new directories/units until existing topic partitions are assigned to it, or a new topic is created. ## Relations diff --git a/actions.yaml b/actions.yaml index 6cc39eaa..05c2a239 100644 --- a/actions.yaml +++ b/actions.yaml @@ -28,7 +28,7 @@ set-tls-private-key: get-admin-credentials: description: Get administrator authentication credentials for client commands - The returned client_properties can be used for Kafka bin commands using `--bootstrap-server` and `--command-config` for admin level administration + The returned client_properties can be used for Apache Kafka bin commands using `--bootstrap-server` and `--command-config` for admin level administration This action must be called on the leader unit. get-listeners: diff --git a/config.yaml b/config.yaml index 81633fe5..75b3d838 100644 --- a/config.yaml +++ b/config.yaml @@ -37,7 +37,7 @@ options: type: int default: 1073741824 message_max_bytes: - description: The largest record batch size allowed by Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.This can be set per topic with the topic level max.message.bytes config. 
+ description: The largest record batch size allowed by Apache Kafka (after compression if compression is enabled). If this is increased and there are consumers older than 0.10.2, the consumers' fetch size must also be increased so that they can fetch record batches this large. In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.This can be set per topic with the topic level max.message.bytes config. type: int default: 1048588 offsets_topic_num_partitions: @@ -81,7 +81,7 @@ options: type: int default: 11 zookeeper_ssl_cipher_suites: - description: Specifies the enabled cipher suites to be used in ZooKeeper TLS negotiation (csv). Overrides any explicit value set via the zookeeper.ssl.ciphersuites system property (note the single word "ciphersuites"). The default value of null means the list of enabled cipher suites is determined by the Java runtime being used. + description: Specifies the enabled cipher suites to be used in Apache ZooKeeper TLS negotiation (csv). Overrides any explicit value set via the zookeeper.ssl.ciphersuites system property (note the single word "ciphersuites"). The default value of null means the list of enabled cipher suites is determined by the Java runtime being used. type: string default: "" profile: @@ -113,6 +113,6 @@ options: type: float default: 0.8 expose_external: - description: "String to determine how to expose the Kafka cluster externally from the Kubernetes cluster. Possible values: 'nodeport', 'none'" + description: "String to determine how to expose the Apache Kafka cluster externally from the Kubernetes cluster. 
Possible values: 'nodeport', 'none'" type: string default: "nodeport" diff --git a/docs/explanation/e-cluster-configuration.md b/docs/explanation/e-cluster-configuration.md index 2222eafd..95305229 100644 --- a/docs/explanation/e-cluster-configuration.md +++ b/docs/explanation/e-cluster-configuration.md @@ -1,18 +1,18 @@ # Overview of a cluster configuration content [Apache Kafka](https://kafka.apache.org) is an open-source distributed event streaming platform that requires an external solution to coordinate and sync metadata between all active brokers. -One of such solutions is [ZooKeeper](https://zookeeper.apache.org). +One of such solutions is [Apache ZooKeeper](https://zookeeper.apache.org). -Here are some of the responsibilities of ZooKeeper in a Kafka cluster: +Here are some of the responsibilities of Apache ZooKeeper in an Apache Kafka cluster: - **Cluster membership**: through regular heartbeats, it keeps tracks of the brokers entering and leaving the cluster, providing an up-to-date list of brokers. -- **Controller election**: one of the Kafka brokers is responsible for managing the leader/follower status for all the partitions. ZooKeeper is used to elect a controller and to make sure there is only one of it. -- **Topic configuration**: each topic can be replicated on multiple partitions. ZooKeeper keeps track of the locations of the partitions and replicas, so that high-availability is still attained when a broker shuts down. Topic-specific configuration overrides (e.g. message retention and size) are also stored in ZooKeeper. -- **Access control and authentication**: ZooKeeper stores access control lists (ACL) for Kafka resources, to ensure only the proper, authorized, users or groups can read or write on each topic. +- **Controller election**: one of the Apache Kafka brokers is responsible for managing the leader/follower status for all the partitions. Apache ZooKeeper is used to elect a controller and to make sure there is only one of it. 
+- **Topic configuration**: each topic can be replicated on multiple partitions. Apache ZooKeeper keeps track of the locations of the partitions and replicas, so that high-availability is still attained when a broker shuts down. Topic-specific configuration overrides (e.g. message retention and size) are also stored in Apache ZooKeeper. +- **Access control and authentication**: Apache ZooKeeper stores access control lists (ACL) for Apache Kafka resources, to ensure only the proper, authorized, users or groups can read or write on each topic. -The values for the configuration parameters mentioned above are stored in znodes, the hierarchical unit data structure in ZooKeeper. +The values for the configuration parameters mentioned above are stored in znodes, the hierarchical unit data structure in Apache ZooKeeper. A znode is represented by its path and can both have data associated with it and children nodes. -ZooKeeper clients interact with its data structure similarly to a remote file system that would be sync-ed between the ZooKeeper units for high availability. +Apache ZooKeeper clients interact with its data structure similarly to a remote file system that would be sync-ed between the Apache ZooKeeper units for high availability. For a Charmed Apache Kafka related to a Charmed Apache ZooKeeper: - the list of the broker ids of the cluster can be found in `/kafka/brokers/ids` - the endpoint used to access the broker with id `0` can be found in `/kafka/brokers/ids/0` diff --git a/docs/explanation/e-hardening.md b/docs/explanation/e-hardening.md index 9c2af5f5..903276e4 100644 --- a/docs/explanation/e-hardening.md +++ b/docs/explanation/e-hardening.md @@ -5,7 +5,7 @@ a secure deployment of [Charmed Apache Kafka](https://github.com/canonical/kafka The document is divided into the following sections: 1. Environment, outlining the recommendation for deploying a secure environment -2. 
Applications, outlining the product features that enable a secure deployment of a Kafka cluster +2. Applications, outlining the product features that enable a secure deployment of an Apache Kafka cluster 3. Additional resources, providing any further information about security and compliance ## Environment @@ -58,7 +58,7 @@ Juju user credentials must be stored securely and rotated regularly to limit the In the following we provide guidance on how to harden your deployment using: 1. Operating System -2. Kafka and ZooKeeper Security Upgrades +2. Apache Kafka and Apache ZooKeeper Security Upgrades 3. Encryption 4. Authentication 5. Monitoring and Auditing @@ -68,7 +68,7 @@ In the following we provide guidance on how to harden your deployment using: Charmed Apache Kafka and Charmed Apache ZooKeeper currently run on top of Ubuntu 22.04. Deploy a [Landscape Client Charm](https://charmhub.io/landscape-client?) in order to connect the underlying VM to a Landscape User Account to manage security upgrades and integrate Ubuntu Pro subscriptions. -### Kafka and ZooKeeper Security Upgrades +### Apache Kafka and Apache ZooKeeper Security Upgrades Charmed Apache Kafka and Charmed Apache ZooKeeper operators install a pinned revision of the [Charmed Apache Kafka snap](https://snapcraft.io/charmed-kafka) and [Charmed Apache ZooKeeper snap](https://snapcraft.io/charmed-zookeeper), respectively, in order to provide reproducible and secure environments. @@ -79,7 +79,7 @@ For more information on how to refresh the charm, see the [how-to upgrade](https ### Encryption Charmed Apache Kafka must be deployed with encryption enabled. -To do that, you need to relate Kafka and ZooKeeper charms to one of the TLS certificate operator charms. +To do that, you need to relate Apache Kafka and Apache ZooKeeper charms to one of the TLS certificate operator charms. 
Please refer to the [Charming Security page](https://charmhub.io/topics/security-with-x-509-certificates) for more information on how to select the right certificate provider for your use-case. @@ -107,7 +107,7 @@ Refer to How-To user guide for more information on: * [how to integrate the Charmed Apache Kafka deployment with COS](/t/charmed-kafka-how-to-enable-monitoring/10283) * [how to customise the alerting rules and dashboards](/t/charmed-kafka-documentation-how-to-integrate-custom-alerting-rules-and-dashboards/13431) -External user access to Kafka is logged to the `kafka-authorizer.log` that is pushes to [Loki endpoint](https://charmhub.io/loki-k8s) and exposed via [Grafana](https://charmhub.io/grafana), both components being part of the COS stack. +External user access to Apache Kafka is logged to the `kafka-authorizer.log` that is pushed to [Loki endpoint](https://charmhub.io/loki-k8s) and exposed via [Grafana](https://charmhub.io/grafana), both components being part of the COS stack. Access denials are logged at INFO level, whereas allowed accesses are logged at DEBUG level. Depending on the auditing needs, customize the logging level either for all logs via the [`log_level`](https://charmhub.io/kafka/configurations?channel=3/stable#log_level) config option or only tune the logging level of the `authorizerAppender` in the `log4j.properties` file. Refer to the Reference documentation, for more information about diff --git a/docs/explanation/e-security.md b/docs/explanation/e-security.md index c404d707..76992516 100644 --- a/docs/explanation/e-security.md +++ b/docs/explanation/e-security.md @@ -3,26 +3,30 @@ This document describes cryptography used by Charmed Apache Kafka. ## Resource checksums + Every version of the Charmed Apache Kafka and Charmed Apache ZooKeeper operators install a pinned revision of the Charmed Apache Kafka snap and Charmed Apache ZooKeeper, respectively, in order to provide reproducible and secure environments. 
The [Charmed Apache Kafka snap](https://snapstore.io/charmed-kafka) and [Charmed Apache ZooKeeper snap](https://snapstore.io/charmed-zookeeper) package the -Kafka and ZooKeeper workload together with +Apache Kafka and Apache ZooKeeper workload together with a set of dependencies and utilities required by the lifecycle of the operators (see [Charmed Apache Kafka snap contents](https://github.com/canonical/charmed-kafka-snap/blob/3/edge/snap/snapcraft.yaml) and [Charmed Apache ZooKeeper snap contents](https://github.com/canonical/charmed-zookeeper-snap/blob/3/edge/snap/snapcraft.yaml)). Every artifact bundled into the Charmed Apache Kafka snap and Charmed Apache ZooKeeper snap is verified against their SHA256 or SHA512 checksum after download. ## Sources verification + Charmed Apache Kafka sources are stored in: * GitHub repositories for snaps, rocks and charms -* LaunchPad repositories for the Kafka and ZooKeeper upstream fork used for building their respective distributions +* LaunchPad repositories for the Apache Kafka and Apache ZooKeeper upstream fork used for building their respective distributions ### LaunchPad + Distributions are built using private repositories only, hosted as part of the [SOSS namespace](https://launchpad.net/soss) to eventually integrate with Canonical's standard process for fixing CVEs. Branches associated with releases are mirrored to a public repository, hosted in the [Data Platform namespace](https://launchpad.net/~data-platform) to also provide the community with the patched source code. ### GitHub + All Charmed Apache Kafka and Charmed Apache ZooKeeper artifacts are published and released programmatically using release pipelines implemented via GitHub Actions. 
Distributions are published as both GitHub and LaunchPad releases via the [central-uploader repository](https://github.com/canonical/central-uploader), while @@ -35,18 +39,19 @@ All repositories in GitHub are set up with branch protection rules, requiring: * developers to sign the [Canonical Contributor License Agreement (CLA)](https://ubuntu.com/legal/contributors) ## Encryption -The Charmed Apache Kafka operator can be used to deploy a secure Kafka cluster that provides encryption-in-transit capabilities out of the box + +The Charmed Apache Kafka operator can be used to deploy a secure Apache Kafka cluster that provides encryption-in-transit capabilities out of the box for: * Interbroker communications -* ZooKeeper connection +* Apache ZooKeeper connection * External client connection -In order to set up secure connection Kafka and ZooKeeper applications need to be integrated with TLS Certificate Provider charms, e.g. +In order to set up secure connection Apache Kafka and Apache ZooKeeper applications need to be integrated with TLS Certificate Provider charms, e.g. `self-signed-certificates` operator. CSRs are generated for every unit using `tls_certificates_interface` library that uses `cryptography` python library to create X.509 compatible certificates. The CSR is signed by the TLS Certificate Provider and returned to the units, and -stored in a password-protected Keystore file. The password of the Keystore is stored in Juju secrets starting from revision 168 on Kafka -and revision 130 on ZooKeeper. The relation provides also the certificate for the CA to be loaded in a password-protected Truststore file. +stored in a password-protected Keystore file. The password of the Keystore is stored in Juju secrets starting from revision 168 on Apache Kafka +and revision 130 on Apache ZooKeeper. The relation provides also the certificate for the CA to be loaded in a password-protected Truststore file. 
When encryption is enabled, hostname verification is turned on for client connections, including inter-broker communication. Cipher suite can be customized by providing a list of allowed cipher suite to be used for external clients and zookeeper connections, using the charm config options @@ -56,39 +61,43 @@ for more information. Encryption at rest is currently not supported, although it can be provided by the substrate (cloud or on-premises). ## Authentication + In the Charmed Apache Kafka solution, authentication layers can be enabled for -1. ZooKeeper connections -2. Kafka inter broker communication -3. Kafka Clients +1. Apache ZooKeeper connections +2. Apache Kafka inter broker communication +3. Apache Kafka Clients + +### Apache Kafka authentication to Apache ZooKeeper -### Kafka authentication to ZooKeeper -Authentication to ZooKeeper is based on Simple Authentication and Security Layer (SASL) using digested MD5 hashes of -username and password, and implemented both for client-server (with Kafka) and server-server communication. -Username and passwords are exchanged using peer relations among ZooKeeper units and using normal relations between Kafka and ZooKeeper. -Juju secrets are used for exchanging credentials starting from revision 168 on Kafka and revision 130 on ZooKeeper. +Authentication to Apache ZooKeeper is based on Simple Authentication and Security Layer (SASL) using digested MD5 hashes of +username and password, and implemented both for client-server (with Apache Kafka) and server-server communication. +Username and passwords are exchanged using peer relations among Apache ZooKeeper units and using normal relations between Apache Kafka and Apache ZooKeeper. +Juju secrets are used for exchanging credentials starting from revision 168 on Apache Kafka and revision 130 on Apache ZooKeeper. -Username and password for the different users are stored in ZooKeeper servers in a JAAS configuration file in plain format. 
+Username and password for the different users are stored in Apache ZooKeeper servers in a JAAS configuration file in plain format. Permission on the file is restricted to the root user. -### Kafka Inter-broker authentication +### Apache Kafka Inter-broker authentication + Authentication among brokers is based on SCRAM-SHA-512 protocol. Username and passwords are exchanged via peer relations, using Juju secrets from revision 168 on Charmed Apache Kafka. -Kafka username and password used by brokers to authenticate one another are stored -both in a ZooKeeper zNode and in a JAAS configuration file in the Kafka server in plain format. +Apache Kafka username and password used by brokers to authenticate one another are stored +both in an Apache ZooKeeper zNode and in a JAAS configuration file in the Apache Kafka server in plain format. The file needs to be readable and -writable by root (as it is created by the charm), and be readable by the `snap_daemon` user running the Kafka server snap commands. +writable by root (as it is created by the charm), and be readable by the `snap_daemon` user running the Apache Kafka server snap commands. + +### Client authentication to Apache Kafka -### Client authentication to Kafka -Clients can authenticate to Kafka using: +Clients can authenticate to Apache Kafka using: 1. username and password exchanged using SCRAM-SHA-512 protocols 2. client certificates or CAs (mTLS) -When using SCRAM, username and passwords are stored in ZooKeeper to be used by the Kafka processes, +When using SCRAM, username and passwords are stored in Apache ZooKeeper to be used by the Apache Kafka processes, in peer-relation data to be used by the Kafka charm and in external relation to be shared with client applications. -Starting from revision 168 on Kafka, Juju secrets are used for storing the credentials in place of plain unencrypted text. 
+Starting from revision 168 on Charmed Apache Kafka, Juju secrets are used for storing the credentials in place of plain unencrypted text. -When using mTLS, client certificates are loaded into a `tls-certificates` operator and provided to the Kafka charm via the plain-text unencrypted +When using mTLS, client certificates are loaded into a `tls-certificates` operator and provided to the Apache Kafka charm via the plain-text unencrypted relation. Certificates are stored in the password-protected Truststore file. \ No newline at end of file diff --git a/docs/how-to/h-cluster-migration.md b/docs/how-to/h-cluster-migration.md index 37b0429f..2111202a 100644 --- a/docs/how-to/h-cluster-migration.md +++ b/docs/how-to/h-cluster-migration.md @@ -1,21 +1,24 @@ # Cluster migration using MirrorMaker2.0 -This How-To guide covers executing a cluster migration to a Charmed Apache Kafka deployment using MirrorMaker2.0, running as a process on each of the Juju units in an active/passive setup, where MirrorMaker will act as a consumer from an existing cluster, and a producer to the Charmed Apache Kafka cluster. In parallel (one process on each unit), data and consumer offsets for all existing topics will be synced one-way until both clusters are in-sync, with all data replicated across both in real-time. +This How-To guide covers executing a cluster migration to a Charmed Apache Kafka deployment using MirrorMaker2.0. + +The MirrorMaker runs on the new (destination) cluster as a process on each Juju unit in an active/passive setup. It acts as a consumer from an existing cluster, and a producer to the Charmed Apache Kafka cluster. Data and consumer offsets for all existing topics will be synced **one-way** in parallel (one process on each unit) until both clusters are in-sync, with all data replicated across both in real-time. 
## MirrorMaker2 overview Under the hood, MirrorMaker uses Kafka Connect source connectors to replicate data, those being the following: + - **MirrorSourceConnector** - replicates topics from an original cluster to a new cluster. It also replicates ACLs and is necessary for the MirrorCheckpointConnector to run - **MirrorCheckpointConnector** - periodically tracks offsets. If enabled, it also synchronizes consumer group offsets between the original and new clusters - **MirrorHeartbeatConnector** - periodically checks connectivity between the original and new clusters -Together, they are used for cluster->cluster replication of topics, consumer groups, topic configuration and ACLs, preserving partitioning and consumer offsets. For more detail on MirrorMaker internals, consult the [MirrorMaker README.md](https://github.com/apache/kafka/blob/trunk/connect/mirror/README.md) and the [MirrorMaker 2.0 KIP](https://cwiki.apache.org/confluence/display/KAFKA/KIP-382%3A+MirrorMaker+2.0). In practice, it lets sync data one-way between two live Kafka clusters with minimal impact on the ongoing production service. +Together, they are used for cluster->cluster replication of topics, consumer groups, topic configuration and ACLs, preserving partitioning and consumer offsets. For more detail on MirrorMaker internals, consult the [MirrorMaker README.md](https://github.com/apache/kafka/blob/trunk/connect/mirror/README.md) and the [MirrorMaker 2.0 KIP](https://cwiki.apache.org/confluence/display/KAFKA/KIP-382%3A+MirrorMaker+2.0). In practice, it lets you sync data one-way between two live Apache Kafka clusters with minimal impact on the ongoing production service. In short, MirrorMaker runs as a distributed service on the new cluster, and consumes all topics, groups and offsets from the still-active original cluster in production, before producing them one-way to the new cluster that may not yet be serving traffic to external clients. 
The original, in-production cluster is referred to as an ‘active’ cluster, and the new cluster still waiting to serve external clients is ‘passive’. The MirrorMaker service can be configured using much the same configuration as available for Kafka Connect. ## Pre-requisites -- An existing Kafka cluster to migrate from +- An existing Apache Kafka cluster to migrate from - A bootstrapped Juju VM machine cloud running Charmed Apache Kafka to migrate to - A tutorial on how to set-up a Charmed Apache Kafka deployment can be found as part of the [Charmed Apache Kafka Tutorial](/t/charmed-kafka-tutorial-overview/10571) - The CLI tool `yq` - https://github.com/mikefarah/yq @@ -57,7 +60,9 @@ OLD_SERVERS OLD_SASL_JAAS_CONFIG ``` -> **NOTE** - If using `SSL` or `SASL_SSL` authentication, review the configuration options supported by Kafka Connect in the [Apache Kafka documentation](https://kafka.apache.org/documentation/#connectconfigs) +[note] +If using `SSL` or `SASL_SSL` authentication, review the configuration options supported by Kafka Connect in the [Apache Kafka documentation](https://kafka.apache.org/documentation/#connectconfigs) +[/note] ## Generating `mm2.properties` file on the Charmed Apache Kafka cluster @@ -110,7 +115,7 @@ old.consumer.isolation.level=read_committed new.consumer.isolation.level=read_committed # Specific Connector configuration for ensuring Exactly-Once-Delivery (EOD) -# NOTE - EOD support guarantees released with Kafka 3.5.0 so some of these options may not work as expected +# NOTE - EOD support guarantees released with Apache Kafka 3.5.0 so some of these options may not work as expected old.producer.enable.idempotence=true new.producer.enable.idempotence=true old.producer.acks=all @@ -139,7 +144,7 @@ juju ssh kafka/ sudo -i 'cd /snap/charmed-kafka/current/opt/kafka/bin && KAF ## Monitoring and validating data replication -The migration process can be monitored using built-in Kafka bin-commands on the original cluster. 
In the Charmed Apache Kafka cluster, these bin-commands are also mapped to snap commands on the units (e.g `charmed-kafka.get-offsets` or `charmed-kafka.topics`). +The migration process can be monitored using built-in Apache Kafka bin-commands on the original cluster. In the Charmed Apache Kafka cluster, these bin-commands are also mapped to snap commands on the units (e.g `charmed-kafka.get-offsets` or `charmed-kafka.topics`). To monitor the current consumer offsets, run the following on the original cluster being migrated from: diff --git a/docs/how-to/h-create-mtls-client-credentials.md b/docs/how-to/h-create-mtls-client-credentials.md index 4ac62794..9a9e2b0a 100644 --- a/docs/how-to/h-create-mtls-client-credentials.md +++ b/docs/how-to/h-create-mtls-client-credentials.md @@ -1,6 +1,7 @@ # Create mTLS client credentials Requirements: + - Charmed Apache Kafka cluster up and running - [Admin credentials](./h-manage-app.md) - [Encryption enabled](./h-enable-encryption.md) @@ -8,7 +9,7 @@ Requirements: - [`charmed-kafka` snap](https://snapcraft.io/charmed-kafka) installed - [jq](https://snapcraft.io/jq) installed -Goal: Create mTLS credentials for a client application to be able to connect to the Kafka cluster. +This guide includes step by step instructions on how to create mTLS credentials for a client application to be able to connect to a Charmed Apache Kafka cluster. 
## Authentication @@ -16,11 +17,11 @@ Goal: Create mTLS credentials for a client application to be able to connect to # ---------- Environment SNAP_KAFKA_PATH=/var/snap/charmed-kafka/current/etc/kafka -# Kafka ports +# Apache Kafka ports KAFKA_SASL_PORT=9093 KAFKA_MTLS_PORT=9094 -# Kafka servers +# Apache Kafka servers KAFKA_SERVERS_SASL=$KAFKA_SASL_PORT KAFKA_SERVERS_MTLS=$KAFKA_MTLS_PORT diff --git a/docs/how-to/h-deploy.md b/docs/how-to/h-deploy.md index 47742b51..7b3c87ed 100644 --- a/docs/how-to/h-deploy.md +++ b/docs/how-to/h-deploy.md @@ -13,7 +13,7 @@ If you already have a Juju controller and/or a Juju model, you can skip the asso ## Juju controller setup -Before deploying Kafka, make sure you have a Juju controller accessible from +Before deploying Charmed Apache Kafka, make sure you have a Juju controller accessible from your local environment using the [Juju client snap](https://snapcraft.io/juju). The properties of your current controller can be listed using `juju show-controller`. @@ -24,7 +24,9 @@ The cloud information can be retrieved with the following command juju show-controller | yq '.[].details.cloud' ``` -> **IMPORTANT** If the cloud is `k8s`, please refer to the [Charmed Apache Kafka K8s documentation](/t/charmed-kafka-k8s-documentation/10296) instead. +[note] +**IMPORTANT** If the cloud is `k8s`, please refer to the [Charmed Apache Kafka K8s documentation](/t/charmed-kafka-k8s-documentation/10296) instead. +[/note] You can find more information on how to bootstrap and configure a controller for different clouds [here](https://juju.is/docs/juju/manage-controllers#heading--bootstrap-a-controller). @@ -51,20 +53,24 @@ can be obtained by juju show-model | yq '.[].type' ``` -> **IMPORTANT** If the model is `k8s`, please refer to the [Charmed Apache Kafka K8s documentation](https://discourse.charmhub.io/t/charmed-kafka-k8s-documentation/10296) instead. 
+[note] +**IMPORTANT** If the model is `k8s`, please refer to the [Charmed Apache Kafka K8s documentation](https://discourse.charmhub.io/t/charmed-kafka-k8s-documentation/10296) instead. +[/note] ## Deploy Charmed Apache Kafka and Charmed Apache ZooKeeper -The Kafka and ZooKeeper charms can both be deployed as follows: +The Charmed Apache Kafka and Charmed Apache ZooKeeper charms can both be deployed as follows: ```shell $ juju deploy kafka --channel 3/stable -n --trust $ juju deploy zookeeper --channel 3/stable -n ``` -where `` and `` – the number of units to deploy for Kafka and ZooKeeper. We recommend values of at least `3` and `5` respectively. +where `` and `` – the number of units to deploy for Apache Kafka and Apache ZooKeeper. We recommend values of at least `3` and `5` respectively. -> **NOTE** The `--trust` option is needed for the Kafka application if NodePort is used. For more information about the trust options usage, see the [Juju documentation](/t/5476#heading--trust-an-application-with-a-credential). +[note] + The `--trust` option is needed for the Apache Kafka application if NodePort is used. For more information about the trust options usage, see the [Juju documentation](/t/5476#heading--trust-an-application-with-a-credential). +[/note] After this, it is necessary to connect them: @@ -77,14 +83,14 @@ should be ready to be used. ## (Optional) Create an external admin users -Charmed Apache Kafka aims to follow the _secure by default_ paradigm. As a consequence, after being deployed the Kafka cluster +Charmed Apache Kafka aims to follow the _secure by default_ paradigm. As a consequence, after being deployed the Apache Kafka cluster won't expose any external listener. In fact, ports are only opened when client applications are related, also depending on the protocols to be used. Please refer to [this table](/t/charmed-kafka-documentation-reference-listeners/13264) for more information about the available listeners and protocols. 
It is however generally useful for most of the use-cases to create a first admin user -to be used to manage the Kafka cluster (either internally or externally). +to be used to manage the Apache Kafka cluster (either internally or externally). To create an admin user, deploy the [Data Integrator Charm](https://charmhub.io/data-integrator) with `extra-user-roles` set to `admin` @@ -93,7 +99,7 @@ To create an admin user, deploy the [Data Integrator Charm](https://charmhub.io/ juju deploy data-integrator --channel stable --config topic-name=test-topic --config extra-user-roles=admin ``` -and relate to the Kafka charm +and relate to the Apache Kafka charm ```shell juju relate data-integrator kafka diff --git a/docs/how-to/h-enable-encryption.md b/docs/how-to/h-enable-encryption.md index 0fbe6fe6..9278b1a6 100644 --- a/docs/how-to/h-enable-encryption.md +++ b/docs/how-to/h-enable-encryption.md @@ -1,9 +1,9 @@ # How to enable encryption -## Deploy a TLS Provider charm +The Apache Kafka and Apache ZooKeeper charms implements the Requirer side of the [`tls-certificates/v1`](https://github.com/canonical/charm-relation-interfaces/blob/main/interfaces/tls_certificates/v1/README.md) charm relation. Therefore, any charm implementing the Provider side could be used. +To enable encryption, you should first deploy a TLS certificates Provider charm. -To enable encryption, you should first deploy a TLS certificates Provider charm. The Kafka and ZooKeeper charms implements the Requirer side of the [`tls-certificates/v1`](https://github.com/canonical/charm-relation-interfaces/blob/main/interfaces/tls_certificates/v1/README.md) charm relation. -Therefore, any charm implementing the Provider side could be used. +## Deploy a TLS Provider charm One possible option, suitable for testing, could be to use the `self-signed-certificates`, although this setup is however not recommended for production clusters. 
@@ -18,7 +18,9 @@ juju config self-signed-certificates ca-common-name="Test CA"
 
 Please refer to [this post](https://charmhub.io/topics/security-with-x-509-certificates) for an overview of the TLS certificates Providers charms and some guidance on how to choose the right charm for your use-case. 
 
-## Enable TLS on Kafka and ZooKeeper
+## Relate the charms
+
+Now we need to add a relation between a TLS certificates Provider charm and the Apache Kafka and Apache ZooKeeper charms:
 
 ```
 juju relate  zookeeper
@@ -27,7 +29,9 @@ juju relate kafka:certificates 
 
 where `` is the name of the TLS certificate provider charm deployed. 
 
-> **Note** If Kafka and ZooKeeper are already related, they will start renegotiating the relation to provide each other certificates and enable/open to correct ports/connections. Otherwise relate them after the both relations with the `` . 
+[note]
+If Apache Kafka and Apache ZooKeeper are already related, they will start renegotiating the relation to provide each other certificates and enable/open the correct ports/connections. Otherwise, relate them after both relations with the `` . 
+[/note]
 
 ## Manage keys
 
diff --git a/docs/how-to/h-enable-monitoring.md b/docs/how-to/h-enable-monitoring.md
index 81d1cb71..97a085a7 100644
--- a/docs/how-to/h-enable-monitoring.md
+++ b/docs/how-to/h-enable-monitoring.md
@@ -80,9 +80,9 @@ juju run grafana/leader get-admin-password --model :
@@ -90,7 +90,7 @@ juju config kafka log_level=
 
Possible values are `ERROR`, `WARNING`, `INFO`, `DEBUG`. 
-### ZooKeeper +### Apache ZooKeeper ``` juju config kafka log-level= diff --git a/docs/how-to/h-enable-oauth.md b/docs/how-to/h-enable-oauth.md index dd3260b9..13c62988 100644 --- a/docs/how-to/h-enable-oauth.md +++ b/docs/how-to/h-enable-oauth.md @@ -2,7 +2,7 @@ Versions used for this integration example: - LXD (v5.21.1) - MicroK8s (v1.28.10) -- Kafka charm: built from [this feature PR](https://github.com/canonical/kafka-operator/pull/168), which adds Hydra integration +- Apache Kafka charm: built from [this feature PR](https://github.com/canonical/kafka-operator/pull/168), which adds Hydra integration ## Initial deployment @@ -35,7 +35,7 @@ $ juju offer admin/iam.hydra:oauth $ juju offer admin/iam.self-signed-certificates:certificates ``` -Kafka setup: +Apache Kafka setup: ```bash # On the lxd controller @@ -52,7 +52,7 @@ $ juju integrate kafka:certificates self-signed-certificates $ juju integrate zookeeper self-signed-certificates ``` -Once everything is settled, integrate Kafka and Hydra: +Once everything is settled, integrate Apache Kafka and Hydra: ```bash # On the lxd model @@ -83,4 +83,4 @@ $ curl https://10.64.140.44/iam-hydra/oauth2/token -k -u eeec2a88-52bf-46e6-85bf {"access_token":"ory_at_b2pcwnwTpCVHPbxoU7L45isbRJhNdBbn91y4Ex0YNrA.easwGEfsTJ7VnNfER2svIMHwen5ZzNXaVZm8i7QdLLg","expires_in":3599,"scope":"profile","token_type":"bearer"} ``` -With this token, a client can now authenticate on Kafka using oAuth listeners. \ No newline at end of file +With this token, a client can now authenticate on Apache Kafka using oAuth listeners. \ No newline at end of file diff --git a/docs/how-to/h-integrate-alerts-dashboards.md b/docs/how-to/h-integrate-alerts-dashboards.md index addd5f3d..33456192 100644 --- a/docs/how-to/h-integrate-alerts-dashboards.md +++ b/docs/how-to/h-integrate-alerts-dashboards.md @@ -66,4 +66,4 @@ As for the dashboards, they should be available in the Grafana interface. 
## Conclusion -In this guide, we enabled monitoring on a Kafka deployment and integrated alert rules and dashboards by syncing a git repository to the COS stack. \ No newline at end of file +In this guide, we enabled monitoring on a Charmed Apache Kafka deployment and integrated alert rules and dashboards by syncing a git repository to the COS stack. \ No newline at end of file diff --git a/docs/how-to/h-manage-units.md b/docs/how-to/h-manage-units.md index f84ab2fe..ecde1bcb 100644 --- a/docs/how-to/h-manage-units.md +++ b/docs/how-to/h-manage-units.md @@ -4,7 +4,7 @@ Unit management guide for scaling and running admin utility scripts. ## Replication and Scaling -Increasing the number of Kafka brokers can be achieved by adding more units +Increasing the number of Apache Kafka brokers can be achieved by adding more units to the Charmed Apache Kafka application, for example: ```shell @@ -13,12 +13,12 @@ juju add-unit kafka -n For more information on how to manage units, please refer to the [Juju documentation](https://juju.is/docs/juju/manage-units) -It is important to note that when adding more units, the Kafka cluster will not +It is important to note that when adding more units, the Apache Kafka cluster will not *automatically* rebalance existing topics and partitions. New storage and new brokers will be used only when new topics and new partitions are created. Partition reassignment can still be done manually by the admin user by using the -`charmed-kafka.reassign-partitions` Kafka bin utility script. Please refer to +`charmed-kafka.reassign-partitions` Apache Kafka bin utility script. Please refer to its documentation for more information. > **IMPORTANT** Scaling down is currently not supported in the charm automation. @@ -26,22 +26,22 @@ its documentation for more information. > to make sure the decommissioned units do not hold any data, **your cluster may > suffer to data loss**. 
-## Running Kafka admin utility scripts +## Running Apache Kafka admin utility scripts Apache Kafka ships with `bin/*.sh` commands to do various administrative tasks such as: * `bin/kafka-config.sh` to update cluster configuration * `bin/kafka-topics.sh` for topic management -* `bin/kafka-acls.sh` for management of ACLs of Kafka users +* `bin/kafka-acls.sh` for management of ACLs of Apache Kafka users -Please refer to the upstream [Kafka project](https://github.com/apache/kafka/tree/trunk/bin), -for a full list of the bash commands available in Kafka distributions. Also, you can +Please refer to the upstream [Apache Kafka project](https://github.com/apache/kafka/tree/trunk/bin), +for a full list of the bash commands available in Apache Kafka distributions. Also, you can use `--help` argument for printing a short summary of the argument for a given bash command. The most important commands are also exposed via the [Charmed Apache Kafka snap](https://snapcraft.io/charmed-kafka), accessible via `charmed-kafka.`. Please refer to [this table](/t/charmed-kafka-documentation-reference-snap-entrypoints/13263) for -more information about the mapping between the Kafka bin commands and the snap entrypoints. +more information about the mapping between the Apache Kafka bin commands and the snap entrypoints. > **IMPORTANT** Before running bash scripts, make sure that some listeners have been correctly > opened by creating appropriate integrations. Please refer to [this table](/t/charmed-kafka-documentation-reference-listeners/13264) for more @@ -51,12 +51,12 @@ more information about the mapping between the Kafka bin commands and the snap e To run most of the scripts, you need to provide: -1. the Kafka service endpoints, generally referred to as *bootstrap servers* +1. the Apache Kafka service endpoints, generally referred to as *bootstrap servers* 2. 
authentication information -### Juju admins of the Kafka deployment +### Juju admins of the Apache Kafka deployment -For Juju admins of the Kafka deployment, the bootstrap servers information can +For Juju admins of the Apache Kafka deployment, the bootstrap servers information can be obtained using ``` @@ -64,25 +64,25 @@ BOOTSTRAP_SERVERS=$(juju run kafka/leader get-admin-credentials | grep "bootstra ``` Admin client authentication information is stored in the -`/var/snap/charmed-kafka/common/etc/kafka/client.properties` file present on every Kafka +`/var/snap/charmed-kafka/common/etc/kafka/client.properties` file present on every Apache Kafka broker. The content of the file can be accessed using ``` juju ssh kafka/leader `cat /etc/kafka/client.properties` ``` -This file can be provided to the Kafka bin commands via the `--command-config` +This file can be provided to the Apache Kafka bin commands via the `--command-config` argument. Note that `client.properties` may also refer to other files ( e.g. truststore and keystore for TLS-enabled connections). Those files also need to be accessible and correctly specified. -Commands can also be run within a Kafka broker, since both the authentication +Commands can also be run within an Apache Kafka broker, since both the authentication file (along with the truststore if needed) and the Charmed Apache Kafka snap are already present. 
#### Example (listing topics) -For instance, in order to list the current topics on the Kafka cluster, you can run: +For instance, in order to list the current topics on the Apache Kafka cluster, you can run: ``` juju ssh kafka/leader 'charmed-kafka.topics --bootstrap-server $BOOTSTRAP_SERVERS --list --command-config /var/snap/charmed-kafka/common/etc/kafka/client.properties' diff --git a/docs/how-to/h-upgrade.md b/docs/how-to/h-upgrade.md index 99ce316f..92f03050 100644 --- a/docs/how-to/h-upgrade.md +++ b/docs/how-to/h-upgrade.md @@ -1,19 +1,22 @@ # How to upgrade between minor versions -> **Note** This feature is available on Charmed Apache Kafka and Charmed Apache ZooKeeper from revisions 134 and 103, respectively. Upgrade from previous versions is **not supported**, although possible (see e.g. [here](https://github.com/deusebio/kafka-pre-upgrade-patch) for a custom example). +[note] +This feature is available on Charmed Apache Kafka and Charmed Apache ZooKeeper from revisions 134 and 103, respectively. Upgrade from previous versions is **not supported**, although possible (see [example](https://github.com/deusebio/kafka-pre-upgrade-patch)). +[/note] -Charm upgrades can include both upgrades of operator code (e.g. the revision used by the charm) and/or the workload version. Note that since the charm code pins a particular version of the workload, a charm upgrade may or may not involve also a workload version upgrade. In general, the following guide only applies for in-place upgrades that involve (at most) minor version upgrade of Kafka workload, e.g. between Kafka 3.4.x to 3.5.x. Major workload upgrades are generally **NOT SUPPORTED**, and they should be carried out using full cluster-to-cluster migrations. Please refer to the how-to guide about cluster migration [how-to guide about cluster migration](/t/charmed-kafka-how-to-cluster-migration/10951) for more information on how this can be achieved. 
+Charm upgrades can include both upgrades of operator code (e.g. the revision used by the charm) and/or the workload version. Note that since the charm code pins a particular version of the workload, a charm upgrade may or may not involve also a workload version upgrade. 
 
-Perform other extraordinary operations on the Kafka cluster while upgrading is not supported. As an example, these may be (but not limited to) the following: 
+In general, the following guide only applies for in-place upgrades that involve (at most) minor version upgrade of Apache Kafka workload, e.g. from Apache Kafka 3.4.x to 3.5.x. Major workload upgrades are generally **NOT SUPPORTED**, and they should be carried out using [full cluster-to-cluster migrations](/t/charmed-kafka-how-to-cluster-migration/10951). 
+While upgrading an Apache Kafka cluster, do not perform any other major operations, including, but not limited to, the following: 
 
 1. Adding or removing units 
 2. Creating or destroying new relations 
 3. Changes in workload configuration 
-4. Upgrading other connected applications (e.g. ZooKeeper)
+4. Upgrading other connected applications (e.g. Apache ZooKeeper)
 
 The concurrency with other operations is not supported, and it can lead the cluster into inconsistent states. 
 
-## Minor upgrade process overview
+## Minor upgrade process
 
 When performing an in-place upgrade process, the full process is composed by the following high-level steps: 
 
@@ -22,9 +25,9 @@ When performing an in-place upgrade process, the full process is composed by the
 3. **Upgrade** the charm and/or the workload. Once started, all units in a cluster will refresh the charm code and undergo a workload restart/update. The upgrade will be aborted if the unit upgrade has failed, requiring the admin user to rollback.
 4. **Post-upgrade checks** to make sure all units are in the proper state and the cluster is healthy.
-## Step 1: Collect +### Step 1: Collect -The first step is to record the revisions of the running application, as a safety measure for a rollback action if needed. To accomplish this, simply run the `juju status` command and look for the revisions of the deployed Kafka and ZooKeeper applications. You can also retrieve this with the following command (that requires [`yq`](https://snapcraft.io/install/yq/ubuntu) to be installed): +The first step is to record the revisions of the running application, as a safety measure for a rollback action if needed. To accomplish this, simply run the `juju status` command and look for the revisions of the deployed Apache Kafka and Apache ZooKeeper applications. You can also retrieve this with the following command (that requires [yq](https://snapcraft.io/install/yq/ubuntu) to be installed): ```shell KAFKA_CHARM_REVISION=$(juju status --format json | yq .applications..charm-rev) @@ -33,9 +36,10 @@ ZOOKEEPER_CHARM_REVISION=$(juju status --format json | yq .applications.` and `}` placeholder appropriately, e.g. `kafka` and `zookeeper`. -## Step 2: Prepare +### Step 2: Prepare + +Before upgrading, the charm needs to perform some preparatory tasks to define the upgrade plan. -Before upgrading, the charm needs to perform some preparatory tasks to define the upgrade plan. To do so, run the `pre-upgrade-check` action against the leader unit: ```shell @@ -44,26 +48,28 @@ juju run kafka/leader pre-upgrade-check Make sure that the output of the action is successful. -> **Note**: This action must be run before Charmed Apache Kafka upgrades. +[note] +This action must be run before Charmed Apache Kafka upgrades. +[/note] The action will also configure the charm to minimize high-availability reduction and ensure a safe upgrade process. After successful execution, the charm is ready to be upgraded. 
-## Step 3: Upgrade
+### Step 3: Upgrade
 
 Use the [`juju refresh`](https://juju.is/docs/juju/juju-refresh) command to trigger the charm upgrade process.
 
 Note that the upgrade can be performed against:
 
-* selected channel/track, therefore upgrading to the latest revision published on that track
+* selected channel/track, therefore upgrading to the latest revision published on that track:
 
   ```shell
   juju refresh kafka --channel 3/edge
   ```
 
-* selected revision
+* selected revision:
 
   ```shell
   juju refresh kafka --revision=
   ```
 
-* a local charm file
+* a local charm file:
 
   ```shell
   juju refresh kafka --path ./kafka_ubuntu-22.04-amd64.charm
@@ -71,7 +77,9 @@ Note that the upgrade can be performed against:
 
 When issuing the commands, all units will refresh (i.e. receive new charm content), and the upgrade charm event will be fired. The charm will take care of executing an update (if required) and a restart of the workload one unit at a time to not lose high-availability. 
 
-> **Note** On Juju<3.4.4, the refresh operation may transitively fail because of [this issue](https://bugs.launchpad.net/juju/+bug/2053242) on Juju. The failure will resolve itself and the upgrade process will resume normally in few minutes (as soon as the new charm has been downloaded and the upgrade events are appropriately emitted.
+[note]
+On Juju<3.4.4, the refresh operation may transitively fail because of [this issue](https://bugs.launchpad.net/juju/+bug/2053242) on Juju. The failure will resolve itself and the upgrade process will resume normally in a few minutes (as soon as the new charm has been downloaded and the upgrade events are appropriately emitted).
+[/note] The upgrade process can be monitored using `juju status` command, where the message of the units will provide information about which units have been upgraded already, which unit is currently upgrading and which units are waiting for the upgrade to be triggered, as shown below: @@ -90,17 +98,17 @@ kafka/2 active idle 5 10.193.41.221 Upgrade completed ``` -### Failing upgrade +#### Failing upgrade Before upgrading the unit, the charm will check whether the upgrade can be performed, e.g. this may mean: -1. Checking that the upgrade from the previous charm revision and Kafka version is possible. -2. Checking that other external applications that Kafka depends on (e.g. ZooKeeper) are running the correct version. +1. Checking that the upgrade from the previous charm revision and Apache Kafka version is possible. +2. Checking that other external applications that Apache Kafka depends on (e.g. Apache ZooKeeper) are running the correct version. Note that these checks are only possible after a refresh of the charm code, and therefore cannot be done upfront (e.g. during the `pre-upgrade-checks` action). If some of these checks fail, the upgrade will be aborted. When this happens, the workload may still be operating (as only the operator may have failed) but we recommend to rollback the upgrade as soon as possible. -To roll back the upgrade, re-run steps 2 and 3, using the revision taken in step 1, i.e. +To roll back the upgrade, re-run steps 2 and 3, using the revision taken in step 1: ```shell juju run kafka/leader pre-upgrade-check @@ -110,9 +118,9 @@ juju refresh kafka --revision=${KAFKA_CHARM_REVISION} We strongly recommend to also retrieve the full set of logs with `juju debug-log`, to extract insights on why the upgrade failed. -## ZooKeeper upgrade +## Apache ZooKeeper upgrade -Although the previous steps focused on upgrading Kafka, the same process can also be applied to ZooKeeper. 
However, for revisions prior to XXX, a patch needs to be applied before running the aforementioned process. The ZooKeeper process, as part of its operations, overwrites the `zoo.cfg` pinning the snap revision for the `dynamicConfigFile`. This may create problems in the upgrade if `snapd` removes the previous revision once the snap is refreshed. To prevent this, it is sufficient to replace the `` with `current`. 
+Although the previous steps focused on upgrading Apache Kafka, the same process can also be applied to Apache ZooKeeper. However, for revisions prior to XXX, a patch needs to be applied before running the aforementioned process. The Apache ZooKeeper process, as part of its operations, overwrites the `zoo.cfg` pinning the snap revision for the `dynamicConfigFile`. This may create problems in the upgrade if `snapd` removes the previous revision once the snap is refreshed. To prevent this, it is sufficient to replace the `` with `current`. 
 
 To do so, on each unit, first apply the patch:
 
@@ -130,6 +138,6 @@ Check that the server has started correctly, and then apply the patch to the nex
 
 Once all the units have been patched, proceed with the upgrade process, as outlined above. 
 
-## Kafka and ZooKeeper combined upgrades
+## Apache Kafka and Apache ZooKeeper combined upgrades
 
-If Kafka and ZooKeeper charms need both to be upgraded, we recommend you to start the upgrade from the ZooKeeper cluster. As outlined above, the two upgrades should **NEVER** be done concurrently.
\ No newline at end of file
+If both the Apache Kafka and Apache ZooKeeper charms need to be upgraded, we recommend starting the upgrade from the Apache ZooKeeper cluster. As outlined above, the two upgrades should **NEVER** be done concurrently.
\ No newline at end of file diff --git a/docs/index.md b/docs/index.md index de520efc..807494ad 100644 --- a/docs/index.md +++ b/docs/index.md @@ -2,16 +2,16 @@ Charmed Apache Kafka is an open-source operator that makes it easier to manage Apache Kafka, with built-in support for enterprise features. -Apache Kafka is a free, open source software project by the Apache Software Foundation. Users can find out more at the [Kafka project page](https://kafka.apache.org). +Apache Kafka is a free, open source software project by the Apache Software Foundation. Users can find out more at the [Apache Kafka project page](https://kafka.apache.org). -Charmed Apache Kafka is built on top of [Juju](https://juju.is/) and reliably simplifies the deployment, scaling, design, and management of [Apache Kafka](https://kafka.apache.org/) in production. Additionally, you can use the charm to manage your Kafka clusters with automation capabilities. It also offers replication, TLS, password rotation, easy-to-use application integration, and monitoring. +Charmed Apache Kafka is built on top of [Juju](https://juju.is/) and reliably simplifies the deployment, scaling, design, and management of [Apache Kafka](https://kafka.apache.org/) in production. Additionally, you can use the charm to manage your Apache Kafka clusters with automation capabilities. It also offers replication, TLS, password rotation, easy-to-use application integration, and monitoring. Charmed Apache Kafka operates Apache Kafka on physical systems, Virtual Machines (VM), and a wide range of cloud and cloud-like environments, including AWS, Azure, OpenStack, and VMware. Charmed Apache Kafka is a solution designed and developed to help ops teams and administrators automate Apache Kafka operations from [Day 0 to Day 2](https://codilime.com/blog/day-0-day-1-day-2-the-software-lifecycle-in-the-cloud-age/), across multiple cloud environments and platforms. 
[note] -Canonical has also developed the [Charmed Apache Kafka K8s operator](/t/charmed-kafka-k8s-documentation/10296) to support Kafka in Kubernetes environments. +Canonical has also developed the [Charmed Apache Kafka K8s operator](/t/charmed-kafka-k8s-documentation/10296) to support Apache Kafka in Kubernetes environments. [/note] Charmed Apache Kafka is developed and supported by [Canonical](https://canonical.com/), as part of its commitment to @@ -34,7 +34,7 @@ Charmed Apache Kafka is a distribution of Apache Kafka. It’s an open-source pr - Explore [Canonical Data Fabric solutions](https://canonical.com/data) - [Contact us]([/t/13107) for all further questions -Apache®, Apache Kafka, Kafka®, and the Kafka logo are either registered trademarks or trademarks of the Apache Software Foundation in the United States and/or other countries. +Apache®, Apache Kafka, Kafka®, and the Apache Kafka logo are either registered trademarks or trademarks of the Apache Software Foundation in the United States and/or other countries. ## License @@ -45,7 +45,7 @@ The Charmed Apache Kafka Operator is free software, distributed under the Apache 1. [Tutorial](tutorial) 1. [1. Introduction](tutorial/t-overview.md) 1. [2. Set up the environment](tutorial/t-setup-environment.md) - 1. [3. Deploy Kafka](tutorial/t-deploy.md) + 1. [3. Deploy Apache Kafka](tutorial/t-deploy.md) 1. [4. Integrate with client applications](tutorial/t-relate-kafka.md) 1. [5. Manage passwords](tutorial/t-manage-passwords.md) 1. [6. Enable Encryption](tutorial/t-enable-encryption.md) @@ -68,7 +68,7 @@ The Charmed Apache Kafka Operator is free software, distributed under the Apache 1. [Revision 156/136](reference/r-releases/r-rev156_136.md) 1. [File System Paths](reference/r-file-system-paths.md) 1. [Snap Entrypoints](reference/r-snap-entrypoints.md) - 1. [Kafka Listeners](reference/r-listeners.md) + 1. [Apache Kafka Listeners](reference/r-listeners.md) 1. [Statuses](reference/r-statuses.md) 1. 
[Requirements](reference/r-requirements.md) 1. [Performance Tuning](reference/r-performance-tuning.md) diff --git a/docs/reference/r-file-system-paths.md b/docs/reference/r-file-system-paths.md index e0202a60..d7d997d6 100644 --- a/docs/reference/r-file-system-paths.md +++ b/docs/reference/r-file-system-paths.md @@ -1,30 +1,30 @@ # File system path -In the following table, we summarize some of the most relevant file paths used in the Kafka and ZooKeeper charms. +In the following table, we summarize some of the most relevant file paths used in the Apache Kafka and Apache ZooKeeper charms. -## Kafka +## Apache Kafka | Path | Description | Permission | |------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| | `/snap/charmed-kafka/current/opt/kafka` | Binary files for the Charmed Apache Kafka distribution. Note that this is a read-only Squashfs file system. | (read-only) | -| `/snap/charmed-kafka/current/opt/kafka/bin/*.sh` | General bash scripts to provide helpers and utilities for managing and interacting with Kafka. | (read-only) | -| `/var/snap/charmed-kafka/current/etc/kafka/` | Configuration files used by Kafka daemon process. These files are generally written and managed by the charm. | (owned by `snap_daemon`, managed by `charm`) | -| `/var/snap/charmed-kafka/common/var/log/kafka/` | Application Logging files generated by the Kafka daemon process. These files are written by the workload, but they may be read by other components to provide monitoring (for example, Grafana or other charms). | (owned and managed by `snap_daemon`) | -| `/var/snap/charmed-kafka/common/var/lib/kafka/` | Raw data stored persistently by Kafka during its operations. The files are written and managed by Kafka only. 
| (owned and managed by `snap_daemon`) | +| `/snap/charmed-kafka/current/opt/kafka/bin/*.sh` | General bash scripts to provide helpers and utilities for managing and interacting with Apache Kafka. | (read-only) | +| `/var/snap/charmed-kafka/current/etc/kafka/` | Configuration files used by Apache Kafka daemon process. These files are generally written and managed by the charm. | (owned by `snap_daemon`, managed by `charm`) | +| `/var/snap/charmed-kafka/common/var/log/kafka/` | Application Logging files generated by the Apache Kafka daemon process. These files are written by the workload, but they may be read by other components to provide monitoring (for example, Grafana or other charms). | (owned and managed by `snap_daemon`) | +| `/var/snap/charmed-kafka/common/var/lib/kafka/` | Raw data stored persistently by Apache Kafka during its operations. The files are written and managed by Apache Kafka only. | (owned and managed by `snap_daemon`) | External storage is used for storing persistent raw data that is mounted at `/var/snap/charmed-kafka/common/var/lib/kafka/`, with `` being a progressive number. Multiple storage volumes can be used for providing both horizontal scalability and provide IO parallelisation to enhance throughput. -## ZooKeeper +## Apache ZooKeeper | Path | Description | Permission | |--------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------| | `/snap/charmed-zookeeper/current/opt/zookeeper` | Binary files for the Charmed Apache ZooKeeper distribution. Note that this is a readonly squashfs file system. | (read-only) | -| `/snap/charmed-zookeeper/current/opt/zookeeper/bin/*.sh` | General bash scripts to provide helpers and utilities for managing and interacting with ZooKeeper. 
| (read-only) | -| `/var/snap/charmed-zookeeper/current/etc/zookeeper/ ` | Configuration files used by ZooKeeper daemon process. These files are generally written and managed by the charm. | (owned by `snap_daemon`, managed by `charm`) | -| `/var/snap/charmed-zookeeper/common/var/log/zookeeper/ ` | Application Logging files generated by the ZooKeeper daemon process. These files are written by the workload, but they may be read by other components to provide monitoring (for example, Grafana or other charms). | (owned and managed by `snap_daemon`) | -| `/var/snap/charmed-zookeeper/common/var/lib/zookeeper/` | Raw data stored persistently by ZooKeeper during its operations. The files are written and managed by ZooKeeper only. | (owned and managed by `snap_daemon`) | +| `/snap/charmed-zookeeper/current/opt/zookeeper/bin/*.sh` | General bash scripts to provide helpers and utilities for managing and interacting with Apache ZooKeeper. | (read-only) | +| `/var/snap/charmed-zookeeper/current/etc/zookeeper/ ` | Configuration files used by Apache ZooKeeper daemon process. These files are generally written and managed by the charm. | (owned by `snap_daemon`, managed by `charm`) | +| `/var/snap/charmed-zookeeper/common/var/log/zookeeper/ ` | Application Logging files generated by the Apache ZooKeeper daemon process. These files are written by the workload, but they may be read by other components to provide monitoring (for example, Grafana or other charms). | (owned and managed by `snap_daemon`) | +| `/var/snap/charmed-zookeeper/common/var/lib/zookeeper/` | Raw data stored persistently by Apache ZooKeeper during its operations. The files are written and managed by Apache ZooKeeper only. 
| (owned and managed by `snap_daemon`) | External storage is used for storing persistent raw data, and it is diff --git a/docs/reference/r-listeners.md b/docs/reference/r-listeners.md index a2bfdb27..954edd12 100644 --- a/docs/reference/r-listeners.md +++ b/docs/reference/r-listeners.md @@ -1,10 +1,10 @@ -# Kafka listeners +# Apache Kafka listeners Charmed Apache Kafka comes with a set of listeners that can be enabled for inter- and intra-cluster communication. *Internal listeners* are used for internal traffic and exchange of information -between Kafka brokers, whereas *external listeners* are used for external clients +between Apache Kafka brokers, whereas *external listeners* are used for external clients to be optionally enabled based the relations created on particular charm endpoints. Each listener is characterized by a specific port, scope and protocol. @@ -22,4 +22,6 @@ opened. | SSL_EXTERNAL | `trusted-certificate` + `certificates` | SSL | `9094` | external | | SSL_EXTERNAL | `trusted-ca` + `certificates` | SSL | `9094` | external | -> **Note** Since `cluster` is a peer-relation, the `SASL_INTERNAL` listener is always enabled. \ No newline at end of file +[note] +Since `cluster` is a peer-relation, the `SASL_INTERNAL` listener is always enabled. +[/note] diff --git a/docs/reference/r-performance-tuning.md b/docs/reference/r-performance-tuning.md index adb69e0d..dc030e97 100644 --- a/docs/reference/r-performance-tuning.md +++ b/docs/reference/r-performance-tuning.md @@ -4,7 +4,7 @@ This section contains some suggested values to get a better performance from Cha ## Virtual memory handling (recommended) -Kafka brokers make heavy use of the OS page cache to maintain performance. 
They never normally explicitly issue a command to ensure messages have been persisted to disk (`sync`), relying instead on the underlying OS to ensure that larger chunks (pages) of data are persisted from the page cache to the disk when the OS deems it efficient and/or necessary to do so. As such, there is a range of runtime kernel parameter tuning that is recommended to set on machines running Kafka to improve performance. +Apache Kafka brokers make heavy use of the OS page cache to maintain performance. They never normally explicitly issue a command to ensure messages have been persisted to disk (`sync`), relying instead on the underlying OS to ensure that larger chunks (pages) of data are persisted from the page cache to the disk when the OS deems it efficient and/or necessary to do so. As such, there is a range of runtime kernel parameter tuning that is recommended to set on machines running Apache Kafka to improve performance. To configure these settings, one can write them to `/etc/sysctl.conf` using `sudo echo $SETTING >> /etc/sysctl.conf`. Note that the settings shown below are simply sensible defaults that may not apply to every workload: ```bash @@ -18,7 +18,7 @@ vm.dirty_background_ratio=5 ## Memory maps (recommended) -Each Kafka log segment requires an `index` file and a `timeindex` file, both requiring one map area. The default OS maximum number of memory map areas a process can have is set by `vm.max_map_count=65536`. For production deployments with a large number of partitions and log-segments, it is likely to exceed the maximum OS limit. +Each Apache Kafka log segment requires an `index` file and a `timeindex` file, both requiring one map area. The default OS maximum number of memory map areas a process can have is set by `vm.max_map_count=65536`. For production deployments with a large number of partitions and log-segments, it is likely to exceed the maximum OS limit. 
It is recommended to set the mmap number sufficiently higher than the number of memory mapped files. This can also be written to `/etc/sysctl.conf`: @@ -28,7 +28,7 @@ vm.max_map_count= ## File descriptors (recommended) -Kafka uses file descriptors for log segments and open connections. If a broker hosts many partitions, keep in mind that the broker requires **at least** `(number_of_partitions)*(partition_size/segment_size)` file descriptors to track all the log segments and number of connections. +Apache Kafka uses file descriptors for log segments and open connections. If a broker hosts many partitions, keep in mind that the broker requires **at least** `(number_of_partitions)*(partition_size/segment_size)` file descriptors to track all the log segments and number of connections. To configure those limits, update the values and add the following to `/etc/security/limits.d/root.conf`: diff --git a/docs/reference/r-releases/r-rev156_126.md b/docs/reference/r-releases/r-rev156_126.md index 29213228..01d4b942 100644 --- a/docs/reference/r-releases/r-rev156_126.md +++ b/docs/reference/r-releases/r-rev156_126.md @@ -14,7 +14,7 @@ Please reach out should you have any question, comment, feedback or information. ## Features * Deploying on VM (tested with LXD, MAAS) -* ZooKeeper using SASL authentication +* Apache ZooKeeper using SASL authentication * Scaling up/down in one simple Juju command * Multi-broker support and Highly-Available setups * Inter-broker authenticated communication @@ -32,8 +32,8 @@ and [GitHub](https://github.com/canonical/kafka-operator/issues) platforms. 
## Inside the charms -* Charmed Apache ZooKeeper charm ships the ZooKeeper [3.8.2-ubuntu0](https://launchpad.net/zookeeper-releases/3.x/3.8.2-ubuntu0), built and supported by Canonical -* Charmed Apache Kafka charm ships the Kafka [3.6.0-ubuntu0](https://launchpad.net/kafka-releases/3.x/3.6.0-ubuntu0), built and supported by Canonical +* Charmed Apache ZooKeeper charm ships the Apache ZooKeeper [3.8.2-ubuntu0](https://launchpad.net/zookeeper-releases/3.x/3.8.2-ubuntu0), built and supported by Canonical +* Charmed Apache Kafka charm ships the Apache Kafka [3.6.0-ubuntu0](https://launchpad.net/kafka-releases/3.x/3.6.0-ubuntu0), built and supported by Canonical * Charmed Apache ZooKeeper charm is based on [charmed-zookeeper snap](https://snapcraft.io/charmed-zookeeper) on the `3/stable` (Ubuntu LTS “22.04” - core22-based) * Charmed Apache Kafka charm is based on [charmed-kafka snap](https://snapcraft.io/charmed-kafka) on the `3/stable` channel (Ubuntu LTS “22.04” - core22-based) * Principal charms supports the latest LTS series “22.04” only. 
@@ -42,19 +42,19 @@ More information about the artifacts are provided by the following table: | Artifact | Track/Series | Version/Revision | Code | |------------------------|--------------|------------------|---------------------------------------------------------------------------------------------------------------------| -| ZooKeeper distribution | 3.x | 3.8.2-ubuntu0 | [5bb82d](https://git.launchpad.net/zookeeper-releases/tree/?h=lp-3.8.2&id=5bb82df4ffba910a5b30dd42499921466405f087) | -| Kafka distribution | 3.x | 3.6.0-ubuntu0 | [424389](https://git.launchpad.net/kafka-releases/tree/?h=lp-3.6.0&id=424389bb8f230beaef4ccb94aca464b5d22ac310) | +| Apache ZooKeeper distribution | 3.x | 3.8.2-ubuntu0 | [5bb82d](https://git.launchpad.net/zookeeper-releases/tree/?h=lp-3.8.2&id=5bb82df4ffba910a5b30dd42499921466405f087) | +| Apache Kafka distribution | 3.x | 3.6.0-ubuntu0 | [424389](https://git.launchpad.net/kafka-releases/tree/?h=lp-3.6.0&id=424389bb8f230beaef4ccb94aca464b5d22ac310) | | Charmed Apache ZooKeeper snap | 3/stable | 28 | [9757f4](https://github.com/canonical/charmed-zookeeper-snap/tree/9757f4a2a889981275f8f2a1a87e1c78ae1adb77) | -| ZooKeeper operator | 3/stable | 126 | [9ebd9a](https://github.com/canonical/zookeeper-operator/commit/9ebd9a2050e0bd626feb0019222d45f211ca7774) | +| Charmed Apache ZooKeeper operator | 3/stable | 126 | [9ebd9a](https://github.com/canonical/zookeeper-operator/commit/9ebd9a2050e0bd626feb0019222d45f211ca7774) | | Charmed Apache Kafka snap | 3/stable | 30 | [c0ce27](https://github.com/canonical/charmed-kafka-snap/tree/c0ce275f70f688e66f10f295456d2b5ff33d4f64) | -| Kafka operator | 3/stable | 156 | [01d65c](https://github.com/canonical/kafka-operator/tree/01d65c3444b593d5f18d197a6514421afd3f2bc6) | +| Charmed Apache Kafka operator | 3/stable | 156 | [01d65c](https://github.com/canonical/kafka-operator/tree/01d65c3444b593d5f18d197a6514421afd3f2bc6) | ## Technical notes * A Charmed Apache Kafka cluster is secure by default, meaning 
that when deployed if there are no client charms related to it, external listeners will not be enabled. -* We recommend to deploy one `data-integrator` with `extra-user-roles=admin` alongside the Kafka deployment, in order to enable listeners and also create one user with elevated permission +* We recommend to deploy one `data-integrator` with `extra-user-roles=admin` alongside the Apache Kafka deployment, in order to enable listeners and also create one user with elevated permission to perform administrative tasks. For more information, see the [How-to manage application](/t/charmed-kafka-documentation-how-to-manage-app/10285) guide. * The current release has been tested with Juju 2.9.45+ and Juju 3.1+ -* Inplace upgrade for charms tracking `latest` is not supported, both for ZooKeeper and Kafka charms. Perform data migration to upgrade to a Charmed Apache Kafka cluster managed via a `3/stable` charm. +* Inplace upgrade for charms tracking `latest` is not supported, both for Apache ZooKeeper and Apache Kafka charms. Perform data migration to upgrade to a Charmed Apache Kafka cluster managed via a `3/stable` charm. For more information on how to perform the migration, see [How-to migrate a cluster](/t/charmed-kafka-documentation-how-to-migrate-a-cluster/10951) guide. \ No newline at end of file diff --git a/docs/reference/r-releases/r-rev156_136.md b/docs/reference/r-releases/r-rev156_136.md index 098e053c..44cfb6f4 100644 --- a/docs/reference/r-releases/r-rev156_136.md +++ b/docs/reference/r-releases/r-rev156_136.md @@ -25,8 +25,8 @@ and [GitHub](https://github.com/canonical/kafka-operator/issues) platforms. 
## Inside the charms -* Charmed Apache ZooKeeper charm ships the ZooKeeper [3.8.4-ubuntu0](https://launchpad.net/zookeeper-releases/3.x/3.8.4-ubuntu0), built and supported by Canonical -* Charmed Apache Kafka charm ships the Kafka [3.6.0-ubuntu0](https://launchpad.net/kafka-releases/3.x/3.6.0-ubuntu0), built and supported by Canonical +* Charmed Apache ZooKeeper charm ships the Apache ZooKeeper [3.8.4-ubuntu0](https://launchpad.net/zookeeper-releases/3.x/3.8.4-ubuntu0), built and supported by Canonical +* Charmed Apache Kafka charm ships the Apache Kafka [3.6.0-ubuntu0](https://launchpad.net/kafka-releases/3.x/3.6.0-ubuntu0), built and supported by Canonical * Charmed Apache ZooKeeper charm is based on [charmed-zookeeper snap](https://snapcraft.io/charmed-zookeeper) on the `3/stable` (Ubuntu LTS “22.04” - core22-based) * Charmed Apache Kafka charm is based on [charmed-kafka snap](https://snapcraft.io/charmed-kafka) on the `3/stable` channel (Ubuntu LTS “22.04” - core22-based) * Principal charms supports the latest LTS series “22.04” only. 
@@ -35,17 +35,17 @@ More information about the artifacts are provided by the following table: | Artifact | Track/Series | Version/Revision | Code | |------------------------|--------------|------------------|---------------------------------------------------------------------------------------------------------------------| -| ZooKeeper distribution | 3.x | 3.8.4-ubuntu0 | [78499c](https://git.launchpad.net/zookeeper-releases/tree/?h=lp-3.8.4&id=78499c9f4d4610f9fb963afdad1ffd1aab2a96b8) | -| Kafka distribution | 3.x | 3.6.0-ubuntu0 | [424389](https://git.launchpad.net/kafka-releases/tree/?h=lp-3.6.0&id=424389bb8f230beaef4ccb94aca464b5d22ac310) | +| Apache ZooKeeper distribution | 3.x | 3.8.4-ubuntu0 | [78499c](https://git.launchpad.net/zookeeper-releases/tree/?h=lp-3.8.4&id=78499c9f4d4610f9fb963afdad1ffd1aab2a96b8) | +| Apache Kafka distribution | 3.x | 3.6.0-ubuntu0 | [424389](https://git.launchpad.net/kafka-releases/tree/?h=lp-3.6.0&id=424389bb8f230beaef4ccb94aca464b5d22ac310) | | Charmed Apache ZooKeeper snap | 3/stable | 30 | [d85fed](https://github.com/canonical/charmed-zookeeper-snap/tree/d85fed4c2f83d99dbc028ff10c2e38915b6cdf04) | -| ZooKeeper operator | 3/stable | 136 | [0b7d66](https://github.com/canonical/zookeeper-operator/tree/0b7d66170d80e23804032034119a419f174bb965) | +| Charmed Apache ZooKeeper operator | 3/stable | 136 | [0b7d66](https://github.com/canonical/zookeeper-operator/tree/0b7d66170d80e23804032034119a419f174bb965) | | Charmed Apache Kafka snap | 3/stable | 30 | [c0ce27](https://github.com/canonical/charmed-kafka-snap/tree/c0ce275f70f688e66f10f295456d2b5ff33d4f64) | -| Kafka operator | 3/stable | 156 | [01d65c](https://github.com/canonical/kafka-operator/tree/01d65c3444b593d5f18d197a6514421afd3f2bc6) | +| Charmed Apache Kafka operator | 3/stable | 156 | [01d65c](https://github.com/canonical/kafka-operator/tree/01d65c3444b593d5f18d197a6514421afd3f2bc6) | ## Technical notes -* Rev126 on Charmed Apache ZooKeeper was observed to sporadically 
trigger ZooKeeper reconfiguration of the clusters by removing all server but the Juju leader from the ZooKeeper quorum. This leads to a +* Rev126 on Charmed Apache ZooKeeper was observed to sporadically trigger Apache ZooKeeper reconfiguration of the clusters by removing all servers but the Juju leader from the Apache ZooKeeper quorum. This leads to a non-highly available cluster, that it is however still up and running. The reconfiguration generally resulted from some glitch and connection drop with the Juju controller that resulted in transient inconsistent databag of juju events. This was once observed during a controller upgrade (see reported [bug](https://bugs.launchpad.net/juju/+bug/2053055) on Juju), but its occurrence is not limited to it. diff --git a/docs/reference/r-requirements.md b/docs/reference/r-requirements.md index c8bb2c63..39fc69d3 100644 --- a/docs/reference/r-requirements.md +++ b/docs/reference/r-requirements.md @@ -11,7 +11,7 @@ The minimum supported Juju versions are: ## Minimum requirements -For production environments, it is recommended to deploy at least five nodes for ZooKeeper and three for Kafka. While the following requirements are meant to be for production, the charm can be deployed in much smaller environments. +For production environments, it is recommended to deploy at least five nodes for Apache ZooKeeper and three for Apache Kafka. While the following requirements are meant to be for production, the charm can be deployed in much smaller environments. 
- 64GB of RAM - 24 cores diff --git a/docs/reference/r-snap-entrypoints.md b/docs/reference/r-snap-entrypoints.md index 1b30deba..e34f032e 100644 --- a/docs/reference/r-snap-entrypoints.md +++ b/docs/reference/r-snap-entrypoints.md @@ -1,11 +1,11 @@ # Charmed Apache Kafka snap entrypoints -Snap entrypoints wrap the Kafka Distribution Bash scripts and make sure +Snap entrypoints wrap the Apache Kafka Distribution Bash scripts and make sure that they run with the correct environment settings (configuration files, logging files, etc). Below is a reference table for the mapping between entrypoints and wrapped bash script: -| Snap Entrypoint | Kafka Distribution Bash Script | +| Snap Entrypoint | Apache Kafka Distribution Bash Script | |-------------------------------------------------|----------------------------------------------------------| | `charmed-kafka.daemon` | `$SNAP/opt/kafka/bin/kafka-server-start.sh` | | `charmed-kafka.log-dirs` | `$SNAP/opt/kafka/bin/kafka-log-dirs.sh` | @@ -33,7 +33,7 @@ Below is a reference table for the mapping between entrypoints and wrapped bash | `charmed-kafka.trogdor` | `$SNAP/opt/kafka/bin/trogdor.sh` | | `charmed-kafka.keytool` | `$SNAP/usr/lib/jvm/java-17-openjdk-amd64/bin/keytool` | -Available Kafka bin commands can also be found with: +Available Apache Kafka bin commands can also be found with: ``` snap info charmed-kafka --channel 3/stable diff --git a/docs/reference/r-statuses.md b/docs/reference/r-statuses.md index 30521e23..24766819 100644 --- a/docs/reference/r-statuses.md +++ b/docs/reference/r-statuses.md @@ -2,45 +2,45 @@ The charm follows [standard Juju applications statuses](https://juju.is/docs/olm/status-values#heading--application-status). 
Here you can find the expected end-users reactions on different statuses: -## Kafka +## Apache Kafka | Juju Status | Message | Expectations | Actions | |-----------------|----------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | **Active** | | Normal charm operations | No actions required | | **Active** | manual partition reassignment may be needed to utilize new storage volumes | Existing data is not automatically rebalanced when new storage is attached. New storage will be used for newly created topics and/or partitions | Inspect the storage utilization and based on the need, use the bash utility script `/snap/charmed-kafka/current/opt/kafka/bin/kafka-reassign-partitions.sh` for manual data rebalancing. | | **Active** | potential data loss due to storage removal without replication | Some partition/topics are not replicated on multiple storages, therefore potentially leading to data loss | Add new storage, increase replication of topics/partitions and/or rebalance data across multiple storages/brokers | -| **Active** | machine system settings are not optimal - see logs for info | The broker is running on a machine that has sub-optimal OS settings. Although this may not preclude Kafka to work, it may result in sub-optimal performances | Check the `juju debug-log` for insights on which settings are sub-optimal and may be changed | -| **Active** | sysctl params cannot be set. Is the machine running on a container? | Some of the sysctl settings required by Kafka could not be set, therefore affective Kafka performance and correct settings. 
This can also be due to the charm being deployed on the wrong substrate | Remove the deployment and make sure that the selected charm is correct given the Juju cloud substrate | +| **Active** | machine system settings are not optimal - see logs for info | The broker is running on a machine that has sub-optimal OS settings. Although this may not preclude Apache Kafka from working, it may result in sub-optimal performance | Check the `juju debug-log` for insights on which settings are sub-optimal and may be changed | +| **Active** | sysctl params cannot be set. Is the machine running on a container? | Some of the sysctl settings required by Apache Kafka could not be set, therefore affecting Apache Kafka performance and correct settings. This can also be due to the charm being deployed on the wrong substrate | Remove the deployment and make sure that the selected charm is correct given the Juju cloud substrate | | **Blocked** | unable to install charmed-kafka snap | There are issues with the network connection and/or the Snap Store | Check your internet connection and https://status.snapcraft.io/. Remove the application and when everything is ok, deploy the charm again | -| **Blocked** | snap service not running | The charm failed to start the snap daemon processes | Check the Kafka logs for insights on the issue | -| **Blocked** | missing required zookeeper relation | Kafka charm has not been connected to any ZooKeeper cluster | Relate to a ZooKeeper charm | -| **Blocked** | unit not connected to zookeeper | Although the relation is present, the unit has failed to connect to ZooKeeper | Make sure that Kafka and ZooKeeper can connect and exchange data. When using encryption, make sure that certificates/ca are correctly setup. 
| -| **Blocked** | tls must be enabled on both kafka and zookeeper | Encryption (and relation with TLS-certificates operators) must be either enabled or disabled on both Kafka and ZooKeeper | Make sure that both Kafka and ZooKeeper either both use or neither of them use encryption. | -| **Waiting** | zookeeper credentials not created yet | Credentials are being created on ZooKeeper, and Kafka is waiting to receive them to connect to ZooKeeper | | -| **Waiting** | internal broker credentials not yet added | Intra-broker credentials being created to enable communication and syncing among brokers belonging to the Kafka clusters. | | +| **Blocked** | snap service not running | The charm failed to start the snap daemon processes | Check the Apache Kafka logs for insights on the issue | +| **Blocked** | missing required zookeeper relation | Apache Kafka charm has not been connected to any Apache ZooKeeper cluster | Relate to an Apache ZooKeeper charm | +| **Blocked** | unit not connected to zookeeper | Although the relation is present, the unit has failed to connect to Apache ZooKeeper | Make sure that Apache Kafka and Apache ZooKeeper can connect and exchange data. When using encryption, make sure that certificates/ca are correctly setup. | +| **Blocked** | tls must be enabled on both kafka and zookeeper | Encryption (and relation with TLS-certificates operators) must be either enabled or disabled on both Apache Kafka and Apache ZooKeeper | Make sure that both Apache Kafka and Apache ZooKeeper either both use or neither of them use encryption. | +| **Waiting** | zookeeper credentials not created yet | Credentials are being created on Apache ZooKeeper, and Apache Kafka is waiting to receive them to connect to Apache ZooKeeper | | +| **Waiting** | internal broker credentials not yet added | Intra-broker credentials being created to enable communication and syncing among brokers belonging to the Apache Kafka clusters. 
| | | **Waiting** | unit waiting for signed certificates | Unit has requested a CSR request via the `certificates` relation and it is waiting to received the signed certificate | | | **Maintenance** | | Charm is performing the internal maintenance (e.g. cluster re-configuration, upgrade, ...) | No actions required | | **Error** | any | An unhanded internal error happened | Read the message hint. Execute `juju resolve ` after addressing the root of the error state | | **Terminated** | any | The unit is gone and will be cleaned by Juju soon | No actions possible | | **Unknown** | any | Juju doesn't know the charm app/unit status. Possible reason: K8s charm termination in progress. | Manual investigation required if status is permanent | -## ZooKeeper +## Apache ZooKeeper | Juju Status | Message | Expectations | Actions | |-----------------|----------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | **Active** | | Normal charm operations | No actions required | | **Blocked** | unable to install zookeeper service | There are issues with the network connection and/or the Snap Store | Check your internet connection and https://status.snapcraft.io/. 
Remove the application and when everything is ok, deploy the charm again | -| **Blocked** | zookeeper service not running | The charm failed to start the snap daemon processes | Check the ZooKeeper logs for insights on the issue | -| **Blocked** | zookeeper service is unreachable or not serving requests | The ZooKeeper service is either down or not exposed through the correct port | Check the ZooKeeper logs for the impacted units and insights on underlying issue | | -| **Waiting** | waiting for leader to create internal user credentials | The ZooKeeper cluster is being initialized and the leader is setting up credentials | | -| **Waiting** | other units starting first | ZooKeeper units are being started and added to the quorum in order | | +| **Blocked** | zookeeper service not running | The charm failed to start the snap daemon processes | Check the Apache ZooKeeper logs for insights on the issue | +| **Blocked** | zookeeper service is unreachable or not serving requests | The Apache ZooKeeper service is either down or not exposed through the correct port | Check the Apache ZooKeeper logs for the impacted units and insights on underlying issue | | +| **Waiting** | waiting for leader to create internal user credentials | The Apache ZooKeeper cluster is being initialized and the leader is setting up credentials | | +| **Waiting** | other units starting first | Apache ZooKeeper units are being started and added to the quorum in order | | | **Waiting** | unit waiting for signed certificates | Unit has requested a CSR request via the `certificates` relation and it is waiting to received the signed certificate | | | **Maintenance** | not all units registered IP | The units are being registered to the quorum | | | | -| **Maintenance** | cluster not stable - not all units related | Some ZooKeeper units are not connected, reducing cluster availability and obstructing elections | Make sure the units can reach each other and communicate | -| **Maintenance** | cluster not 
stable - quorum is stale | The cluster does not have an active quorum, preventing the cluster from running elections | Do not perform any extra-ordinary operation. Wait for the units to connect and form a quorum. If the problem persists, please check the ZooKeeper logs on all units for further insights. | -| **Maintenance** | cluster not stable - not all units added to quorum | Some ZooKeeper units are not part of the quorum, reducing cluster availability and obstructing elections | Do not perform any extra-ordinary operation. Wait for the units to connect and form a quorum. If some units keep being not connected, please check the ZooKeeper logs of such units for further insights. | -| **Maintenance** | provider not ready - not all units using same encryption | Units use different settings for encryption, therefore preventing correct cluster operations. | This situation can transiently occur when new protocols / certificates are being setup. If the message persist, please check the ZooKeeper logs for further insights. | -| **Maintenance** | provider not ready - switching quorum encryption | Encryption is being enabled / disabled. | This situation can transiently occur when encryption are being setup. If the message persist, please check the ZooKeeper logs for further insights. | +| **Maintenance** | cluster not stable - not all units related | Some Apache ZooKeeper units are not connected, reducing cluster availability and obstructing elections | Make sure the units can reach each other and communicate | +| **Maintenance** | cluster not stable - quorum is stale | The cluster does not have an active quorum, preventing the cluster from running elections | Do not perform any extra-ordinary operation. Wait for the units to connect and form a quorum. If the problem persists, please check the Apache ZooKeeper logs on all units for further insights. 
| +| **Maintenance** | cluster not stable - not all units added to quorum | Some Apache ZooKeeper units are not part of the quorum, reducing cluster availability and obstructing elections | Do not perform any extra-ordinary operation. Wait for the units to connect and form a quorum. If some units keep being not connected, please check the Apache ZooKeeper logs of such units for further insights. | +| **Maintenance** | provider not ready - not all units using same encryption | Units use different settings for encryption, therefore preventing correct cluster operations. | This situation can transiently occur when new protocols / certificates are being set up. If the message persists, please check the Apache ZooKeeper logs for further insights. | +| **Maintenance** | provider not ready - switching quorum encryption | Encryption is being enabled / disabled. | This situation can transiently occur when encryption is being set up. If the message persists, please check the Apache ZooKeeper logs for further insights. | | **Maintenance** | provider not ready - portUnification not yet disabled | Specifies that the client port should accept SSL connections (using the same configuration as the secure client port). | | | **Error** | any | An unhanded internal error happened | Read the message hint. Run `juju resolve ` after addressing the root of the error state | | **Terminated** | any | The unit is gone and will be cleaned by Juju soon | No actions possible | diff --git a/docs/tutorial/t-cleanup-environment.md b/docs/tutorial/t-cleanup-environment.md index f0bb37b4..fb80008e 100644 --- a/docs/tutorial/t-cleanup-environment.md +++ b/docs/tutorial/t-cleanup-environment.md @@ -4,7 +4,9 @@ This is part of the [Charmed Apache Kafka Tutorial](/t/charmed-kafka-tutorial-ov If you're done using Charmed Apache Kafka and Juju and would like to free up resources on your machine, you can remove Charmed Apache Kafka, Charmed Apache ZooKeeper and Juju. 
-> **Warning**: when you remove Charmed Apache Kafka as shown below you will lose all the data in Kafka. Further, when you remove Juju as shown below you will lose access to any other applications you have hosted on Juju. +[note type="caution"] +Removing Charmed Apache Kafka as shown below will delete all the data in the Apache Kafka. Further, when you remove Juju as shown below you lose access to any other applications you have hosted on Juju. +[/note] To remove Charmed Apache Kafka and the model it is hosted on run the command: @@ -26,7 +28,7 @@ sudo snap remove juju --purge ## What's next? -In this tutorial, we've successfully deployed Kafka, added/removed replicas, added/removed users to/from the cluster, and even enabled and disabled TLS. +In this tutorial, we've successfully deployed Apache Kafka, added/removed replicas, added/removed users to/from the cluster, and even enabled and disabled TLS. You may now keep your Charmed Apache Kafka deployment running or remove it entirely using the steps in [Remove Charmed Apache Kafka and Juju](#remove-charmed-kafka-and-juju). If you're looking for what to do next you can: - Run [Charmed Apache Kafka on Kubernetes](https://github.com/canonical/kafka-k8s-operator). diff --git a/docs/tutorial/t-deploy.md b/docs/tutorial/t-deploy.md index 573d366b..5efa4519 100644 --- a/docs/tutorial/t-deploy.md +++ b/docs/tutorial/t-deploy.md @@ -2,7 +2,7 @@ This is part of the [Charmed Apache Kafka Tutorial](/t/charmed-kafka-tutorial-ov ## Deploy Charmed Apache Kafka (and Charmed Apache ZooKeeper) -To deploy Charmed Apache Kafka, all you need to do is run the following commands, which will automatically fetch [Kafka](https://charmhub.io/kafka?channel=3/stable) and [ZooKeeper](https://charmhub.io/zookeeper?channel=3/stable) charms from [Charmhub](https://charmhub.io/) and deploy them to your model. 
For example, to deploy a five ZooKeeper unit and three Kafka unit cluster, you can simply run: +To deploy Charmed Apache Kafka, all you need to do is run the following commands, which will automatically fetch [Apache Kafka](https://charmhub.io/kafka?channel=3/stable) and [Apache ZooKeeper](https://charmhub.io/zookeeper?channel=3/stable) charms from [Charmhub](https://charmhub.io/) and deploy them to your model. For example, to deploy a five Apache ZooKeeper unit and three Apache Kafka unit cluster, you can simply run: ```shell $ juju deploy zookeeper -n 5 @@ -22,7 +22,7 @@ juju status --watch 1s ``` This command is useful for checking the status of Charmed Apache ZooKeeper and Charmed Apache Kafka and gathering information about the machines hosting the two applications. Some of the helpful information it displays includes IP addresses, ports, state, etc. -The command updates the status of the cluster every second and as the application starts you can watch the status and messages of Charmed Apache Kafka and ZooKeeper change. +The command updates the status of the cluster every second and as the application starts you can watch the status and messages of Charmed Apache Kafka and Apache ZooKeeper change. Wait until the application is ready - when it is ready, `juju status --watch 1s` will show: @@ -57,7 +57,7 @@ Machine State Address Inst id Series AZ Message To exit the screen with `juju status --watch 1s`, enter `Ctrl+c`. -## Access Kafka cluster +## Access Apache Kafka cluster To watch the process, `juju status` can be used. Once all the units show as `active|idle` the credentials to access a broker can be queried with: @@ -77,24 +77,26 @@ password: e2sMfYLQg7sbbBMFTx1qlaZQKTUxr09x username: admin ``` -Providing you the `username` and `password` of the Kafka cluster admin user. +Providing you the `username` and `password` of the Apache Kafka cluster admin user. 
-> **IMPORTANT** Note that when no other application is related to Kafka, the cluster is secured-by-default and external listeners (bound to port `9092`) are disabled, thus preventing any external incoming connection. +[note type="caution"] +When no other application is related to Apache Kafka, the cluster is secured-by-default and external listeners (bound to port `9092`) are disabled, thus preventing any external incoming connection. +[/note] -Nevertheless, it is still possible to run a command from within the Kafka cluster using the internal listeners in place of the external ones. +Nevertheless, it is still possible to run a command from within the Apache Kafka cluster using the internal listeners in place of the external ones. The internal endpoints can be constructed by replacing the `19092` port in the `bootstrap.servers` returned in the output above, for example: ```shell INTERNAL_LISTENERS=$(juju run kafka/leader get-admin-credentials | grep "bootstrap.servers" | cut -d "=" -f2 | sed -s "s/\:9092/:19092/g") ``` -Once you have fetched the `INTERNAL_LISTENERS`, log in to one of the Kafka containers in one of the units: +Once you have fetched the `INTERNAL_LISTENERS`, log in to one of the Apache Kafka containers in one of the units: ```shell juju ssh kafka/leader sudo -i ``` -When the unit is started, the Charmed Apache Kafka Operator installs the [`charmed-kafka`](https://snapcraft.io/charmed-kafka) Snap in the unit that provides a number of entrypoints (that corresponds to the bin commands in the Kafka distribution) for performing various administrative tasks, e.g `charmed-kafka.config` to update cluster configuration, `charmed-kafka.topics` for topic management, and many more! 
+When the unit is started, the Charmed Apache Kafka Operator installs the [`charmed-kafka`](https://snapcraft.io/charmed-kafka) Snap in the unit that provides a number of entrypoints (that corresponds to the bin commands in the Apache Kafka distribution) for performing various administrative tasks, e.g `charmed-kafka.config` to update cluster configuration, `charmed-kafka.topics` for topic management, and many more! Within the machine, the Charmed Apache Kafka Operator also creates a `client.properties` file that already provides the relevant settings to connect to the cluster using the CLI ```shell @@ -130,7 +132,7 @@ charmed-kafka.topics \ --command-config $CLIENT_PROPERTIES ``` -Other available Kafka bin commands can also be found with: +Other available Apache Kafka bin commands can also be found with: ```shell snap info charmed-kafka @@ -140,4 +142,4 @@ snap info charmed-kafka However, although the commands above can run within the cluster, it is generally recommended during operations to enable external listeners and use these for running the admin commands from outside the cluster. -To do so, as we will see in the next section, we will deploy a [data-integrator](https://charmhub.io/data-integrator) charm and relate it to Kafka. \ No newline at end of file +To do so, as we will see in the next section, we will deploy a [data-integrator](https://charmhub.io/data-integrator) charm and relate it to Apache Kafka. \ No newline at end of file diff --git a/docs/tutorial/t-enable-encryption.md b/docs/tutorial/t-enable-encryption.md index 14a60e19..391c412e 100644 --- a/docs/tutorial/t-enable-encryption.md +++ b/docs/tutorial/t-enable-encryption.md @@ -6,8 +6,10 @@ This is part of the [Charmed Apache Kafka Tutorial](/t/charmed-kafka-tutorial-ov Again, relations come in handy here as TLS is enabled via relations; i.e. 
by relating Charmed Apache Kafka to the [Self-signed Certificates Charm](https://charmhub.io/self-signed-certificates) via the [`tls-certificates`](https://github.com/canonical/charm-relation-interfaces/blob/main/interfaces/tls_certificates/v1/README.md) charm relations. The `tls-certificates` relation centralises TLS certificate management in a consistent manner and handles providing, requesting, and renewing TLS certificates, making it possible to use different providers, like the self-signed certificates but also other services, e.g. Let's Encrypt.
 
-> *Note: In this tutorial, we will distribute [self-signed certificates](https://en.wikipedia.org/wiki/Self-signed_certificate) to all charms (Kafka, ZooKeeper and client applications) that are signed using a root self-signed CA
+[note]
+In this tutorial, we will distribute [self-signed certificates](https://en.wikipedia.org/wiki/Self-signed_certificate) to all charms (Apache Kafka, Apache ZooKeeper and client applications) that are signed using a root self-signed CA
-that is also trusted by all applications. This setup is only for show-casing purposes and self-signed certificates should **never** be used in a production cluster. For more information about which charm may better suit your use-case, please refer to [this post](https://charmhub.io/topics/security-with-x-509-certificates).*
+that is also trusted by all applications. This setup is only for showcasing purposes and self-signed certificates should **never** be used in a production cluster. For more information about which charm may better suit your use case, please refer to [this post](https://charmhub.io/topics/security-with-x-509-certificates).
+[/note]
 
 ### Configure TLS
 
@@ -42,7 +44,7 @@ juju relate zookeeper self-signed-certificates
 juju relate kafka:certificates self-signed-certificates
 ```
 
-After the charms settle into `active/idle` states, the Kafka listeners should now have been swapped to the
+After the charms settle into `active/idle` states, the Apache Kafka listeners should now have been swapped to the
 default encrypted port 9093. 
This can be tested by testing whether the ports are open/closed with `telnet` ```shell @@ -52,10 +54,10 @@ telnet 9093 ### Enable TLS encrypted connection -Once the Kafka cluster is enabled to use encrypted connection, client applications should be configured as well to connect to +Once the Apache Kafka cluster is enabled to use encrypted connection, client applications should be configured as well to connect to the correct port as well as trust the self-signed CA provided by the `self-signed-certificates` charm. -Make sure that the `kafka-test-app` is not connected to the Kafka charm, by removing the relation if it exists +Make sure that the `kafka-test-app` is not connected to the Apache Kafka charm, by removing the relation if it exists ```shell juju remove-relation kafka-test-app kafka @@ -80,7 +82,7 @@ and then relate with the `kafka` cluster juju relate kafka kafka-test-app ``` -As before, you can check that the messages are pushed into the Kafka cluster by inspecting the logs +As before, you can check that the messages are pushed into the Apache Kafka cluster by inspecting the logs ```shell juju exec --application kafka-test-app "tail /tmp/*.log" diff --git a/docs/tutorial/t-manage-passwords.md b/docs/tutorial/t-manage-passwords.md index 2573a587..190f2713 100644 --- a/docs/tutorial/t-manage-passwords.md +++ b/docs/tutorial/t-manage-passwords.md @@ -2,7 +2,7 @@ This is part of the [Charmed Apache Kafka Tutorial](/t/charmed-kafka-tutorial-ov ## Manage passwords -Passwords help to secure our cluster and are essential for security. Over time it is a good practice to change the password frequently. Here we will go through setting and changing the password both for the admin user and external Kafka users managed by the data-integrator. +Passwords help to secure our cluster and are essential for security. Over time it is a good practice to change the password frequently. 
Here we will go through setting and changing the password both for the admin user and external Apache Kafka users managed by the data-integrator.
 
 ### Admin user
 
@@ -64,7 +64,9 @@ unit-kafka-1:
 
 The admin password is under the result: `admin-password`. It should be different from your previous password.
 
-> **Note** When changing the admin password you will also need to update the admin password the in Kafka connection parameters; as the old password will no longer be valid.*
+[note]
+When changing the admin password you will also need to update the admin password in the Apache Kafka connection parameters, as the old password will no longer be valid.
+[/note]
 
 #### Set the admin password
 
@@ -91,15 +93,17 @@ unit-kafka-1:
 
 The admin password under the result: `admin-password` should match whatever you passed in when you entered the command.
 
-> **Note** When changing the admin password you will also need to update the admin password in the Kafka connection parameters, as the old password will no longer be valid.*
+[note]
+When changing the admin password you will also need to update the admin password in the Apache Kafka connection parameters, as the old password will no longer be valid.
+[/note]
 
-### External Kafka users
+### External Apache Kafka users
 
-Unlike Admin management, the password management for external Kafka users is instead managed using relations. Let's see this into play with the Data Integrator charm, that we have deployed in the previous part of the tutorial.
+Unlike Admin management, the password management for external Apache Kafka users is instead managed using relations. Let's see this in action with the Data Integrator charm, which we have deployed in the previous part of the tutorial.
 
 #### Retrieve the password
 
-Similarly to the Kafka application, also the `data-integrator` exposes an action to retrieve the credentials, e.g.
+Similarly to the Apache Kafka application, the `data-integrator` also exposes an action to retrieve the credentials, e.g.
 
```shell juju run data-integrator/leader get-credentials @@ -192,7 +196,9 @@ Machine State Address Inst id Series AZ Message 8 started 10.244.26.4 juju-f1a2cd-8 jammy Running ``` -> **Note** The operations above would also apply to charmed applications that implement the `kafka_client` relation, for which password rotation and user deletion can be achieved in the same consistent way. +[note] +The operations above would also apply to charmed applications that implement the `kafka_client` relation, for which password rotation and user deletion can be achieved in the same consistent way. +[/note] ## What's next? diff --git a/docs/tutorial/t-overview.md b/docs/tutorial/t-overview.md index fc5b5aee..189d4b6f 100644 --- a/docs/tutorial/t-overview.md +++ b/docs/tutorial/t-overview.md @@ -2,21 +2,21 @@ The Charmed Apache Kafka Operator delivers automated operations management from [Day 0 to Day 2](https://codilime.com/blog/day-0-day-1-day-2-the-software-lifecycle-in-the-cloud-age/) on the [Apache Kafka](https://kafka.apache.org/) event streaming platform. It is an open source, end-to-end, production-ready data platform [on top of Juju](https://juju.is/). As a first step this tutorial shows you how to get Charmed Apache Kafka up and running, but the tutorial does not stop there. -As currently Kafka requires a paired [ZooKeeper](https://zookeeper.apache.org/) deployment in production, this operator makes use of the [ZooKeeper Operator](https://github.com/canonical/zookeeper-operator) for various essential functions. +As currently Apache Kafka requires a paired [Apache ZooKeeper](https://zookeeper.apache.org/) deployment in production, this operator makes use of the [Charmed Apache ZooKeeper Operator](https://github.com/canonical/zookeeper-operator) for various essential functions. Through this tutorial you will learn a variety of operations, everything from adding replicas to advanced operations such as enabling Transport Layer Security (TLS). 
In this tutorial we will walk through how to: - Set up your environment using LXD and Juju. -- Deploy Kafka using a couple of commands. +- Deploy Apache Kafka using a couple of commands. - Get the admin credentials directly. - Add high availability with replication. - Change the admin password. -- Automatically create Kafka users via Juju relations. +- Automatically create Apache Kafka users via Juju relations. While this tutorial intends to guide and teach you as you deploy Charmed Apache Kafka, it will be most beneficial if you already have a familiarity with: - Basic terminal commands. -- Kafka concepts such as replication and users. +- Apache Kafka concepts such as replication and users. ## Minimum requirements @@ -32,7 +32,7 @@ Before we start, make sure your machine meets the following requirements: Here’s an overview of the steps required with links to our separate tutorials that deal with each individual step: * [Set up the environment](/t/charmed-kafka-tutorial-setup-environment/10575) -* [Deploy Kafka](/t/charmed-kafka-tutorial-deploy-kafka/10567) +* [Deploy Apache Kafka](/t/charmed-kafka-tutorial-deploy-kafka/10567) * [Integrate with client applications](/t/charmed-kafka-tutorial-relate-kafka/10573) * [Manage passwords](/t/charmed-kafka-tutorial-manage-passwords/10569) * [Enable encryption](/t/charmed-kafka-documentation-tutorial-enable-security/12043) diff --git a/docs/tutorial/t-relate-kafka.md b/docs/tutorial/t-relate-kafka.md index 81f8c0a3..6d76de8b 100644 --- a/docs/tutorial/t-relate-kafka.md +++ b/docs/tutorial/t-relate-kafka.md @@ -4,11 +4,11 @@ This is part of the [Charmed Apache Kafka Tutorial](/t/charmed-kafka-tutorial-ov As mentioned in the previous section of the Tutorial, the recommended way to create and manage users is by means of another charm: the [Data Integrator Charm](https://charmhub.io/data-integrator). 
This lets us to encode users directly in the Juju model, and - as shown in the following - rotate user credentials with and without application downtime using Relations.
 
-> Relations, or what Juju documentation describes also as [Integrations](https://juju.is/docs/sdk/integration), let two charms to exchange information and interact with one another. Creating a relation between Kafka and the Data Integrator will automatically generate a username, password, and assign read/write permissions on a given topic. This is the simplest method to create and manage users in Charmed Apache Kafka.
+> Relations, or what Juju documentation describes also as [Integrations](https://juju.is/docs/sdk/integration), let two charms exchange information and interact with one another. Creating a relation between Apache Kafka and the Data Integrator will automatically generate a username, password, and assign read/write permissions on a given topic. This is the simplest method to create and manage users in Charmed Apache Kafka.
 
 ### Data Integrator charm
 
-The [Data Integrator charm](https://charmhub.io/data-integrator) is a bare-bones charm for central management of database users, providing support for different kinds of data platforms (e.g. MongoDB, MySQL, PostgreSQL, Kafka, OpenSearch, etc.) with a consistent, opinionated and robust user experience. To deploy the Data Integrator charm we can use the command `juju deploy` we have learned above:
+The [Data Integrator charm](https://charmhub.io/data-integrator) is a bare-bones charm for central management of database users, providing support for different kinds of data platforms (e.g. MongoDB, MySQL, PostgreSQL, Apache Kafka, OpenSearch, etc.) with a consistent, opinionated and robust user experience. 
To deploy the Data Integrator charm we can use the command `juju deploy` we have learned above:
 
 ```shell
 juju deploy data-integrator --channel stable --config topic-name=test-topic --config extra-user-roles=producer,consumer
@@ -21,9 +21,9 @@ Located charm "data-integrator" in charm-hub, revision 11
 Deploying "data-integrator" from charm-hub charm "data-integrator", revision 11 in channel stable on jammy
 ```
 
-### Relate to Kafka
+### Relate to Apache Kafka
 
-Now that the Database Integrator Charm has been set up, we can relate it to Kafka. This will automatically create a username, password, and database for the Database Integrator Charm. Relate the two applications with:
+Now that the Data Integrator Charm has been set up, we can relate it to Apache Kafka. This will automatically create a username, password, and topic for the Data Integrator Charm. Relate the two applications with:
 
 ```shell
 juju relate data-integrator kafka
@@ -87,7 +87,7 @@ Save the value listed under `bootstrap-server`, `username` and `password`. *(Not
 
 ### Produce/consume messages
 
-We will now use the username and password to produce some messages to Kafka. To do so, we will first deploy the Kafka Test App (available [here](https://charmhub.io/kafka-test-app)): a test charm that also bundles some python scripts to push data to Kafka, e.g.
+We will now use the username and password to produce some messages to Apache Kafka. To do so, we will first deploy the [Apache Kafka Test App](https://charmhub.io/kafka-test-app): a test charm that also bundles some python scripts to push data to Apache Kafka, e.g.
 
 ```shell
 juju deploy kafka-test-app -n1 --channel edge
@@ -169,25 +169,27 @@ python3 -m charms.kafka.v0.client \
 
 ### Charm client applications
 
-Actually, the Data Integrator is only a very special client charm, that implements the `kafka_client` relation for exchanging data with the Kafka charm and user management via relations. 
+Actually, the Data Integrator is only a very special client charm that implements the `kafka_client` relation for exchanging data with the Apache Kafka charm and user management via relations.
 
-For example, the steps above for producing and consuming messages to Kafka have also been implemented in the `kafka-test-app` charm (that also implement the `kafka_client` relation) providing a fully integrated charmed user-experience, where producing/consuming messages can simply be achieved using relations.
+For example, the steps above for producing and consuming messages to Apache Kafka have also been implemented in the `kafka-test-app` charm (that also implements the `kafka_client` relation) providing a fully integrated charmed user-experience, where producing/consuming messages can simply be achieved using relations.
 
 #### Producing messages
 
-To produce messages to Kafka, we need to configure the `kafka-test-app` to act as a producer, publishing messages to a specific topic:
+To produce messages to Apache Kafka, we need to configure the `kafka-test-app` to act as a producer, publishing messages to a specific topic:
 
 ```shell
 juju config kafka-test-app topic_name=test_kafka_app_topic role=producer num_messages=20
 ```
 
-To start producing messages to Kafka, we **JUST** simply relate the Kafka Test App with Kafka
+To start producing messages to Apache Kafka, we simply relate the Apache Kafka Test App with Apache Kafka
 
 ```shell
 juju relate kafka-test-app kafka
 ```
 
-> **Note**: This will both take care of creating a dedicated user (as much as done for the data-integrator) as well as start a producer process publishing messages to the `test_kafka_app_topic` topic, basically automating what was done before by hands.
+[note]
+This will both take care of creating a dedicated user (as much as done for the data-integrator) as well as start a producer process publishing messages to the `test_kafka_app_topic` topic, basically automating what was done before by hand. 
+[/note] After some time, the `juju status` output should show @@ -226,8 +228,8 @@ Note that the `kafka-test-app` charm can also similarly be used to consume messa juju config kafka-test-app topic_name=test_kafka_app_topic role=consumer consumer_group_prefix=cg ``` -After configuring the Kafka Test App, just relate it again with the Kafka charm. This will again create a new user and start the consumer process. +After configuring the Apache Kafka Test App, just relate it again with the Apache Kafka charm. This will again create a new user and start the consumer process. ## What's next? -In the next section, we will learn how to rotate and manage the passwords for the Kafka users, both the admin one and the ones managed by the Data Integrator. \ No newline at end of file +In the next section, we will learn how to rotate and manage the passwords for the Apache Kafka users, both the admin one and the ones managed by the Data Integrator. \ No newline at end of file diff --git a/docs/tutorial/t-setup-environment.md b/docs/tutorial/t-setup-environment.md index 0d2d8035..7d8ea064 100644 --- a/docs/tutorial/t-setup-environment.md +++ b/docs/tutorial/t-setup-environment.md @@ -4,7 +4,7 @@ This is part of the [Charmed Apache Kafka Tutorial](/t/charmed-kafka-tutorial-ov For this tutorial, we will need to setup the environment with two main components: * LXD that is a simple and lightweight virtual machine provisioner -* Juju that will help us to deploy and manage Kafka and related applications +* Juju that will help us to deploy and manage Apache Kafka and related applications ### Prepare LXD diff --git a/metadata.yaml b/metadata.yaml index c31e669f..c6a15364 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -3,11 +3,11 @@ name: kafka display-name: Apache Kafka description: | - Kafka is an event streaming platform. This charm deploys and operates Kafka on + Apache Kafka is an event streaming platform. This charm deploys and operates Apache Kafka on a VM machines environment. 
Apache Kafka is a free, open source software project by the Apache Software Foundation. - Users can find out more at the [Kafka project page](https://kafka.apache.org/). + Users can find out more at the [Apache Kafka project page](https://kafka.apache.org/). summary: Charmed Apache Kafka Operator docs: https://discourse.charmhub.io/t/charmed-kafka-documentation/10288 source: https://github.com/canonical/kafka-operator diff --git a/src/alert_rules/prometheus/kafka_metrics.rules b/src/alert_rules/prometheus/kafka_metrics.rules index 57de7025..bc985cc6 100644 --- a/src/alert_rules/prometheus/kafka_metrics.rules +++ b/src/alert_rules/prometheus/kafka_metrics.rules @@ -11,7 +11,7 @@ groups: severity: critical annotations: summary: Prometheus target missing (instance {{ $labels.instance }}) - description: "Kafka target has disappeared. An exporter might be crashed.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" + description: "Apache Kafka target has disappeared. An exporter might be crashed.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - alert: JvmMemory Filling Up expr: (sum by (instance)(jvm_memory_bytes_used{area="heap",juju_charm!=".*"}) / sum by (instance)(jvm_memory_bytes_max{area="heap",juju_charm!=".*"})) * 100 > 80 @@ -63,7 +63,7 @@ groups: severity: warning annotations: summary: 'Broker {{ $labels.instance }} :: Broker State :: The ZooKeeper session has expired.' - description: 'When a session expires, we can have leader changes and even a new controller. It is important to keep an eye on the number of such events across a Kafka cluster and if the overall number is high.' + description: 'When a session expires, we can have leader changes and even a new controller. It is important to keep an eye on the number of such events across an Apache Kafka cluster and if the overall number is high.' 
# ========================= # Controller and Partitions diff --git a/src/grafana_dashboards/kafka-metrics.json b/src/grafana_dashboards/kafka-metrics.json index 462b7c77..3996cafb 100644 --- a/src/grafana_dashboards/kafka-metrics.json +++ b/src/grafana_dashboards/kafka-metrics.json @@ -58,7 +58,7 @@ } ] }, - "description": "Dashboard for Kafka metrics based on jmx_exporter prometheus", + "description": "Dashboard for Apache Kafka metrics based on jmx_exporter prometheus", "editable": true, "gnetId": 11962, "graphTooltip": 0, @@ -2635,7 +2635,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "ZooKeeper Disconnects Per Sec", + "title": "Apache ZooKeeper Disconnects Per Sec", "tooltip": { "shared": true, "sort": 0, @@ -2739,7 +2739,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "ZooKeeper Expires Per Sec", + "title": "Apache ZooKeeper Expires Per Sec", "tooltip": { "shared": true, "sort": 0, @@ -2845,7 +2845,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "ZooKeeper Request Latency Ms", + "title": "Apache ZooKeeper Request Latency Ms", "tooltip": { "shared": true, "sort": 0, @@ -4511,7 +4511,7 @@ ] }, "timezone": "", - "title": "Kafka Metrics", + "title": "Apache Kafka Metrics", "uid": "5nhADrDWk", "version": 10 } diff --git a/tests/integration/app-charm/actions.yaml b/tests/integration/app-charm/actions.yaml index a8949872..6c8a0064 100644 --- a/tests/integration/app-charm/actions.yaml +++ b/tests/integration/app-charm/actions.yaml @@ -9,7 +9,7 @@ run-mtls-producer: params: bootstrap-server: type: string - description: The address for mtls Kafka + description: The address for mtls Apache Kafka broker-ca: type: string description: The CA used for broker identity from certificates relation @@ -22,11 +22,11 @@ get-offsets: params: bootstrap-server: type: string - description: The address for mtls Kafka + description: The address for mtls Apache Kafka create-topic: description: Attempts the configured 
topic params: bootstrap-server: type: string - description: The address for SASL_PLAINTEXT Kafka + description: The address for SASL_PLAINTEXT Apache Kafka diff --git a/tests/integration/app-charm/config.yaml b/tests/integration/app-charm/config.yaml index b8665226..ed4d5087 100644 --- a/tests/integration/app-charm/config.yaml +++ b/tests/integration/app-charm/config.yaml @@ -1,6 +1,6 @@ options: topic-name: description: | - The topic-name to request when relating to the Kafka application + The topic-name to request when relating to the Apache Kafka application type: string default: test-topic diff --git a/tests/integration/app-charm/metadata.yaml b/tests/integration/app-charm/metadata.yaml index 9d9af6bf..4ba32c93 100644 --- a/tests/integration/app-charm/metadata.yaml +++ b/tests/integration/app-charm/metadata.yaml @@ -3,7 +3,7 @@ name: application description: | - Dummy charm used in integration tests for Kafka. + Dummy charm used in integration tests for Apache Kafka. summary: | Dummy charm application meant to be used only for testing of the libs in this repository.