diff --git a/.gitignore b/.gitignore index 7318728cc..9d1d59e5d 100644 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,5 @@ \#* *.prof tmp-build -.DS_Store \ No newline at end of file +.DS_Store +.idea \ No newline at end of file diff --git a/.semaphore/project.yml b/.semaphore/project.yml new file mode 100644 index 000000000..72ede6a3a --- /dev/null +++ b/.semaphore/project.yml @@ -0,0 +1,23 @@ +apiVersion: v1alpha +kind: Project +metadata: + name: confluent-kafka-go + description: "" +spec: + visibility: private + repository: + url: git@github.com:confluentinc/confluent-kafka-go.git + run_on: + - branches + - tags + - pull_requests + pipeline_file: .semaphore/semaphore.yml + integration_type: github_app + status: + pipeline_files: + - path: .semaphore/semaphore.yml + level: pipeline + whitelist: + branches: + - master + - "/^v\\d+\\.\\d+\\.x$/" \ No newline at end of file diff --git a/.semaphore/semaphore.yml b/.semaphore/semaphore.yml new file mode 100644 index 000000000..6648f62e9 --- /dev/null +++ b/.semaphore/semaphore.yml @@ -0,0 +1,81 @@ +version: v1.0 +name: build-test-release +agent: + machine: + type: e1-standard-4 + os_image: ubuntu1804 + +auto_cancel: + running: + when: "branch != 'master'" + +execution_time_limit: + hours: 1 + +global_job_config: + secrets: + - name: vault_sem2_approle + prologue: + commands: + - chmod 400 ~/.ssh/id_rsa + - sem-version go 1.18 + - export "GOPATH=$(go env GOPATH)" + - export "SEMAPHORE_GIT_DIR=${GOPATH}/src/github.com/confluentinc/${SEMAPHORE_PROJECT_NAME}" + - export "PATH=${GOPATH}/bin:${PATH}" + - mkdir -vp "${SEMAPHORE_GIT_DIR}" "${GOPATH}/bin" + - git config --global url."git@github.com:".insteadOf "https://github.com/" + - sem-service start postgres 13 + # This username/password is just to create test db to run tests. DB will be destroyed when build finishes. + - psql -U postgres -h localhost -c "CREATE USER runner WITH PASSWORD 'semaphoredb';" + - psql -U postgres -h localhost -c "ALTER USER runner WITH SUPERUSER;" + - export ADMIN_DB_URL=postgres://runner:semaphoredb@localhost:5432 + # Let the app set the default but don't let cc-go.mk sets the default + - export DB_URL="" + - sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' + - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + - sudo apt-get update + - sudo apt-get -y install postgresql-client-13 + - checkout + - make install-vault + - . mk-include/bin/vault-setup + - . vault-sem-get-secret gitconfig + - . vault-sem-get-secret ssh_id_rsa + - . vault-sem-get-secret ssh_config + - . vault-sem-get-secret netrc + - . vault-sem-get-secret artifactory-docker-helm + - . vault-sem-get-secret maven-settings + - . vault-sem-get-secret cpd_gcloud + - . vault-sem-get-secret aws_credentials + - . vault-sem-get-secret testbreak-reporting + - . vault-sem-get-secret v1/ci/kv/service-foundations/cc-mk-include + - exec &> >(tee -a build.log) + - make init-ci + epilogue: + always: + commands: + - make epilogue-ci + +blocks: + - name: "Build, Test, Release" + run: + # don't run the build or unit tests on non-functional changes... 
+ when: "change_in('/', {exclude: ['/.deployed-versions/', '.github/']})" + task: + # You can customize your CI job here +# env_vars: +# # custom env_vars +# prologue: +# commands: +# # custom vault secrets +# # custom prologue commands + jobs: + - name: "Build, Test, Release" + commands: + - make build + - make test + - make release-ci + epilogue: + always: + commands: + - make epilogue-ci + - make testbreak-after diff --git a/.travis.yml b/.travis.yml index eb94cd239..b552c2924 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,23 +12,14 @@ jobs: os: osx env: - EXPECT_LINK_INFO="static" - - name: "Go 1.14 OSX bundled librdkafka" - go: "1.14" - os: osx - env: - - EXPECT_LINK_INFO="static" - name: "Go 1.16 Linux bundled librdkafka" go: "1.16" os: linux env: - EXPECT_LINK_INFO="static" - - name: "Go 1.14 Linux bundled librdkafka" - go: "1.14" - os: linux - env: - - EXPECT_LINK_INFO="static" - - name: "Go 1.14 OSX dynamic librdkafka" - go: "1.14" + - name: "Go 1.16 OSX dynamic librdkafka" + if: tag is present + go: "1.16" os: osx env: - EXPECT_LINK_INFO="dynamic" @@ -37,8 +28,9 @@ jobs: - LD_LIBRARY_PATH="$HOME/gopath/src/github.com/confluentinc/confluent-kafka-go/tmp-build/lib" - DYLD_LIBRARY_PATH="$HOME/gopath/src/github.com/confluentinc/confluent-kafka-go/tmp-build/lib" - LIBRDKAFKA_VERSION=master - - name: "Go 1.14 Linux dynamic librdkafka" - go: "1.14" + - name: "Go 1.16 Linux dynamic librdkafka" + if: tag is present + go: "1.16" os: linux env: - EXPECT_LINK_INFO="dynamic" @@ -52,11 +44,6 @@ jobs: os: windows env: - EXPECT_LINK_INFO="static" - - name: "Go 1.14 Windows bundled librdkafka" - go: "1.14" - os: windows - env: - - EXPECT_LINK_INFO="static" before_install: - if [[ $TRAVIS_OS_NAME == linux ]]; then wget -qO - https://packages.confluent.io/deb/5.4/archive.key | sudo apt-key add - ; fi diff --git a/CHANGELOG.md b/CHANGELOG.md index 97aa2ae0b..2ba9a5e13 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,11 +1,47 @@ # Confluent's Golang client for Apache Kafka -## v1.7.0 +## v1.9.0 -confluent-kafka-go is based on librdkafka v1.7.0, see the -[librdkafka release notes](https://github.com/edenhill/librdkafka/releases/tag/v1.7.0) +This is a feature release: + + * OAUTHBEARER OIDC support + * KIP-140 Admin API ACL support + * Added MockCluster for functional testing of applications without the need + for a real Kafka cluster (by @SourceFellows and @kkoehler, #729). + See [examples/mock_cluster](examples/mock_cluster). + + +### Fixes + + * Fix Rebalance events behavior for static membership (@jliunyu, #757, + #798). + * Fix consumer close taking 10 seconds when there's no rebalance + needed (@jliunyu, #757). + +confluent-kafka-go is based on librdkafka v1.9.0, see the +[librdkafka release notes](https://github.com/edenhill/librdkafka/releases/tag/v1.9.0) for a complete list of changes, enhancements, fixes and upgrade considerations. + +## v1.8.2 + +This is a maintenance release: + + * Bundles librdkafka v1.8.2 + * Check termination channel while reading delivery reports (by @zjj) + * Added convenience method Consumer.StoreMessage() (@finncolman, #676) + + +confluent-kafka-go is based on librdkafka v1.8.2, see the +[librdkafka release notes](https://github.com/edenhill/librdkafka/releases/tag/v1.8.2) +for a complete list of changes, enhancements, fixes and upgrade considerations. + + +**Note**: There were no confluent-kafka-go v1.8.0 and v1.8.1 releases. + + +## v1.7.0 + ### Enhancements * Experimental Windows support (by @neptoess). 
@@ -22,6 +58,10 @@ for a complete list of changes, enhancements, fixes and upgrade considerations. ReplicationFactor without specifying an explicit ReplicaAssignment, this is now fixed. +confluent-kafka-go is based on librdkafka v1.7.0, see the +[librdkafka release notes](https://github.com/edenhill/librdkafka/releases/tag/v1.7.0) +for a complete list of changes, enhancements, fixes and upgrade considerations. + ## v1.6.1 diff --git a/README.md b/README.md index 86490883e..a5ab9896f 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,9 @@ for the balanced consumer groups of Apache Kafka 0.9 and above. See the [API documentation](http://docs.confluent.io/current/clients/confluent-kafka-go/index.html) for more information. +For a step-by-step guide on using the client see [Getting Started with Apache Kafka and Golang](https://developer.confluent.io/get-started/go/). + + Examples ======== @@ -123,7 +126,7 @@ for use with [Confluent Cloud](https://www.confluent.io/confluent-cloud/). Getting Started =============== -Supports Go 1.11+ and librdkafka 1.6.0+. +Supports Go 1.11+ and librdkafka 1.9.0+. Using Go Modules ---------------- @@ -155,17 +158,14 @@ your `go.mod` file. Install the client ------------------ -If Go modules can't be used we recommend that you version pin the -confluent-kafka-go import to `v1` using gopkg.in: - Manual install: ```bash -go get -u gopkg.in/confluentinc/confluent-kafka-go.v1/kafka +go get -u github.com/confluentinc/confluent-kafka-go/kafka ``` Golang import: ```golang -import "gopkg.in/confluentinc/confluent-kafka-go.v1/kafka" +import "github.com/confluentinc/confluent-kafka-go/kafka" ``` librdkafka @@ -218,87 +218,34 @@ with `-tags dynamic`. **Note:** If you use the `master` branch of the Go client, then you need to use the `master` branch of librdkafka. -**confluent-kafka-go requires librdkafka v1.6.0 or later.** +**confluent-kafka-go requires librdkafka v1.9.0 or later.** API Strands =========== -There are two main API strands: function and channel-based. +The recommended API strand is the Function-Based one, +the Channel-Based one is documented in [examples/legacy](examples/legacy). Function-Based Consumer ----------------------- Messages, errors and events are polled through the `consumer.Poll()` function. -Pros: - - * More direct mapping to underlying librdkafka functionality. - -Cons: - - * Makes it harder to read from multiple channels, but a go-routine easily - solves that (see Cons in channel-based consumer below about outdated events). - * Slower than the channel consumer. +It has direct mapping to underlying librdkafka functionality. See [examples/consumer_example](examples/consumer_example) -Channel-Based Consumer (deprecated) ------------------------------------ - -*Deprecated*: The channel-based consumer is deprecated due to the channel issues - mentioned below. Use the function-based consumer. - -Messages, errors and events are posted on the `consumer.Events()` channel -for the application to read. - -Pros: - - * Possibly more Golang:ish - * Makes reading from multiple channels easy - * Fast - -Cons: - - * Outdated events and messages may be consumed due to the buffering nature - of channels. The extent is limited, but not remedied, by the Events channel - buffer size (`go.events.channel.size`). - -See [examples/consumer_channel_example](examples/consumer_channel_example) - -Channel-Based Producer ----------------------- - -Application writes messages to the `producer.ProducerChannel()`. 
-Delivery reports are emitted on the `producer.Events()` or specified private channel. - -Pros: - - * Go:ish - * Proper channel backpressure if librdkafka internal queue is full. - -Cons: - - * Double queueing: messages are first queued in the channel (size is configurable) - and then inside librdkafka. - -See [examples/producer_channel_example](examples/producer_channel_example) - Function-Based Producer ----------------------- Application calls `producer.Produce()` to produce messages. Delivery reports are emitted on the `producer.Events()` or specified private channel. -Pros: - - * Go:ish - -Cons: +_Warnings_ * `Produce()` is a non-blocking call, if the internal librdkafka queue is full - the call will fail. - * Somewhat slower than the channel producer. + the call will fail and can be retried. See [examples/producer_example](examples/producer_example) @@ -319,3 +266,8 @@ See [kafka/README](kafka/README.md) Contributions to the code, examples, documentation, et.al, are very much appreciated. Make your changes, run `gofmt`, tests, etc, push your branch, create a PR, and [sign the CLA](http://clabot.confluent.io/cla). + +Confluent Cloud +=============== + +For a step-by-step guide on using the Golang client with Confluent Cloud see [Getting Started with Apache Kafka and Golang](https://developer.confluent.io/get-started/go/) on [Confluent Developer](https://developer.confluent.io/). diff --git a/examples/.gitignore b/examples/.gitignore index 408c6ca7b..53c9c6450 100644 --- a/examples/.gitignore +++ b/examples/.gitignore @@ -1,10 +1,21 @@ -consumer_channel_example/consumer_channel_example +admin_create_acls/admin_create_acls +admin_create_topic/admin_create_topic +admin_delete_acls/admin_delete_acls +admin_delete_topics/admin_delete_topics +admin_describe_acls/admin_describe_acls +admin_describe_config/admin_describe_config +confluent_cloud_example/confluent_cloud_example consumer_example/consumer_example consumer_offset_metadata/consumer_offset_metadata -producer_channel_example/producer_channel_example -producer_example/producer_example +cooperative_consumer_example/cooperative_consumer_example go-kafkacat/go-kafkacat -admin_describe_config/admin_describe_config -admin_delete_topics/admin_delete_topics -admin_create_topic/admin_create_topic +idempotent_producer_example/idempotent_producer_example +legacy/consumer_channel_example/consumer_channel_example +legacy/producer_channel_example/producer_channel_example +library-version/library-version +mockcluster_example/mockcluster_example +oauthbearer_example/oauthbearer_example +producer_custom_channel_example/producer_custom_channel_example +producer_example/producer_example stats_example/stats_example +transactions_example/transactions_example diff --git a/examples/README.md b/examples/README.md index 98776bc34..c0ad634e5 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,24 +1,51 @@ -Examples: +Examples +-------- - consumer_channel_example - Channel based consumer - consumer_example - Function & callback based consumer - consumer_offset_metadata - Commit offset with metadata + [admin_create_acls](admin_create_acls) - Create Access Control Lists + + [admin_create_topic](admin_create_topic) - Create a topic - producer_channel_example - Channel based producer - producer_example - Function based producer + [admin_delete_acls](admin_delete_acls) - Delete Access Control Lists using different filters + + [admin_delete_topics](admin_delete_topics) - Delete some topics + + [admin_describe_acls](admin_describe_acls) - Find Access 
Control Lists using a filter + + [admin_describe_config](admin_describe_config) - Describe broker, topic or group configs + + [consumer_example](consumer_example) - Function & callback based consumer + + [consumer_offset_metadata](consumer_offset_metadata) - Commit offset with metadata + + [cooperative_consumer_example](cooperative_consumer_example) - Using the cooperative incremental rebalancing protocol - transactions_example - Showcasing a transactional consume-process-produce application + [confluent_cloud_example](confluent_cloud_example) - Usage example with Confluent Cloud - go-kafkacat - Channel based kafkacat Go clone + [go-kafkacat](go-kafkacat) - Channel based kafkacat Go clone - oauthbearer_example - Provides unsecured SASL/OAUTHBEARER example + [idempotent_producer_example](idempotent_producer_example) - Idempotent producer + + [legacy](legacy) - Legacy examples + + [library-version](library-version) - Show the library version + [mockcluster_example](mockcluster_example) - Use a mock cluster for testing -Usage example: + [oauthbearer_example](oauthbearer_example) - Provides unsecured SASL/OAUTHBEARER example + + [producer_custom_channel_example](producer_custom_channel_example) - Function based producer with a custom delivery channel + + [producer_example](producer_example) - Function based producer + + [stats_example](stats_example) - Receiving stats events + + [transactions_example](transactions_example) - Showcasing a transactional consume-process-produce application + +Usage example +------------- $ cd consumer_example $ go build (or 'go install') $ ./consumer_example # see usage $ ./consumer_example mybroker mygroup mytopic - diff --git a/examples/admin_create_acls/admin_create_acls.go b/examples/admin_create_acls/admin_create_acls.go new file mode 100644 index 000000000..71b8bbf82 --- /dev/null +++ b/examples/admin_create_acls/admin_create_acls.go @@ -0,0 +1,147 @@ +// Create ACLs +package main + +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" +) + +// Parses a list of 7n arguments to a slice of n ACLBinding +func parseACLBindings(args []string) (aclBindings kafka.ACLBindings, err error) { + nACLBindings := len(args) / 7 + parsedACLBindings := make(kafka.ACLBindings, nACLBindings) + + for i := 0; i < nACLBindings; i++ { + start := i * 7 + resourceTypeString := args[start] + name := args[start+1] + resourcePatternTypeString := args[start+2] + principal := args[start+3] + host := args[start+4] + operationString := args[start+5] + permissionTypeString := args[start+6] + + resourceType, errParse := kafka.ResourceTypeFromString(resourceTypeString) + if errParse != nil { + err = errParse + fmt.Printf("Invalid resource type: %s: %v\n", resourceTypeString, err) + return + } + + resourcePatternType, errParse := kafka.ResourcePatternTypeFromString(resourcePatternTypeString) + if errParse != nil { + err = errParse + fmt.Printf("Invalid resource pattern type: %s: %v\n", resourcePatternTypeString, err) + return + } + + operation, errParse := kafka.ACLOperationFromString(operationString) + if errParse != nil { + err = errParse + fmt.Printf("Invalid operation: %s: %v\n", operationString, err) + return + } + + permissionType, errParse := kafka.ACLPermissionTypeFromString(permissionTypeString) + if errParse != nil { + err = errParse + fmt.Printf("Invalid permission type: %s: %v\n", permissionTypeString, err) + return + } + + parsedACLBindings[i] = kafka.ACLBinding{ + Type: resourceType, + Name: name, + ResourcePatternType: resourcePatternType, + Principal: principal, + Host: host, + Operation: operation, + PermissionType: permissionType, + } + } + aclBindings = parsedACLBindings + return +} + +func main() { + + // 2 + 7n arguments to create n ACL bindings + nArgs := len(os.Args) + aclBindingArgs := nArgs - 2 + if aclBindingArgs <= 0 || aclBindingArgs%7 != 0 { + fmt.Fprintf(os.Stderr, + "Usage: %s "+ + " ...\n", + os.Args[0]) + os.Exit(1) + } + + bootstrapServers := os.Args[1] + aclBindings, err := parseACLBindings(os.Args[2:]) + if err != nil { + os.Exit(1) + } + + // Create a new AdminClient. + // AdminClient can also be instantiated using an existing + // Producer or Consumer instance, see NewAdminClientFromProducer and + // NewAdminClientFromConsumer. + a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": bootstrapServers}) + if err != nil { + fmt.Printf("Failed to create Admin client: %s\n", err) + os.Exit(1) + } + + // Contexts are used to abort or limit the amount of time + // the Admin call blocks waiting for a result. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create ACLs on cluster. 
+ // Set Admin options to wait for the request to finish (or at most 60s) + maxDur, err := time.ParseDuration("60s") + if err != nil { + panic("ParseDuration(60s)") + } + results, err := a.CreateACLs( + ctx, + aclBindings, + kafka.SetAdminRequestTimeout(maxDur), + ) + if err != nil { + fmt.Printf("Failed to create ACLs: %v\n", err) + os.Exit(1) + } + + // Print results + for i, result := range results { + if result.Error.Code() == kafka.ErrNoError { + fmt.Printf("CreateACLs %d successful\n", i) + } else { + fmt.Printf("CreateACLs %d failed, error code: %s, message: %s\n", + i, result.Error.Code(), result.Error.String()) + } + } + + a.Close() +} diff --git a/examples/admin_create_topic/admin_create_topic.go b/examples/admin_create_topic/admin_create_topic.go index 3acda269d..864e4c6be 100644 --- a/examples/admin_create_topic/admin_create_topic.go +++ b/examples/admin_create_topic/admin_create_topic.go @@ -20,22 +20,23 @@ package main import ( "context" "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "os" "strconv" "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) func main() { if len(os.Args) != 5 { fmt.Fprintf(os.Stderr, - "Usage: %s \n", + "Usage: %s \n", os.Args[0]) os.Exit(1) } - broker := os.Args[1] + bootstrapServers := os.Args[1] topic := os.Args[2] numParts, err := strconv.Atoi(os.Args[3]) if err != nil { @@ -52,7 +53,7 @@ func main() { // AdminClient can also be instantiated using an existing // Producer or Consumer instance, see NewAdminClientFromProducer and // NewAdminClientFromConsumer. - a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": broker}) + a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": bootstrapServers}) if err != nil { fmt.Printf("Failed to create Admin client: %s\n", err) os.Exit(1) diff --git a/examples/admin_delete_acls/admin_delete_acls.go b/examples/admin_delete_acls/admin_delete_acls.go new file mode 100644 index 000000000..11802090f --- /dev/null +++ b/examples/admin_delete_acls/admin_delete_acls.go @@ -0,0 +1,147 @@ +// Delete ACLs +package main + +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" +) + +// Parses a list of 7n arguments to a slice of n ACLBindingFilter +func parseACLBindingFilters(args []string) (aclBindingFilters kafka.ACLBindingFilters, err error) { + nACLBindingFilters := len(args) / 7 + parsedACLBindingFilters := make(kafka.ACLBindingFilters, nACLBindingFilters) + + for i := 0; i < nACLBindingFilters; i++ { + start := i * 7 + resourceTypeString := args[start] + name := args[start+1] + resourcePatternTypeString := args[start+2] + principal := args[start+3] + host := args[start+4] + operationString := args[start+5] + permissionTypeString := args[start+6] + + var resourceType kafka.ResourceType + var resourcePatternType kafka.ResourcePatternType + var operation kafka.ACLOperation + var permissionType kafka.ACLPermissionType + + resourceType, err = kafka.ResourceTypeFromString(resourceTypeString) + if err != nil { + fmt.Printf("Invalid resource type: %s: %v\n", resourceTypeString, err) + return + } + resourcePatternType, err = kafka.ResourcePatternTypeFromString(resourcePatternTypeString) + if err != nil { + fmt.Printf("Invalid resource pattern type: %s: %v\n", resourcePatternTypeString, err) + return + } + + operation, err = kafka.ACLOperationFromString(operationString) + if err != nil { + fmt.Printf("Invalid operation: %s: %v\n", operationString, err) + return + } + + permissionType, err = kafka.ACLPermissionTypeFromString(permissionTypeString) + if err != nil { + fmt.Printf("Invalid permission type: %s: %v\n", permissionTypeString, err) + return + } + + parsedACLBindingFilters[i] = kafka.ACLBindingFilter{ + Type: resourceType, + Name: name, + ResourcePatternType: resourcePatternType, + Principal: principal, + Host: host, + Operation: operation, + PermissionType: permissionType, + } + } + aclBindingFilters = parsedACLBindingFilters + return +} + +func main() { + + // 2 + 7n arguments to create n ACL binding filters + nArgs := len(os.Args) + aclBindingFilterArgs := nArgs - 2 + if aclBindingFilterArgs <= 0 || aclBindingFilterArgs%7 != 0 { + fmt.Fprintf(os.Stderr, + "Usage: %s "+ + " ...\n", + os.Args[0]) + os.Exit(1) + } + + bootstrapServers := os.Args[1] + aclBindingFilters, err := parseACLBindingFilters(os.Args[2:]) + if err != nil { + os.Exit(1) + } + + // Create a new AdminClient. + // AdminClient can also be instantiated using an existing + // Producer or Consumer instance, see NewAdminClientFromProducer and + // NewAdminClientFromConsumer. + a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": bootstrapServers}) + if err != nil { + fmt.Printf("Failed to create Admin client: %s\n", err) + os.Exit(1) + } + + // Contexts are used to abort or limit the amount of time + // the Admin call blocks waiting for a result. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create ACLs on cluster. 
+ // Set Admin options to wait for the request to finish (or at most 60s) + maxDur, err := time.ParseDuration("60s") + if err != nil { + panic("ParseDuration(60s)") + } + results, err := a.DeleteACLs( + ctx, + aclBindingFilters, + kafka.SetAdminRequestTimeout(maxDur), + ) + if err != nil { + fmt.Printf("Failed to delete ACLs: %v\n", err) + os.Exit(1) + } + + // Print results + for i, result := range results { + if result.Error.Code() == kafka.ErrNoError { + fmt.Printf("DeleteACLs %d successful, deleted: %+v\n", i, result.ACLBindings) + } else { + fmt.Printf("DeleteACLs %d failed, error code: %s, message: %s\n", + i, result.Error.Code(), result.Error.String()) + } + } + + a.Close() +} diff --git a/examples/admin_delete_topics/admin_delete_topics.go b/examples/admin_delete_topics/admin_delete_topics.go index ac8120694..e944fe8e0 100644 --- a/examples/admin_delete_topics/admin_delete_topics.go +++ b/examples/admin_delete_topics/admin_delete_topics.go @@ -20,28 +20,29 @@ package main import ( "context" "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "os" "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) func main() { if len(os.Args) < 3 { fmt.Fprintf(os.Stderr, - "Usage: %s ..\n", + "Usage: %s ..\n", os.Args[0]) os.Exit(1) } - broker := os.Args[1] + bootstrapServers := os.Args[1] topics := os.Args[2:] // Create a new AdminClient. // AdminClient can also be instantiated using an existing // Producer or Consumer instance, see NewAdminClientFromProducer and // NewAdminClientFromConsumer. - a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": broker}) + a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": bootstrapServers}) if err != nil { fmt.Printf("Failed to create Admin client: %s\n", err) os.Exit(1) diff --git a/examples/admin_describe_acls/admin_describe_acls.go b/examples/admin_describe_acls/admin_describe_acls.go new file mode 100644 index 000000000..9a52bef4e --- /dev/null +++ b/examples/admin_describe_acls/admin_describe_acls.go @@ -0,0 +1,145 @@ +// Describe ACLs +package main + +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" +) + +// Parses a list of 7n arguments to a slice of n ACLBindingFilter +func parseACLBindingFilters(args []string) (aclBindingFilters kafka.ACLBindingFilters, err error) { + nACLBindingFilters := len(args) / 7 + parsedACLBindingFilters := make(kafka.ACLBindingFilters, nACLBindingFilters) + + for i := 0; i < nACLBindingFilters; i++ { + start := i * 7 + resourceTypeString := args[start] + name := args[start+1] + resourcePatternTypeString := args[start+2] + principal := args[start+3] + host := args[start+4] + operationString := args[start+5] + permissionTypeString := args[start+6] + + var resourceType kafka.ResourceType + var resourcePatternType kafka.ResourcePatternType + var operation kafka.ACLOperation + var permissionType kafka.ACLPermissionType + + resourceType, err = kafka.ResourceTypeFromString(resourceTypeString) + if err != nil { + fmt.Printf("Invalid resource type: %s: %v\n", resourceTypeString, err) + return + } + resourcePatternType, err = kafka.ResourcePatternTypeFromString(resourcePatternTypeString) + if err != nil { + fmt.Printf("Invalid resource pattern type: %s: %v\n", resourcePatternTypeString, err) + return + } + + operation, err = kafka.ACLOperationFromString(operationString) + if err != nil { + fmt.Printf("Invalid operation: %s: %v\n", operationString, err) + return + } + + permissionType, err = kafka.ACLPermissionTypeFromString(permissionTypeString) + if err != nil { + fmt.Printf("Invalid permission type: %s: %v\n", permissionTypeString, err) + return + } + + parsedACLBindingFilters[i] = kafka.ACLBindingFilter{ + Type: resourceType, + Name: name, + ResourcePatternType: resourcePatternType, + Principal: principal, + Host: host, + Operation: operation, + PermissionType: permissionType, + } + } + aclBindingFilters = parsedACLBindingFilters + return +} + +func main() { + + // 2 + 7 arguments to create an ACL binding filter + nArgs := len(os.Args) + aclBindingFilterArgs := nArgs - 2 + if aclBindingFilterArgs != 7 { + fmt.Fprintf(os.Stderr, + "Usage: %s "+ + " ...\n", + os.Args[0]) + os.Exit(1) + } + + bootstrapServers := os.Args[1] + aclBindingFilters, err := parseACLBindingFilters(os.Args[2:]) + if err != nil { + os.Exit(1) + } + + // Create a new AdminClient. + // AdminClient can also be instantiated using an existing + // Producer or Consumer instance, see NewAdminClientFromProducer and + // NewAdminClientFromConsumer. + a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": bootstrapServers}) + if err != nil { + fmt.Printf("Failed to create Admin client: %s\n", err) + os.Exit(1) + } + + // Contexts are used to abort or limit the amount of time + // the Admin call blocks waiting for a result. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Describe ACLs on cluster. 
+ // Set Admin options to wait for the request to finish (or at most 60s) + maxDur, err := time.ParseDuration("60s") + if err != nil { + panic("ParseDuration(60s)") + } + result, err := a.DescribeACLs( + ctx, + aclBindingFilters[0], + kafka.SetAdminRequestTimeout(maxDur), + ) + if err != nil { + fmt.Printf("Failed to describe ACLs: %v\n", err) + os.Exit(1) + } + + // Print results + if result.Error.Code() == kafka.ErrNoError { + fmt.Printf("DescribeACLs successful, result: %+v\n", result.ACLBindings) + } else { + fmt.Printf("DescribeACLs failed, error code: %s, message: %s\n", + result.Error.Code(), result.Error.String()) + } + + a.Close() +} diff --git a/examples/admin_describe_config/admin_describe_config.go b/examples/admin_describe_config/admin_describe_config.go index 3963584a3..f4e08f296 100644 --- a/examples/admin_describe_config/admin_describe_config.go +++ b/examples/admin_describe_config/admin_describe_config.go @@ -20,25 +20,26 @@ package main import ( "context" "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "os" "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) func main() { if len(os.Args) != 4 { fmt.Fprintf(os.Stderr, - "Usage: %s \n"+ + "Usage: %s \n"+ "\n"+ - " - CSV list of bootstrap brokers\n"+ + " - CSV list of bootstrap brokers\n"+ " - any, broker, topic, group\n"+ " - broker id or topic name\n", os.Args[0]) os.Exit(1) } - broker := os.Args[1] + bootstrapServers := os.Args[1] resourceType, err := kafka.ResourceTypeFromString(os.Args[2]) if err != nil { fmt.Printf("Invalid resource type: %s\n", os.Args[2]) @@ -50,7 +51,7 @@ func main() { // AdminClient can also be instantiated using an existing // Producer or Consumer instance, see NewAdminClientFromProducer and // NewAdminClientFromConsumer. - a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": broker}) + a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": bootstrapServers}) if err != nil { fmt.Printf("Failed to create Admin client: %s\n", err) os.Exit(1) diff --git a/examples/consumer_example/consumer_example.go b/examples/consumer_example/consumer_example.go index a0157b664..b6f663afd 100644 --- a/examples/consumer_example/consumer_example.go +++ b/examples/consumer_example/consumer_example.go @@ -22,37 +22,40 @@ package main import ( "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "os" "os/signal" "syscall" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) func main() { if len(os.Args) < 4 { - fmt.Fprintf(os.Stderr, "Usage: %s \n", + fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0]) os.Exit(1) } - broker := os.Args[1] + bootstrapServers := os.Args[1] group := os.Args[2] topics := os.Args[3:] sigchan := make(chan os.Signal, 1) signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) c, err := kafka.NewConsumer(&kafka.ConfigMap{ - "bootstrap.servers": broker, + "bootstrap.servers": bootstrapServers, // Avoid connecting to IPv6 brokers: // This is needed for the ErrAllBrokersDown show-case below // when using localhost brokers on OSX, since the OSX resolver // will return the IPv6 addresses first. // You typically don't need to specify this configuration property. 
- "broker.address.family": "v4", - "group.id": group, - "session.timeout.ms": 6000, - "auto.offset.reset": "earliest"}) + "broker.address.family": "v4", + "group.id": group, + "session.timeout.ms": 6000, + "auto.offset.reset": "earliest", + "enable.auto.offset.store": false, + }) if err != nil { fmt.Fprintf(os.Stderr, "Failed to create consumer: %s\n", err) @@ -83,6 +86,11 @@ func main() { if e.Headers != nil { fmt.Printf("%% Headers: %v\n", e.Headers) } + _, err := c.StoreMessage(e) + if err != nil { + fmt.Fprintf(os.Stderr, "%% Error storing offset after message %s:\n", + e.TopicPartition) + } case kafka.Error: // Errors should generally be considered // informational, the client will try to diff --git a/examples/consumer_offset_metadata/consumer_offset_metadata.go b/examples/consumer_offset_metadata/consumer_offset_metadata.go index 9f678a6a6..3f059ad0c 100644 --- a/examples/consumer_offset_metadata/consumer_offset_metadata.go +++ b/examples/consumer_offset_metadata/consumer_offset_metadata.go @@ -23,22 +23,23 @@ package main import ( "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "os" "strconv" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) func main() { if len(os.Args) != 7 && len(os.Args) != 5 { fmt.Fprintf(os.Stderr, `Usage: -- commit offset with metadata: %s "" -- show partition offset: %s `, +- commit offset with metadata: %s "" +- show partition offset: %s `, os.Args[0], os.Args[0]) os.Exit(1) } - broker := os.Args[1] + bootstrapServers := os.Args[1] group := os.Args[2] topic := os.Args[3] partition, err := strconv.Atoi(os.Args[4]) @@ -48,7 +49,7 @@ func main() { } c, err := kafka.NewConsumer(&kafka.ConfigMap{ - "bootstrap.servers": broker, + "bootstrap.servers": bootstrapServers, "group.id": group, }) diff --git a/examples/cooperative_consumer_example/cooperative_consumer_example.go b/examples/cooperative_consumer_example/cooperative_consumer_example.go index d63be047e..2facabfa6 100644 --- a/examples/cooperative_consumer_example/cooperative_consumer_example.go +++ b/examples/cooperative_consumer_example/cooperative_consumer_example.go @@ -21,28 +21,29 @@ package main import ( "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "os" "os/signal" "syscall" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) func main() { if len(os.Args) < 4 { - fmt.Fprintf(os.Stderr, "Usage: %s \n", + fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0]) os.Exit(1) } - broker := os.Args[1] + bootstrapServers := os.Args[1] group := os.Args[2] topics := os.Args[3:] sigchan := make(chan os.Signal, 1) signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) c, err := kafka.NewConsumer(&kafka.ConfigMap{ - "bootstrap.servers": broker, + "bootstrap.servers": bootstrapServers, // Avoid connecting to IPv6 brokers: // This is needed for the ErrAllBrokersDown show-case below // when using localhost brokers on OSX, since the OSX resolver diff --git a/examples/go-kafkacat/go-kafkacat.go b/examples/go-kafkacat/go-kafkacat.go index 30054fb9b..fbff6b64b 100644 --- a/examples/go-kafkacat/go-kafkacat.go +++ b/examples/go-kafkacat/go-kafkacat.go @@ -20,7 +20,7 @@ package main import ( "bufio" "fmt" - "gopkg.in/alecthomas/kingpin.v2" + "github.com/alecthomas/kingpin" "github.com/confluentinc/confluent-kafka-go/kafka" "os" "os/signal" diff --git a/examples/go.mod b/examples/go.mod new file mode 100644 index 000000000..23e5f859f --- /dev/null +++ b/examples/go.mod @@ -0,0 +1,14 @@ +module github.com/confluentinc/confluent-kafka-go/examples + +go 1.13 + +replace 
github.com/confluentinc/confluent-kafka-go => ../ + +require ( + github.com/alecthomas/kingpin v2.2.6+incompatible + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/confluentinc/confluent-kafka-go v1.9.0 + github.com/gdamore/tcell v1.4.0 + github.com/stretchr/testify v1.7.1 // indirect +) diff --git a/examples/go.sum b/examples/go.sum new file mode 100644 index 000000000..78528af5a --- /dev/null +++ b/examples/go.sum @@ -0,0 +1,31 @@ +github.com/alecthomas/kingpin v2.2.6+incompatible h1:5svnBTFgJjZvGKyYBtMB0+m5wvrbUHiqye8wRJMlnYI= +github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell v1.4.0 h1:vUnHwJRvcPQa3tzi+0QI4U9JINXYJlOz9yiaiPQ2wMU= +github.com/gdamore/tcell v1.4.0/go.mod h1:vxEiSDZdW3L+Uhjii9c3375IlDmR05bzxY404ZVSMo0= +github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac= +github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756 h1:9nuHUbU8dRnRRfj9KjWUVrJeoexdbeMjttk6Oh1rD10= +golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git 
a/examples/idempotent_producer_example/idempotent_producer_example.go b/examples/idempotent_producer_example/idempotent_producer_example.go index a95f551f8..c20f2f969 100644 --- a/examples/idempotent_producer_example/idempotent_producer_example.go +++ b/examples/idempotent_producer_example/idempotent_producer_example.go @@ -29,9 +29,10 @@ package main import ( "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "os" "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) var run = true @@ -39,16 +40,16 @@ var run = true func main() { if len(os.Args) != 3 { - fmt.Fprintf(os.Stderr, "Usage: %s \n", + fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0]) os.Exit(1) } - broker := os.Args[1] + bootstrapServers := os.Args[1] topic := os.Args[2] p, err := kafka.NewProducer(&kafka.ConfigMap{ - "bootstrap.servers": broker, + "bootstrap.servers": bootstrapServers, // Enable the Idempotent Producer "enable.idempotence": true}) diff --git a/examples/legacy/README.md b/examples/legacy/README.md new file mode 100644 index 000000000..33f72ed23 --- /dev/null +++ b/examples/legacy/README.md @@ -0,0 +1,53 @@ +Legacy examples +=============== + +This directory contains examples for no longer recommended functionality + +Channel-Based Consumer (deprecated) +----------------------------------- + +*Deprecated*: The channel-based consumer is deprecated due to the channel issues + mentioned below. Use the function-based consumer. + +Messages, errors and events are posted on the `consumer.Events()` channel +for the application to read. + +Pros: + + * Possibly more Golang:ish + * Makes reading from multiple channels easy + * Fast + +Cons: + + * Outdated events and messages may be consumed due to the buffering nature + of channels. The extent is limited, but not remedied, by the Events channel + buffer size (`go.events.channel.size`). + +See [consumer_channel_example](consumer_channel_example) + +Channel-Based Producer +---------------------- + +Application writes messages to the `producer.ProducerChannel()`. +Delivery reports are emitted on the `producer.Events()` or specified private channel. + +Pros: + + * Go:ish + * Proper channel backpressure if librdkafka internal queue is full. + +Cons: + + * Double queueing: messages are first queued in the channel (size is configurable) + and then inside librdkafka. 
+ +See [producer_channel_example](producer_channel_example) + +Usage example +------------- + + $ cd consumer_channel_example + $ go build (or 'go install') + $ ./consumer_channel_example # see usage + $ ./consumer_channel_example mybroker mygroup mytopic diff --git a/examples/consumer_channel_example/consumer_channel_example.go b/examples/legacy/consumer_channel_example/consumer_channel_example.go similarity index 93% rename from examples/consumer_channel_example/consumer_channel_example.go rename to examples/legacy/consumer_channel_example/consumer_channel_example.go index a1b2c1ed4..c2304657f 100644 --- a/examples/consumer_channel_example/consumer_channel_example.go +++ b/examples/legacy/consumer_channel_example/consumer_channel_example.go @@ -19,21 +19,22 @@ package main import ( "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "os" "os/signal" "syscall" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) func main() { if len(os.Args) < 4 { - fmt.Fprintf(os.Stderr, "Usage: %s \n", + fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0]) os.Exit(1) } - broker := os.Args[1] + bootstrapServers := os.Args[1] group := os.Args[2] topics := os.Args[3:] @@ -41,7 +42,7 @@ func main() { signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) c, err := kafka.NewConsumer(&kafka.ConfigMap{ - "bootstrap.servers": broker, + "bootstrap.servers": bootstrapServers, "group.id": group, "session.timeout.ms": 6000, "go.events.channel.enable": true, diff --git a/examples/producer_channel_example/producer_channel_example.go b/examples/legacy/producer_channel_example/producer_channel_example.go similarity index 93% rename from examples/producer_channel_example/producer_channel_example.go rename to examples/legacy/producer_channel_example/producer_channel_example.go index c06dc9251..5b7e18135 100644 --- a/examples/producer_channel_example/producer_channel_example.go +++ b/examples/legacy/producer_channel_example/producer_channel_example.go @@ -19,22 +19,23 @@ package main import ( "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "os" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) func main() { if len(os.Args) != 3 { - fmt.Fprintf(os.Stderr, "Usage: %s \n", + fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0]) os.Exit(1) } - broker := os.Args[1] + bootstrapServers := os.Args[1] topic := os.Args[2] - p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": broker}) + p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": bootstrapServers}) if err != nil { fmt.Printf("Failed to create producer: %s\n", err) diff --git a/examples/mockcluster_example/mockcluster.go b/examples/mockcluster_example/mockcluster.go new file mode 100644 index 000000000..4732c98ea --- /dev/null +++ b/examples/mockcluster_example/mockcluster.go @@ -0,0 +1,95 @@ +package main + +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import ( + "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" + "os" +) + +func main() { + + mockCluster, err := kafka.NewMockCluster(1) + if err != nil { + fmt.Printf("Failed to create MockCluster: %s\n", err) + os.Exit(1) + } + defer mockCluster.Close() + + broker := mockCluster.BootstrapServers() + + p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": broker}) + + if err != nil { + fmt.Printf("Failed to create producer: %s\n", err) + os.Exit(1) + } + + fmt.Printf("Created Producer %v\n", p) + deliveryChan := make(chan kafka.Event) + + topic := "Test" + value := "Hello Go!" + err = p.Produce(&kafka.Message{ + TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny}, + Value: []byte(value), + Headers: []kafka.Header{{Key: "myTestHeader", Value: []byte("header values are binary")}}, + }, deliveryChan) + + e := <-deliveryChan + m := e.(*kafka.Message) + + if m.TopicPartition.Error != nil { + fmt.Printf("Delivery failed: %v\n", m.TopicPartition.Error) + } else { + fmt.Printf("Delivered message to topic %s [%d] at offset %v\n", + *m.TopicPartition.Topic, m.TopicPartition.Partition, m.TopicPartition.Offset) + } + + close(deliveryChan) + + c, err := kafka.NewConsumer(&kafka.ConfigMap{ + "bootstrap.servers": broker, + "broker.address.family": "v4", + "group.id": "group", + "session.timeout.ms": 6000, + "auto.offset.reset": "earliest"}) + + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to create consumer: %s\n", err) + os.Exit(1) + } + defer c.Close() + + fmt.Printf("Created Consumer %v\n", c) + + err = c.SubscribeTopics([]string{topic}, nil) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to subscribe to consumer: %s\n", err) + os.Exit(1) + } + + msg, err := c.ReadMessage(-1) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to read message: %s\n", err) + os.Exit(1) + } + + fmt.Println("received message: ", string(msg.Value)) + +} diff --git a/examples/oauthbearer_example/oauthbearer_example.go b/examples/oauthbearer_example/oauthbearer_example.go index e0a708508..0db48d2a6 100644 --- a/examples/oauthbearer_example/oauthbearer_example.go +++ b/examples/oauthbearer_example/oauthbearer_example.go @@ -21,10 +21,11 @@ import ( "encoding/base64" "encoding/json" "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "os" "regexp" "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) var ( @@ -116,17 +117,17 @@ func retrieveUnsecuredToken(e kafka.OAuthBearerTokenRefresh) (kafka.OAuthBearerT func main() { if len(os.Args) != 3 { - fmt.Fprintf(os.Stderr, "Usage: %s \"[principalClaimName=] principal=\"\n", os.Args[0]) + fmt.Fprintf(os.Stderr, "Usage: %s \"[principalClaimName=] principal=\"\n", os.Args[0]) os.Exit(1) } - broker := os.Args[1] + bootstrapServers := os.Args[1] oauthConf := os.Args[2] // You'll probably need to modify this configuration to // match your environment. 
config := kafka.ConfigMap{ - "bootstrap.servers": broker, + "bootstrap.servers": bootstrapServers, "security.protocol": "SASL_PLAINTEXT", "sasl.mechanisms": "OAUTHBEARER", "sasl.oauthbearer.config": oauthConf, diff --git a/examples/producer_custom_channel_example/producer_custom_channel_example.go b/examples/producer_custom_channel_example/producer_custom_channel_example.go new file mode 100644 index 000000000..d4df3efa0 --- /dev/null +++ b/examples/producer_custom_channel_example/producer_custom_channel_example.go @@ -0,0 +1,129 @@ +// Example function-based Apache Kafka producer with a custom delivery channel +package main + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "fmt" + "os" + "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" +) + +func main() { + + if len(os.Args) != 3 { + fmt.Fprintf(os.Stderr, "Usage: %s \n", + os.Args[0]) + os.Exit(1) + } + + bootstrapServers := os.Args[1] + topic := os.Args[2] + totalMsgcnt := 3 + + p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": bootstrapServers}) + + if err != nil { + fmt.Printf("Failed to create producer: %s\n", err) + os.Exit(1) + } + + fmt.Printf("Created Producer %v\n", p) + + // Listen to all the client instance-level errors. + // It's important to read these errors too otherwise the events channel will eventually fill up + go func() { + for e := range p.Events() { + switch ev := e.(type) { + case kafka.Error: + // Generic client instance-level errors, such as + // broker connection failures, authentication issues, etc. + // + // These errors should generally be considered informational + // as the underlying client will automatically try to + // recover from any errors encountered, the application + // does not need to take action on them. + fmt.Printf("Error: %v\n", ev) + default: + fmt.Printf("Ignored event: %s\n", ev) + } + } + }() + + msgcnt := 0 + for msgcnt < totalMsgcnt { + value := fmt.Sprintf("Producer example, message #%d", msgcnt) + + // A delivery channel for each message sent. + // This permits to receive delivery reports + // separately and to handle the use case + // of a server that has multiple concurrent + // produce requests and needs to deliver the replies + // to many different response channels. + deliveryChan := make(chan kafka.Event) + go func() { + for e := range deliveryChan { + switch ev := e.(type) { + case *kafka.Message: + // The message delivery report, indicating success or + // permanent failure after retries have been exhausted. + // Application level retries won't help since the client + // is already configured to do that. 
+ m := ev + if m.TopicPartition.Error != nil { + fmt.Printf("Delivery failed: %v\n", m.TopicPartition.Error) + } else { + fmt.Printf("Delivered message to topic %s [%d] at offset %v\n", + *m.TopicPartition.Topic, m.TopicPartition.Partition, m.TopicPartition.Offset) + } + + default: + fmt.Printf("Ignored event: %s\n", ev) + } + // in this case the caller knows that this channel is used only + // for one Produce call, so it can close it. + close(deliveryChan) + } + }() + + err = p.Produce(&kafka.Message{ + TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny}, + Value: []byte(value), + Headers: []kafka.Header{{Key: "myTestHeader", Value: []byte("header values are binary")}}, + }, deliveryChan) + + if err != nil { + close(deliveryChan) + if err.(kafka.Error).Code() == kafka.ErrQueueFull { + // Producer queue is full, wait 1s for messages + // to be delivered then try again. + time.Sleep(time.Second) + continue + } + fmt.Printf("Failed to produce message: %v\n", err) + } + msgcnt++ + } + + // Flush and close the producer and the events channel + for p.Flush(10000) > 0 { + fmt.Print("Still waiting to flush outstanding messages\n", err) + } + p.Close() +} diff --git a/examples/producer_example/producer_example.go b/examples/producer_example/producer_example.go index 62c284144..5ef124752 100644 --- a/examples/producer_example/producer_example.go +++ b/examples/producer_example/producer_example.go @@ -19,22 +19,25 @@ package main import ( "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "os" + "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) func main() { if len(os.Args) != 3 { - fmt.Fprintf(os.Stderr, "Usage: %s \n", + fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0]) os.Exit(1) } - broker := os.Args[1] + bootstrapServers := os.Args[1] topic := os.Args[2] + totalMsgcnt := 3 - p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": broker}) + p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": bootstrapServers}) if err != nil { fmt.Printf("Failed to create producer: %s\n", err) @@ -43,26 +46,62 @@ func main() { fmt.Printf("Created Producer %v\n", p) - // Optional delivery channel, if not specified the Producer object's - // .Events channel is used. - deliveryChan := make(chan kafka.Event) + // Listen to all the events on the default events channel + go func() { + for e := range p.Events() { + switch ev := e.(type) { + case *kafka.Message: + // The message delivery report, indicating success or + // permanent failure after retries have been exhausted. + // Application level retries won't help since the client + // is already configured to do that. + m := ev + if m.TopicPartition.Error != nil { + fmt.Printf("Delivery failed: %v\n", m.TopicPartition.Error) + } else { + fmt.Printf("Delivered message to topic %s [%d] at offset %v\n", + *m.TopicPartition.Topic, m.TopicPartition.Partition, m.TopicPartition.Offset) + } + case kafka.Error: + // Generic client instance-level errors, such as + // broker connection failures, authentication issues, etc. + // + // These errors should generally be considered informational + // as the underlying client will automatically try to + // recover from any errors encountered, the application + // does not need to take action on them. + fmt.Printf("Error: %v\n", ev) + default: + fmt.Printf("Ignored event: %s\n", ev) + } + } + }() - value := "Hello Go!" 
- err = p.Produce(&kafka.Message{ - TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny}, - Value: []byte(value), - Headers: []kafka.Header{{Key: "myTestHeader", Value: []byte("header values are binary")}}, - }, deliveryChan) + msgcnt := 0 + for msgcnt < totalMsgcnt { + value := fmt.Sprintf("Producer example, message #%d", msgcnt) - e := <-deliveryChan - m := e.(*kafka.Message) + err = p.Produce(&kafka.Message{ + TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny}, + Value: []byte(value), + Headers: []kafka.Header{{Key: "myTestHeader", Value: []byte("header values are binary")}}, + }, nil) - if m.TopicPartition.Error != nil { - fmt.Printf("Delivery failed: %v\n", m.TopicPartition.Error) - } else { - fmt.Printf("Delivered message to topic %s [%d] at offset %v\n", - *m.TopicPartition.Topic, m.TopicPartition.Partition, m.TopicPartition.Offset) + if err != nil { + if err.(kafka.Error).Code() == kafka.ErrQueueFull { + // Producer queue is full, wait 1s for messages + // to be delivered then try again. + time.Sleep(time.Second) + continue + } + fmt.Printf("Failed to produce message: %v\n", err) + } + msgcnt++ } - close(deliveryChan) + // Flush and close the producer and the events channel + for p.Flush(10000) > 0 { + fmt.Print("Still waiting to flush outstanding messages\n", err) + } + p.Close() } diff --git a/examples/stats_example/stats_example.go b/examples/stats_example/stats_example.go index cd795efe8..9d186a6d4 100644 --- a/examples/stats_example/stats_example.go +++ b/examples/stats_example/stats_example.go @@ -24,28 +24,29 @@ package main import ( "encoding/json" "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "os" "os/signal" "syscall" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) func main() { if len(os.Args) < 4 { - fmt.Fprintf(os.Stderr, "Usage: %s \n", + fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0]) os.Exit(1) } - broker := os.Args[1] + bootstrapServers := os.Args[1] group := os.Args[2] topics := os.Args[3:] sigchan := make(chan os.Signal, 1) signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) c, err := kafka.NewConsumer(&kafka.ConfigMap{ - "bootstrap.servers": broker, + "bootstrap.servers": bootstrapServers, "group.id": group, "session.timeout.ms": 6000, "auto.offset.reset": "earliest", diff --git a/examples/transactions_example/generator.go b/examples/transactions_example/generator.go index 95296e3b5..d2184da6c 100644 --- a/examples/transactions_example/generator.go +++ b/examples/transactions_example/generator.go @@ -22,10 +22,11 @@ package main import ( "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "math/rand" "sync" "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) // Intersections this application will process. @@ -75,7 +76,7 @@ func generateInputMessages(wg *sync.WaitGroup, termChan chan bool) { config := &kafka.ConfigMap{ "client.id": "generator", - "bootstrap.servers": brokers, + "bootstrap.servers": bootstrapServers, "enable.idempotence": true, "go.logs.channel.enable": true, "go.logs.channel": logsChan, diff --git a/examples/transactions_example/processor.go b/examples/transactions_example/processor.go index 201c25745..495e01cfa 100644 --- a/examples/transactions_example/processor.go +++ b/examples/transactions_example/processor.go @@ -24,9 +24,10 @@ package main import ( "encoding/json" "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "sync" "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) // The processor's consumer group id. 
@@ -267,7 +268,7 @@ func trafficLightProcessor(wg *sync.WaitGroup, termChan chan bool) { consumerConfig := &kafka.ConfigMap{ "client.id": "processor", - "bootstrap.servers": brokers, + "bootstrap.servers": bootstrapServers, "group.id": processorGroupID, "auto.offset.reset": "earliest", // Consumer used for input to a transactional processor diff --git a/examples/transactions_example/transactions_example.go b/examples/transactions_example/transactions_example.go index 447fe9fb1..af1450cbe 100644 --- a/examples/transactions_example/transactions_example.go +++ b/examples/transactions_example/transactions_example.go @@ -20,13 +20,14 @@ package main import ( "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "math/rand" "os" "os/signal" "sync" "syscall" "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) // Set to false to disable visualization, useful for troubleshooting. @@ -36,8 +37,8 @@ var withVisualizer = true var inputTopic = "go-transactions-example-ingress-cars" var outputTopic = "go-transactions-example-traffic-light-states" -// brokers holds the bootstrap servers -var brokers string +// bootstrapServers holds the bootstrap servers +var bootstrapServers string // logsChan is the common log channel for all Kafka client instances. var logsChan chan kafka.LogEvent @@ -65,11 +66,11 @@ func logReader(wg *sync.WaitGroup, termChan chan bool) { func main() { if len(os.Args) != 2 { - fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0]) + fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0]) os.Exit(1) } - brokers = os.Args[1] + bootstrapServers = os.Args[1] rand.Seed(time.Now().Unix()) diff --git a/examples/transactions_example/txnhelpers.go b/examples/transactions_example/txnhelpers.go index 6a5055477..09de2c7f4 100644 --- a/examples/transactions_example/txnhelpers.go +++ b/examples/transactions_example/txnhelpers.go @@ -21,8 +21,9 @@ package main import ( "context" "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" ) // createTransactionalProducer creates a transactional producer for the given @@ -30,7 +31,7 @@ import ( func createTransactionalProducer(toppar kafka.TopicPartition) error { producerConfig := &kafka.ConfigMap{ "client.id": fmt.Sprintf("txn-p%d", toppar.Partition), - "bootstrap.servers": brokers, + "bootstrap.servers": bootstrapServers, "transactional.id": fmt.Sprintf("go-transactions-example-p%d", int(toppar.Partition)), "go.logs.channel.enable": true, "go.logs.channel": logsChan, diff --git a/examples/transactions_example/visualizer.go b/examples/transactions_example/visualizer.go index 260f02d79..04c8610ac 100644 --- a/examples/transactions_example/visualizer.go +++ b/examples/transactions_example/visualizer.go @@ -23,12 +23,13 @@ package main import ( "encoding/json" "fmt" - "github.com/confluentinc/confluent-kafka-go/kafka" - "github.com/gdamore/tcell" "os" "sort" "sync" "time" + + "github.com/confluentinc/confluent-kafka-go/kafka" + "github.com/gdamore/tcell" ) // Height and width (terminal characters) per intersection frame. 
@@ -331,7 +332,7 @@ func trafficLightVisualizer(wg *sync.WaitGroup, termChan chan bool) { consumerConfig := &kafka.ConfigMap{ "client.id": "visualizer", - "bootstrap.servers": brokers, + "bootstrap.servers": bootstrapServers, "group.id": processorGroupID + "_visualizer", "auto.offset.reset": "earliest", "go.logs.channel.enable": true, diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..7187c70e3 --- /dev/null +++ b/go.mod @@ -0,0 +1,3 @@ +module github.com/confluentinc/confluent-kafka-go + +go 1.13 diff --git a/kafka/00version.go b/kafka/00version.go index 8c2c7b4b1..13eaac7c3 100644 --- a/kafka/00version.go +++ b/kafka/00version.go @@ -29,19 +29,19 @@ import ( //defines and strings in sync. // -#define MIN_RD_KAFKA_VERSION 0x01060000 +#define MIN_RD_KAFKA_VERSION 0x01090000 #ifdef __APPLE__ -#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" +#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" #else -#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" +#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" #endif #if RD_KAFKA_VERSION < MIN_RD_KAFKA_VERSION #ifdef __APPLE__ -#error "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" +#error "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" #else -#error "confluent-kafka-go requires librdkafka v1.6.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" +#error "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" #endif #endif */ diff --git a/kafka/README.md b/kafka/README.md index 98152e9e3..01e2803e8 100644 --- a/kafka/README.md +++ b/kafka/README.md @@ -87,8 +87,7 @@ See instructions in [kafka/librdkafka/README.md](kafka/librdkafka/README.md). ### Update librdkafka version requirement Update the minimum required librdkafka version in `kafka/00version.go` -and `README.md`. - +and `README.md` and the version in `examples/go.mod` and `mk/doc-gen.py`. ### Update error codes @@ -125,11 +124,6 @@ Manually verify that the examples/ applications work. Also make sure the examples in README.md work. -Convert any examples using `github.com/confluentinc/confluent-kafka-go/kafka` to use -`gopkg.in/confluentinc/confluent-kafka-go.v1/kafka` import path. - - $ find examples/ -type f -name *\.go -exec sed -i -e 's|github\.com/confluentinc/confluent-kafka-go/kafka|gopkg\.in/confluentinc/confluent-kafka-go\.v1/kafka|g' {} + - ### Commit any changes Make sure to push to github before creating the tag to have CI tests pass. 
diff --git a/kafka/adminapi.go b/kafka/adminapi.go index e12812969..e4371eed5 100644 --- a/kafka/adminapi.go +++ b/kafka/adminapi.go @@ -1,3 +1,5 @@ +package kafka + /** * Copyright 2018 Confluent Inc. * @@ -14,8 +16,6 @@ * limitations under the License. */ -package kafka - import ( "context" "fmt" @@ -48,6 +48,27 @@ ConfigEntry_by_idx (const rd_kafka_ConfigEntry_t **entries, size_t cnt, size_t i return NULL; return entries[idx]; } + +static const rd_kafka_acl_result_t * +acl_result_by_idx (const rd_kafka_acl_result_t **acl_results, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return acl_results[idx]; +} + +static const rd_kafka_DeleteAcls_result_response_t * +DeleteAcls_result_response_by_idx (const rd_kafka_DeleteAcls_result_response_t **delete_acls_result_responses, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return delete_acls_result_responses[idx]; +} + +static const rd_kafka_AclBinding_t * +AclBinding_by_idx (const rd_kafka_AclBinding_t **acl_bindings, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return acl_bindings[idx]; +} */ import "C" @@ -312,6 +333,225 @@ func (c ConfigResourceResult) String() string { return fmt.Sprintf("ResourceResult(%s, %s, %d config(s))", c.Type, c.Name, len(c.Config)) } +// ResourcePatternType enumerates the different types of Kafka resource patterns. +type ResourcePatternType int + +const ( + // ResourcePatternTypeUnknown is a resource pattern type not known or not set. + ResourcePatternTypeUnknown = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_UNKNOWN) + // ResourcePatternTypeAny matches any resource, used for lookups. + ResourcePatternTypeAny = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_ANY) + // ResourcePatternTypeMatch will perform pattern matching + ResourcePatternTypeMatch = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_MATCH) + // ResourcePatternTypeLiteral matches a literal resource name + ResourcePatternTypeLiteral = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_LITERAL) + // ResourcePatternTypePrefixed matches a prefixed resource name + ResourcePatternTypePrefixed = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_PREFIXED) +) + +// String returns the human-readable representation of a ResourcePatternType +func (t ResourcePatternType) String() string { + return C.GoString(C.rd_kafka_ResourcePatternType_name(C.rd_kafka_ResourcePatternType_t(t))) +} + +// ResourcePatternTypeFromString translates a resource pattern type name to +// a ResourcePatternType value. +func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error) { + switch strings.ToUpper(patternTypeString) { + case "ANY": + return ResourcePatternTypeAny, nil + case "MATCH": + return ResourcePatternTypeMatch, nil + case "LITERAL": + return ResourcePatternTypeLiteral, nil + case "PREFIXED": + return ResourcePatternTypePrefixed, nil + default: + return ResourcePatternTypeUnknown, NewError(ErrInvalidArg, "Unknown resource pattern type", false) + } +} + +// ACLOperation enumerates the different types of ACL operation. 
+type ACLOperation int + +const ( + // ACLOperationUnknown represents an unknown or unset operation + ACLOperationUnknown = ACLOperation(C.RD_KAFKA_ACL_OPERATION_UNKNOWN) + // ACLOperationAny in a filter, matches any ACLOperation + ACLOperationAny = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ANY) + // ACLOperationAll represents all the operations + ACLOperationAll = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALL) + // ACLOperationRead a read operation + ACLOperationRead = ACLOperation(C.RD_KAFKA_ACL_OPERATION_READ) + // ACLOperationWrite represents a write operation + ACLOperationWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_WRITE) + // ACLOperationCreate represents a create operation + ACLOperationCreate = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CREATE) + // ACLOperationDelete represents a delete operation + ACLOperationDelete = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DELETE) + // ACLOperationAlter represents an alter operation + ACLOperationAlter = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER) + // ACLOperationDescribe represents a describe operation + ACLOperationDescribe = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE) + // ACLOperationClusterAction represents a cluster action operation + ACLOperationClusterAction = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION) + // ACLOperationDescribeConfigs represents a describe configs operation + ACLOperationDescribeConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS) + // ACLOperationAlterConfigs represents an alter configs operation + ACLOperationAlterConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS) + // ACLOperationIdempotentWrite represents an idempotent write operation + ACLOperationIdempotentWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE) +) + +// String returns the human-readable representation of an ACLOperation +func (o ACLOperation) String() string { + return C.GoString(C.rd_kafka_AclOperation_name(C.rd_kafka_AclOperation_t(o))) +} + +// ACLOperationFromString translates a ACL operation name to +// a ACLOperation value. +func ACLOperationFromString(aclOperationString string) (ACLOperation, error) { + switch strings.ToUpper(aclOperationString) { + case "ANY": + return ACLOperationAny, nil + case "ALL": + return ACLOperationAll, nil + case "READ": + return ACLOperationRead, nil + case "WRITE": + return ACLOperationWrite, nil + case "CREATE": + return ACLOperationCreate, nil + case "DELETE": + return ACLOperationDelete, nil + case "ALTER": + return ACLOperationAlter, nil + case "DESCRIBE": + return ACLOperationDescribe, nil + case "CLUSTER_ACTION": + return ACLOperationClusterAction, nil + case "DESCRIBE_CONFIGS": + return ACLOperationDescribeConfigs, nil + case "ALTER_CONFIGS": + return ACLOperationAlterConfigs, nil + case "IDEMPOTENT_WRITE": + return ACLOperationIdempotentWrite, nil + default: + return ACLOperationUnknown, NewError(ErrInvalidArg, "Unknown ACL operation", false) + } +} + +// ACLPermissionType enumerates the different types of ACL permission types. 
+type ACLPermissionType int + +const ( + // ACLPermissionTypeUnknown represents an unknown ACLPermissionType + ACLPermissionTypeUnknown = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN) + // ACLPermissionTypeAny in a filter, matches any ACLPermissionType + ACLPermissionTypeAny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ANY) + // ACLPermissionTypeDeny disallows access + ACLPermissionTypeDeny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_DENY) + // ACLPermissionTypeAllow grants access + ACLPermissionTypeAllow = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW) +) + +// String returns the human-readable representation of an ACLPermissionType +func (o ACLPermissionType) String() string { + return C.GoString(C.rd_kafka_AclPermissionType_name(C.rd_kafka_AclPermissionType_t(o))) +} + +// ACLPermissionTypeFromString translates a ACL permission type name to +// a ACLPermissionType value. +func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error) { + switch strings.ToUpper(aclPermissionTypeString) { + case "ANY": + return ACLPermissionTypeAny, nil + case "DENY": + return ACLPermissionTypeDeny, nil + case "ALLOW": + return ACLPermissionTypeAllow, nil + default: + return ACLPermissionTypeUnknown, NewError(ErrInvalidArg, "Unknown ACL permission type", false) + } +} + +// ACLBinding specifies the operation and permission type for a specific principal +// over one or more resources of the same type. Used by `AdminClient.CreateACLs`, +// returned by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`. +type ACLBinding struct { + Type ResourceType // The resource type. + // The resource name, which depends on the resource type. + // For ResourceBroker the resource name is the broker id. + Name string + ResourcePatternType ResourcePatternType // The resource pattern, relative to the name. + Principal string // The principal this ACLBinding refers to. + Host string // The host that the call is allowed to come from. + Operation ACLOperation // The operation/s specified by this binding. + PermissionType ACLPermissionType // The permission type for the specified operation. +} + +// ACLBindingFilter specifies a filter used to return a list of ACL bindings matching some or all of its attributes. +// Used by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`. +type ACLBindingFilter = ACLBinding + +// ACLBindings is a slice of ACLBinding that also implements +// the sort interface +type ACLBindings []ACLBinding + +// ACLBindingFilters is a slice of ACLBindingFilter that also implements +// the sort interface +type ACLBindingFilters []ACLBindingFilter + +func (a ACLBindings) Len() int { + return len(a) +} + +func (a ACLBindings) Less(i, j int) bool { + if a[i].Type != a[j].Type { + return a[i].Type < a[j].Type + } + if a[i].Name != a[j].Name { + return a[i].Name < a[j].Name + } + if a[i].ResourcePatternType != a[j].ResourcePatternType { + return a[i].ResourcePatternType < a[j].ResourcePatternType + } + if a[i].Principal != a[j].Principal { + return a[i].Principal < a[j].Principal + } + if a[i].Host != a[j].Host { + return a[i].Host < a[j].Host + } + if a[i].Operation != a[j].Operation { + return a[i].Operation < a[j].Operation + } + if a[i].PermissionType != a[j].PermissionType { + return a[i].PermissionType < a[j].PermissionType + } + return true +} + +func (a ACLBindings) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +// CreateACLResult provides create ACL error information. +type CreateACLResult struct { + // Error, if any, of result. 
Check with `Error.Code() != ErrNoError`. + Error Error +} + +// DescribeACLsResult provides describe ACLs result or error information. +type DescribeACLsResult struct { + // Slice of ACL bindings matching the provided filter + ACLBindings ACLBindings + // Error, if any, of result. Check with `Error.Code() != ErrNoError`. + Error Error +} + +// DeleteACLsResult provides delete ACLs result or error information. +type DeleteACLsResult = DescribeACLsResult + // waitResult waits for a result event on cQueue or the ctx to be cancelled, whichever happens // first. // The returned result event is checked for errors its error is returned if set. @@ -950,6 +1190,353 @@ func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error { return a.handle.setOAuthBearerTokenFailure(errstr) } +// aclBindingToC converts a Go ACLBinding struct to a C rd_kafka_AclBinding_t +func (a *AdminClient) aclBindingToC(aclBinding *ACLBinding, cErrstr *C.char, cErrstrSize C.size_t) (result *C.rd_kafka_AclBinding_t, err error) { + var cName, cPrincipal, cHost *C.char + cName, cPrincipal, cHost = nil, nil, nil + if len(aclBinding.Name) > 0 { + cName = C.CString(aclBinding.Name) + defer C.free(unsafe.Pointer(cName)) + } + if len(aclBinding.Principal) > 0 { + cPrincipal = C.CString(aclBinding.Principal) + defer C.free(unsafe.Pointer(cPrincipal)) + } + if len(aclBinding.Host) > 0 { + cHost = C.CString(aclBinding.Host) + defer C.free(unsafe.Pointer(cHost)) + } + + result = C.rd_kafka_AclBinding_new( + C.rd_kafka_ResourceType_t(aclBinding.Type), + cName, + C.rd_kafka_ResourcePatternType_t(aclBinding.ResourcePatternType), + cPrincipal, + cHost, + C.rd_kafka_AclOperation_t(aclBinding.Operation), + C.rd_kafka_AclPermissionType_t(aclBinding.PermissionType), + cErrstr, + cErrstrSize, + ) + if result == nil { + err = newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for ACL binding %v: %v", aclBinding, C.GoString(cErrstr))) + } + return +} + +// aclBindingFilterToC converts a Go ACLBindingFilter struct to a C rd_kafka_AclBindingFilter_t +func (a *AdminClient) aclBindingFilterToC(aclBindingFilter *ACLBindingFilter, cErrstr *C.char, cErrstrSize C.size_t) (result *C.rd_kafka_AclBindingFilter_t, err error) { + var cName, cPrincipal, cHost *C.char + cName, cPrincipal, cHost = nil, nil, nil + if len(aclBindingFilter.Name) > 0 { + cName = C.CString(aclBindingFilter.Name) + defer C.free(unsafe.Pointer(cName)) + } + if len(aclBindingFilter.Principal) > 0 { + cPrincipal = C.CString(aclBindingFilter.Principal) + defer C.free(unsafe.Pointer(cPrincipal)) + } + if len(aclBindingFilter.Host) > 0 { + cHost = C.CString(aclBindingFilter.Host) + defer C.free(unsafe.Pointer(cHost)) + } + + result = C.rd_kafka_AclBindingFilter_new( + C.rd_kafka_ResourceType_t(aclBindingFilter.Type), + cName, + C.rd_kafka_ResourcePatternType_t(aclBindingFilter.ResourcePatternType), + cPrincipal, + cHost, + C.rd_kafka_AclOperation_t(aclBindingFilter.Operation), + C.rd_kafka_AclPermissionType_t(aclBindingFilter.PermissionType), + cErrstr, + cErrstrSize, + ) + if result == nil { + err = newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for ACL binding filter %v: %v", aclBindingFilter, C.GoString(cErrstr))) + } + return +} + +// cToACLBinding converts a C rd_kafka_AclBinding_t to Go ACLBinding +func (a *AdminClient) cToACLBinding(cACLBinding *C.rd_kafka_AclBinding_t) ACLBinding { + return ACLBinding{ + ResourceType(C.rd_kafka_AclBinding_restype(cACLBinding)), + C.GoString(C.rd_kafka_AclBinding_name(cACLBinding)), + 
ResourcePatternType(C.rd_kafka_AclBinding_resource_pattern_type(cACLBinding)), + C.GoString(C.rd_kafka_AclBinding_principal(cACLBinding)), + C.GoString(C.rd_kafka_AclBinding_host(cACLBinding)), + ACLOperation(C.rd_kafka_AclBinding_operation(cACLBinding)), + ACLPermissionType(C.rd_kafka_AclBinding_permission_type(cACLBinding)), + } +} + +// cToACLBindings converts a C rd_kafka_AclBinding_t list to Go ACLBindings +func (a *AdminClient) cToACLBindings(cACLBindings **C.rd_kafka_AclBinding_t, aclCnt C.size_t) (result ACLBindings) { + result = make(ACLBindings, aclCnt) + for i := uint(0); i < uint(aclCnt); i++ { + cACLBinding := C.AclBinding_by_idx(cACLBindings, aclCnt, C.size_t(i)) + if cACLBinding == nil { + panic("AclBinding_by_idx must not return nil") + } + result[i] = a.cToACLBinding(cACLBinding) + } + return +} + +// cToCreateACLResults converts a C acl_result_t array to Go CreateACLResult list. +func (a *AdminClient) cToCreateACLResults(cCreateAclsRes **C.rd_kafka_acl_result_t, aclCnt C.size_t) (result []CreateACLResult, err error) { + result = make([]CreateACLResult, uint(aclCnt)) + + for i := uint(0); i < uint(aclCnt); i++ { + cCreateACLRes := C.acl_result_by_idx(cCreateAclsRes, aclCnt, C.size_t(i)) + if cCreateACLRes != nil { + cCreateACLError := C.rd_kafka_acl_result_error(cCreateACLRes) + result[i].Error = newErrorFromCError(cCreateACLError) + } + } + + return result, nil +} + +// cToDescribeACLsResult converts a C rd_kafka_event_t to a Go DescribeAclsResult struct. +func (a *AdminClient) cToDescribeACLsResult(rkev *C.rd_kafka_event_t) (result *DescribeACLsResult) { + result = &DescribeACLsResult{} + err := C.rd_kafka_event_error(rkev) + errCode := ErrorCode(err) + errStr := C.rd_kafka_event_error_string(rkev) + + var cResultACLsCount C.size_t + cResult := C.rd_kafka_event_DescribeAcls_result(rkev) + cResultACLs := C.rd_kafka_DescribeAcls_result_acls(cResult, &cResultACLsCount) + if errCode != ErrNoError { + result.Error = newErrorFromCString(err, errStr) + } + result.ACLBindings = a.cToACLBindings(cResultACLs, cResultACLsCount) + return +} + +// cToDeleteACLsResults converts a C rd_kafka_DeleteAcls_result_response_t array to Go DeleteAclsResult slice. +func (a *AdminClient) cToDeleteACLsResults(cDeleteACLsResResponse **C.rd_kafka_DeleteAcls_result_response_t, resResponseCnt C.size_t) (result []DeleteACLsResult) { + result = make([]DeleteACLsResult, uint(resResponseCnt)) + + for i := uint(0); i < uint(resResponseCnt); i++ { + cDeleteACLsResResponse := C.DeleteAcls_result_response_by_idx(cDeleteACLsResResponse, resResponseCnt, C.size_t(i)) + if cDeleteACLsResResponse == nil { + panic("DeleteAcls_result_response_by_idx must not return nil") + } + + cDeleteACLsError := C.rd_kafka_DeleteAcls_result_response_error(cDeleteACLsResResponse) + result[i].Error = newErrorFromCError(cDeleteACLsError) + + var cMatchingACLsCount C.size_t + cMatchingACLs := C.rd_kafka_DeleteAcls_result_response_matching_acls( + cDeleteACLsResResponse, &cMatchingACLsCount) + + result[i].ACLBindings = a.cToACLBindings(cMatchingACLs, cMatchingACLsCount) + } + return +} + +// CreateACLs creates one or more ACL bindings. +// +// Parameters: +// * `ctx` - context with the maximum amount of time to block, or nil for indefinite. +// * `aclBindings` - A slice of ACL binding specifications to create. 
+// * `options` - Create ACLs options +// +// Returns a slice of CreateACLResult with a ErrNoError ErrorCode when the operation was successful +// plus an error that is not nil for client level errors +func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error) { + if aclBindings == nil { + return nil, newErrorFromString(ErrInvalidArg, + "Expected non-nil slice of ACLBinding structs") + } + if len(aclBindings) == 0 { + return nil, newErrorFromString(ErrInvalidArg, + "Expected non-empty slice of ACLBinding structs") + } + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cACLBindings := make([]*C.rd_kafka_AclBinding_t, len(aclBindings)) + + for i, aclBinding := range aclBindings { + cACLBindings[i], err = a.aclBindingToC(&aclBinding, cErrstr, cErrstrSize) + if err != nil { + return + } + defer C.rd_kafka_AclBinding_destroy(cACLBindings[i]) + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_CREATEACLS, genericOptions) + if err != nil { + return nil, err + } + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_CreateAcls( + a.handle.rk, + (**C.rd_kafka_AclBinding_t)(&cACLBindings[0]), + C.size_t(len(cACLBindings)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_CREATEACLS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + var cResultCnt C.size_t + cResult := C.rd_kafka_event_CreateAcls_result(rkev) + aclResults := C.rd_kafka_CreateAcls_result_acls(cResult, &cResultCnt) + result, err = a.cToCreateACLResults(aclResults, cResultCnt) + return +} + +// DescribeACLs matches ACL bindings by filter. +// +// Parameters: +// * `ctx` - context with the maximum amount of time to block, or nil for indefinite. +// * `aclBindingFilter` - A filter with attributes that must match. +// string attributes match exact values or any string if set to empty string. +// Enum attributes match exact values or any value if ending with `Any`. +// If `ResourcePatternType` is set to `ResourcePatternTypeMatch` returns all +// the ACL bindings with `ResourcePatternTypeLiteral`, `ResourcePatternTypeWildcard` +// or `ResourcePatternTypePrefixed` pattern type that match the resource name. 
+// * `options` - Describe ACLs options +// +// Returns a slice of ACLBindings when the operation was successful +// plus an error that is not `nil` for client level errors +func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error) { + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cACLBindingFilter, err := a.aclBindingFilterToC(&aclBindingFilter, cErrstr, cErrstrSize) + if err != nil { + return + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBEACLS, genericOptions) + if err != nil { + return nil, err + } + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_DescribeAcls( + a.handle.rk, + cACLBindingFilter, + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBEACLS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + result = a.cToDescribeACLsResult(rkev) + return +} + +// DeleteACLs deletes ACL bindings matching one or more ACL binding filters. +// +// Parameters: +// * `ctx` - context with the maximum amount of time to block, or nil for indefinite. +// * `aclBindingFilters` - a slice of ACL binding filters to match ACLs to delete. +// string attributes match exact values or any string if set to empty string. +// Enum attributes match exact values or any value if ending with `Any`. +// If `ResourcePatternType` is set to `ResourcePatternTypeMatch` returns all +// the ACL bindings with `ResourcePatternTypeLiteral`, `ResourcePatternTypeWildcard` +// or `ResourcePatternTypePrefixed` pattern type that match the resource name. 
+// * `options` - Delete ACLs options +// +// Returns a slice of ACLBinding for each filter when the operation was successful +// plus an error that is not `nil` for client level errors +func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error) { + if aclBindingFilters == nil { + return nil, newErrorFromString(ErrInvalidArg, + "Expected non-nil slice of ACLBindingFilter structs") + } + if len(aclBindingFilters) == 0 { + return nil, newErrorFromString(ErrInvalidArg, + "Expected non-empty slice of ACLBindingFilter structs") + } + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cACLBindingFilters := make([]*C.rd_kafka_AclBindingFilter_t, len(aclBindingFilters)) + + for i, aclBindingFilter := range aclBindingFilters { + cACLBindingFilters[i], err = a.aclBindingFilterToC(&aclBindingFilter, cErrstr, cErrstrSize) + if err != nil { + return + } + defer C.rd_kafka_AclBinding_destroy(cACLBindingFilters[i]) + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DELETEACLS, genericOptions) + if err != nil { + return nil, err + } + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_DeleteAcls( + a.handle.rk, + (**C.rd_kafka_AclBindingFilter_t)(&cACLBindingFilters[0]), + C.size_t(len(cACLBindingFilters)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DELETEACLS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + var cResultResponsesCount C.size_t + cResult := C.rd_kafka_event_DeleteAcls_result(rkev) + cResultResponses := C.rd_kafka_DeleteAcls_result_responses(cResult, &cResultResponsesCount) + result = a.cToDeleteACLsResults(cResultResponses, cResultResponsesCount) + return +} + // Close an AdminClient instance. func (a *AdminClient) Close() { if a.isDerived { diff --git a/kafka/adminapi_test.go b/kafka/adminapi_test.go index 50660869e..54b878ee0 100644 --- a/kafka/adminapi_test.go +++ b/kafka/adminapi_test.go @@ -76,6 +76,350 @@ func TestAdminAPIWithDefaultValue(t *testing.T) { adminClient.Close() } +func testAdminAPIsCreateACLs(what string, a *AdminClient, t *testing.T) { + var res []CreateACLResult + var err error + var ctx context.Context + var cancel context.CancelFunc + var expDuration time.Duration + var expDurationLonger time.Duration + var expError string + var invalidTests []ACLBindings + + checkFail := func(res []CreateACLResult, err error) { + if res != nil || err == nil { + t.Fatalf("Expected CreateACLs to fail, but got result: %v, err: %v", res, err) + } + } + + testACLBindings := ACLBindings{ + { + Type: ResourceTopic, + Name: "mytopic", + ResourcePatternType: ResourcePatternTypeLiteral, + Principal: "User:myuser", + Host: "*", + Operation: ACLOperationAll, + PermissionType: ACLPermissionTypeAllow, + }, + } + + copyACLBindings := func() ACLBindings { + return append(ACLBindings{}, testACLBindings...) 
+ } + + t.Logf("AdminClient API - ACLs testing on %s: %s", a, what) + expDuration, err = time.ParseDuration("0.1s") + if err != nil { + t.Fatalf("%s", err) + } + + // nil aclBindings + res, err = a.CreateACLs(ctx, nil) + checkFail(res, err) + expError = "Expected non-nil slice of ACLBinding structs" + if err.Error() != expError { + t.Fatalf("Expected error \"%s\", received: \"%v\"", expError, err.Error()) + } + + // empty aclBindings + res, err = a.CreateACLs(ctx, ACLBindings{}) + checkFail(res, err) + expError = "Expected non-empty slice of ACLBinding structs" + if err.Error() != expError { + t.Fatalf("Expected error \"%s\", received: \"%v\"", expError, err.Error()) + } + + // Correct input, fail with timeout + ctx, cancel = context.WithTimeout(context.Background(), expDuration) + defer cancel() + + res, err = a.CreateACLs(ctx, testACLBindings) + checkFail(res, err) + if ctx.Err() != context.DeadlineExceeded { + t.Fatalf("Expected DeadlineExceeded, not %v, %v", ctx.Err(), err) + } + + // request timeout comes before context deadline + expDurationLonger, err = time.ParseDuration("0.2s") + if err != nil { + t.Fatalf("%s", err) + } + + ctx, cancel = context.WithTimeout(context.Background(), expDurationLonger) + defer cancel() + + res, err = a.CreateACLs(ctx, testACLBindings, SetAdminRequestTimeout(expDuration)) + checkFail(res, err) + expError = "Failed while waiting for controller: Local: Timed out" + if err.Error() != expError { + t.Fatalf("Expected error \"%s\", received: \"%v\"", expError, err.Error()) + } + + // Invalid ACL bindings + invalidTests = []ACLBindings{copyACLBindings(), copyACLBindings()} + invalidTests[0][0].Type = ResourceUnknown + invalidTests[1][0].Type = ResourceAny + expError = ": Invalid resource type" + for _, invalidACLBindings := range invalidTests { + res, err = a.CreateACLs(ctx, invalidACLBindings) + checkFail(res, err) + if !strings.HasSuffix(err.Error(), expError) { + t.Fatalf("Expected an error ending with \"%s\", received: \"%s\"", expError, err.Error()) + } + } + + suffixes := []string{ + ": Invalid resource pattern type", + ": Invalid resource pattern type", + ": Invalid resource pattern type", + ": Invalid operation", + ": Invalid operation", + ": Invalid permission type", + ": Invalid permission type", + ": Invalid resource name", + ": Invalid principal", + ": Invalid host", + } + nInvalidTests := len(suffixes) + invalidTests = make([]ACLBindings, nInvalidTests) + for i := 0; i < nInvalidTests; i++ { + invalidTests[i] = copyACLBindings() + } + invalidTests[0][0].ResourcePatternType = ResourcePatternTypeUnknown + invalidTests[1][0].ResourcePatternType = ResourcePatternTypeMatch + invalidTests[2][0].ResourcePatternType = ResourcePatternTypeAny + invalidTests[3][0].Operation = ACLOperationUnknown + invalidTests[4][0].Operation = ACLOperationAny + invalidTests[5][0].PermissionType = ACLPermissionTypeUnknown + invalidTests[6][0].PermissionType = ACLPermissionTypeAny + invalidTests[7][0].Name = "" + invalidTests[8][0].Principal = "" + invalidTests[9][0].Host = "" + + for i, invalidACLBindings := range invalidTests { + res, err = a.CreateACLs(ctx, invalidACLBindings) + checkFail(res, err) + if !strings.HasSuffix(err.Error(), suffixes[i]) { + t.Fatalf("Expected an error ending with \"%s\", received: \"%s\"", suffixes[i], err.Error()) + } + } +} + +func testAdminAPIsDescribeACLs(what string, a *AdminClient, t *testing.T) { + var res *DescribeACLsResult + var err error + var ctx context.Context + var cancel context.CancelFunc + var expDuration time.Duration + var 
expDurationLonger time.Duration + var expError string + + checkFail := func(res *DescribeACLsResult, err error) { + if res != nil || err == nil { + t.Fatalf("Expected DescribeACLs to fail, but got result: %v, err: %v", res, err) + } + } + + aclBindingsFilter := ACLBindingFilter{ + Type: ResourceTopic, + ResourcePatternType: ResourcePatternTypeLiteral, + Operation: ACLOperationAll, + PermissionType: ACLPermissionTypeAllow, + } + + t.Logf("AdminClient API - ACLs testing on %s: %s", a, what) + expDuration, err = time.ParseDuration("0.1s") + if err != nil { + t.Fatalf("%s", err) + } + + // Correct input, fail with timeout + ctx, cancel = context.WithTimeout(context.Background(), expDuration) + defer cancel() + + res, err = a.DescribeACLs(ctx, aclBindingsFilter) + checkFail(res, err) + if ctx.Err() != context.DeadlineExceeded { + t.Fatalf("Expected DeadlineExceeded, not %v, %v", ctx.Err(), err) + } + + // request timeout comes before context deadline + expDurationLonger, err = time.ParseDuration("0.2s") + if err != nil { + t.Fatalf("%s", err) + } + + ctx, cancel = context.WithTimeout(context.Background(), expDurationLonger) + defer cancel() + + res, err = a.DescribeACLs(ctx, aclBindingsFilter, SetAdminRequestTimeout(expDuration)) + checkFail(res, err) + expError = "Failed while waiting for controller: Local: Timed out" + if err.Error() != expError { + t.Fatalf("Expected error \"%s\", received: \"%v\"", expError, err.Error()) + } + + // Invalid ACL binding filters + suffixes := []string{ + ": Invalid resource pattern type", + ": Invalid operation", + ": Invalid permission type", + } + nInvalidTests := len(suffixes) + invalidTests := make(ACLBindingFilters, nInvalidTests) + for i := 0; i < nInvalidTests; i++ { + invalidTests[i] = aclBindingsFilter + } + invalidTests[0].ResourcePatternType = ResourcePatternTypeUnknown + invalidTests[1].Operation = ACLOperationUnknown + invalidTests[2].PermissionType = ACLPermissionTypeUnknown + + for i, invalidACLBindingFilter := range invalidTests { + res, err = a.DescribeACLs(ctx, invalidACLBindingFilter) + checkFail(res, err) + if !strings.HasSuffix(err.Error(), suffixes[i]) { + t.Fatalf("Expected an error ending with \"%s\", received: \"%s\"", suffixes[i], err.Error()) + } + } + + // ACL binding filters are valid with empty strings, + // matching any value + validTests := [3]ACLBindingFilter{} + for i := 0; i < len(validTests); i++ { + validTests[i] = aclBindingsFilter + } + validTests[0].Name = "" + validTests[1].Principal = "" + validTests[2].Host = "" + + for _, validACLBindingFilter := range validTests { + res, err = a.DescribeACLs(ctx, validACLBindingFilter) + checkFail(res, err) + if ctx.Err() != context.DeadlineExceeded { + t.Fatalf("Expected DeadlineExceeded, not %v, %v", ctx.Err(), err) + } + } +} + +func testAdminAPIsDeleteACLs(what string, a *AdminClient, t *testing.T) { + var res []DeleteACLsResult + var err error + var ctx context.Context + var cancel context.CancelFunc + var expDuration time.Duration + var expDurationLonger time.Duration + var expError string + + checkFail := func(res []DeleteACLsResult, err error) { + if res != nil || err == nil { + t.Fatalf("Expected DeleteACL to fail, but got result: %v, err: %v", res, err) + } + } + + aclBindingsFilters := ACLBindingFilters{ + { + Type: ResourceTopic, + ResourcePatternType: ResourcePatternTypeLiteral, + Operation: ACLOperationAll, + PermissionType: ACLPermissionTypeAllow, + }, + } + + copyACLBindingFilters := func() ACLBindingFilters { + return append(ACLBindingFilters{}, 
aclBindingsFilters...) + } + + t.Logf("AdminClient API - ACLs testing on %s: %s", a, what) + expDuration, err = time.ParseDuration("0.1s") + if err != nil { + t.Fatalf("%s", err) + } + + // nil aclBindingFilters + res, err = a.DeleteACLs(ctx, nil) + checkFail(res, err) + expError = "Expected non-nil slice of ACLBindingFilter structs" + if err.Error() != expError { + t.Fatalf("Expected error \"%s\", received: \"%v\"", expError, err.Error()) + } + + // empty aclBindingFilters + res, err = a.DeleteACLs(ctx, ACLBindingFilters{}) + checkFail(res, err) + expError = "Expected non-empty slice of ACLBindingFilter structs" + if err.Error() != expError { + t.Fatalf("Expected error \"%s\", received: \"%v\"", expError, err.Error()) + } + + // Correct input, fail with timeout + ctx, cancel = context.WithTimeout(context.Background(), expDuration) + defer cancel() + + res, err = a.DeleteACLs(ctx, aclBindingsFilters) + checkFail(res, err) + if ctx.Err() != context.DeadlineExceeded { + t.Fatalf("Expected DeadlineExceeded, not %v, %v", ctx.Err(), err) + } + + // request timeout comes before context deadline + expDurationLonger, err = time.ParseDuration("0.2s") + if err != nil { + t.Fatalf("%s", err) + } + + ctx, cancel = context.WithTimeout(context.Background(), expDurationLonger) + defer cancel() + + res, err = a.DeleteACLs(ctx, aclBindingsFilters, SetAdminRequestTimeout(expDuration)) + checkFail(res, err) + expError = "Failed while waiting for controller: Local: Timed out" + if err.Error() != expError { + t.Fatalf("Expected error \"%s\", received: \"%v\"", expError, err.Error()) + } + + // Invalid ACL binding filters + suffixes := []string{ + ": Invalid resource pattern type", + ": Invalid operation", + ": Invalid permission type", + } + nInvalidTests := len(suffixes) + invalidTests := make([]ACLBindingFilters, nInvalidTests) + for i := 0; i < nInvalidTests; i++ { + invalidTests[i] = copyACLBindingFilters() + } + invalidTests[0][0].ResourcePatternType = ResourcePatternTypeUnknown + invalidTests[1][0].Operation = ACLOperationUnknown + invalidTests[2][0].PermissionType = ACLPermissionTypeUnknown + + for i, invalidACLBindingFilters := range invalidTests { + res, err = a.DeleteACLs(ctx, invalidACLBindingFilters) + checkFail(res, err) + if !strings.HasSuffix(err.Error(), suffixes[i]) { + t.Fatalf("Expected an error ending with \"%s\", received: \"%s\"", suffixes[i], err.Error()) + } + } + + // ACL binding filters are valid with empty strings, + // matching any value + validTests := [3]ACLBindingFilters{} + for i := 0; i < len(validTests); i++ { + validTests[i] = copyACLBindingFilters() + } + validTests[0][0].Name = "" + validTests[1][0].Principal = "" + validTests[2][0].Host = "" + + for _, validACLBindingFilters := range validTests { + res, err = a.DeleteACLs(ctx, validACLBindingFilters) + checkFail(res, err) + if ctx.Err() != context.DeadlineExceeded { + t.Fatalf("Expected DeadlineExceeded, not %v, %v", ctx.Err(), err) + } + } +} + func testAdminAPIs(what string, a *AdminClient, t *testing.T) { t.Logf("AdminClient API testing on %s: %s", a, what) @@ -301,6 +645,10 @@ func testAdminAPIs(what string, a *AdminClient, t *testing.T) { if ctx.Err() != context.DeadlineExceeded || err != context.DeadlineExceeded { t.Fatalf("Expected DeadlineExceeded, not %v", ctx.Err()) } + + testAdminAPIsCreateACLs(what, a, t) + testAdminAPIsDescribeACLs(what, a, t) + testAdminAPIsDeleteACLs(what, a, t) } // TestAdminAPIs dry-tests most Admin APIs, no broker is needed. 
diff --git a/kafka/adminoptions.go b/kafka/adminoptions.go index 842631b30..8c1bc81ff 100644 --- a/kafka/adminoptions.go +++ b/kafka/adminoptions.go @@ -1,3 +1,5 @@ +package kafka + /** * Copyright 2018 Confluent Inc. * @@ -14,8 +16,6 @@ * limitations under the License. */ -package kafka - import ( "fmt" "time" @@ -166,6 +166,15 @@ func (ao AdminOptionValidateOnly) supportsCreatePartitions() { func (ao AdminOptionValidateOnly) supportsAlterConfigs() { } +func (ao AdminOptionRequestTimeout) supportsCreateACLs() { +} + +func (ao AdminOptionRequestTimeout) supportsDescribeACLs() { +} + +func (ao AdminOptionRequestTimeout) supportsDeleteACLs() { +} + func (ao AdminOptionValidateOnly) apply(cOptions *C.rd_kafka_AdminOptions_t) error { if !ao.isSet { return nil @@ -240,6 +249,30 @@ type DescribeConfigsAdminOption interface { apply(cOptions *C.rd_kafka_AdminOptions_t) error } +// CreateACLsAdminOption - see setter. +// +// See SetAdminRequestTimeout +type CreateACLsAdminOption interface { + supportsCreateACLs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DescribeACLsAdminOption - see setter. +// +// See SetAdminRequestTimeout +type DescribeACLsAdminOption interface { + supportsDescribeACLs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DeleteACLsAdminOption - see setter. +// +// See SetAdminRequestTimeout +type DeleteACLsAdminOption interface { + supportsDeleteACLs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + // AdminOption is a generic type not to be used directly. // // See CreateTopicsAdminOption et.al. diff --git a/kafka/api.html b/kafka/api.html index ab46fbb2d..c92903341 100644 --- a/kafka/api.html +++ b/kafka/api.html @@ -7,16 +7,16 @@ kafka - Go Documentation Server - + - - @@ -387,6 +387,71 @@

func WriteErrorCodes(f *os.File) +
+ + type ACLBinding + +
+
+ + type ACLBindingFilter + +
+
+ + type ACLBindingFilters + +
+
+ + type ACLBindings + +
+
+ + func (a ACLBindings) Len() int + +
+
+ + func (a ACLBindings) Less(i, j int) bool + +
+
+ + func (a ACLBindings) Swap(i, j int) + +
+
+ + type ACLOperation + +
+
+ + func ACLOperationFromString(aclOperationString string) (ACLOperation, error) + +
+
+ + func (o ACLOperation) String() string + +
+
+ + type ACLPermissionType + +
+
+ + func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error) + +
+
+ + func (o ACLPermissionType) String() string + +
type AdminClient @@ -427,6 +492,11 @@

func (a *AdminClient) ControllerID(ctx context.Context) (controllerID int32, err error)

+
+ + func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error) + +
func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error) @@ -437,11 +507,21 @@

func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error)

+
+ + func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error) + +
func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error)
+
+ + func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error) + +
func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error) @@ -752,6 +832,11 @@

func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error

+
+ + func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err error) + +
func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error) @@ -797,6 +882,16 @@

func NewTestConsumerGroupMetadata(groupID string) (*ConsumerGroupMetadata, error)

+
+ + type CreateACLResult + +
+
+ + type CreateACLsAdminOption + +
type CreatePartitionsAdminOption @@ -807,11 +902,31 @@

type CreateTopicsAdminOption

+
+ + type DeleteACLsAdminOption + +
+
+ + type DeleteACLsResult + +
type DeleteTopicsAdminOption
+
+ + type DescribeACLsAdminOption + +
+
+ + type DescribeACLsResult + +
type DescribeConfigsAdminOption @@ -912,6 +1027,26 @@

type Metadata

+
+ + type MockCluster + +
+
+ + func NewMockCluster(brokerCount int) (*MockCluster, error) + +
+
+ + func (mc *MockCluster) BootstrapServers() string + +
+
+ + func (mc *MockCluster) Close() + +
type OAuthBearerToken @@ -1102,6 +1237,21 @@

type RebalanceCb

+
+ + type ResourcePatternType + +
+
+ + func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error) + +
+
+ + func (t ResourcePatternType) String() string + +
type ResourceType @@ -1205,70 +1355,73 @@

- + 00version.go - + adminapi.go - + adminoptions.go - - build_glibc_linux.go + + build_darwin.go - + config.go - + consumer.go - + context.go - + error.go - + error_gen.go - + event.go - + generated_errors.go - + handle.go - + header.go - + kafka.go - + log.go - + message.go - + metadata.go - + misc.go - + + mockcluster.go + + offset.go - + producer.go - + testhelpers.go - + time.go @@ -1282,37 +1435,87 @@

const (
     // ResourceUnknown - Unknown
-    ResourceUnknown = ResourceType(C.RD_KAFKA_RESOURCE_UNKNOWN)
+    ResourceUnknown = ResourceType(C.RD_KAFKA_RESOURCE_UNKNOWN)
     // ResourceAny - match any resource type (DescribeConfigs)
-    ResourceAny = ResourceType(C.RD_KAFKA_RESOURCE_ANY)
+    ResourceAny = ResourceType(C.RD_KAFKA_RESOURCE_ANY)
     // ResourceTopic - Topic
-    ResourceTopic = ResourceType(C.RD_KAFKA_RESOURCE_TOPIC)
+    ResourceTopic = ResourceType(C.RD_KAFKA_RESOURCE_TOPIC)
     // ResourceGroup - Group
-    ResourceGroup = ResourceType(C.RD_KAFKA_RESOURCE_GROUP)
+    ResourceGroup = ResourceType(C.RD_KAFKA_RESOURCE_GROUP)
     // ResourceBroker - Broker
-    ResourceBroker = ResourceType(C.RD_KAFKA_RESOURCE_BROKER)
+    ResourceBroker = ResourceType(C.RD_KAFKA_RESOURCE_BROKER)
 )
const (
     // ConfigSourceUnknown is the default value
-    ConfigSourceUnknown = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG)
+    ConfigSourceUnknown = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG)
     // ConfigSourceDynamicTopic is dynamic topic config that is configured for a specific topic
-    ConfigSourceDynamicTopic = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG)
+    ConfigSourceDynamicTopic = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG)
     // ConfigSourceDynamicBroker is dynamic broker config that is configured for a specific broker
-    ConfigSourceDynamicBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG)
+    ConfigSourceDynamicBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG)
     // ConfigSourceDynamicDefaultBroker is dynamic broker config that is configured as default for all brokers in the cluster
-    ConfigSourceDynamicDefaultBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG)
+    ConfigSourceDynamicDefaultBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG)
     // ConfigSourceStaticBroker is static broker config provided as broker properties at startup (e.g. from server.properties file)
-    ConfigSourceStaticBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG)
+    ConfigSourceStaticBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG)
     // ConfigSourceDefault is built-in default configuration for configs that have a default value
-    ConfigSourceDefault = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG)
+    ConfigSourceDefault = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG)
+)
+
const (
+    // ResourcePatternTypeUnknown is a resource pattern type not known or not set.
+    ResourcePatternTypeUnknown = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_UNKNOWN)
+    // ResourcePatternTypeAny matches any resource, used for lookups.
+    ResourcePatternTypeAny = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_ANY)
+    // ResourcePatternTypeMatch will perform pattern matching
+    ResourcePatternTypeMatch = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_MATCH)
+    // ResourcePatternTypeLiteral matches a literal resource name
+    ResourcePatternTypeLiteral = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_LITERAL)
+    // ResourcePatternTypePrefixed matches a prefixed resource name
+    ResourcePatternTypePrefixed = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_PREFIXED)
+)
+
const (
+    // ACLOperationUnknown represents an unknown or unset operation
+    ACLOperationUnknown = ACLOperation(C.RD_KAFKA_ACL_OPERATION_UNKNOWN)
+    // ACLOperationAny in a filter, matches any ACLOperation
+    ACLOperationAny = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ANY)
+    // ACLOperationAll represents all the operations
+    ACLOperationAll = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALL)
+    // ACLOperationRead a read operation
+    ACLOperationRead = ACLOperation(C.RD_KAFKA_ACL_OPERATION_READ)
+    // ACLOperationWrite represents a write operation
+    ACLOperationWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_WRITE)
+    // ACLOperationCreate represents a create operation
+    ACLOperationCreate = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CREATE)
+    // ACLOperationDelete represents a delete operation
+    ACLOperationDelete = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DELETE)
+    // ACLOperationAlter represents an alter operation
+    ACLOperationAlter = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER)
+    // ACLOperationDescribe represents a describe operation
+    ACLOperationDescribe = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE)
+    // ACLOperationClusterAction represents a cluster action operation
+    ACLOperationClusterAction = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION)
+    // ACLOperationDescribeConfigs represents a describe configs operation
+    ACLOperationDescribeConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS)
+    // ACLOperationAlterConfigs represents an alter configs operation
+    ACLOperationAlterConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS)
+    // ACLOperationIdempotentWrite represents an idempotent write operation
+    ACLOperationIdempotentWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE)
+)
+
const (
+    // ACLPermissionTypeUnknown represents an unknown ACLPermissionType
+    ACLPermissionTypeUnknown = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN)
+    // ACLPermissionTypeAny in a filter, matches any ACLPermissionType
+    ACLPermissionTypeAny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ANY)
+    // ACLPermissionTypeDeny disallows access
+    ACLPermissionTypeDeny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_DENY)
+    // ACLPermissionTypeAllow grants access
+    ACLPermissionTypeAllow = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW)
 )
const (
     // TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support
-    TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE)
+    TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE)
     // TimestampCreateTime indicates timestamp set by producer (source time)
-    TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME)
+    TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME)
     // TimestampLogAppendTime indicates timestamp set set by broker (store time)
-    TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
+    TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
 )
const (
     // PurgeInFlight purges messages in-flight to or from the broker.
@@ -1320,75 +1523,253 @@ 

// broker, making it impossible for the application to know if these // messages were successfully delivered or not. // Retrying these messages may lead to duplicates. - PurgeInFlight = int(C.RD_KAFKA_PURGE_F_INFLIGHT) + PurgeInFlight = int(C.RD_KAFKA_PURGE_F_INFLIGHT) // PurgeQueue Purge messages in internal queues. - PurgeQueue = int(C.RD_KAFKA_PURGE_F_QUEUE) + PurgeQueue = int(C.RD_KAFKA_PURGE_F_QUEUE) // PurgeNonBlocking Don't wait for background thread queue purging to finish. - PurgeNonBlocking = int(C.RD_KAFKA_PURGE_F_NON_BLOCKING) + PurgeNonBlocking = int(C.RD_KAFKA_PURGE_F_NON_BLOCKING) )

const (
     // AlterOperationSet sets/overwrites the configuration setting.
-    AlterOperationSet = iota
+    AlterOperationSet = iota
 )

LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client

-
const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.7.0.tgz"
+
const LibrdkafkaLinkInfo = "static darwin from librdkafka-static-bundle-v1.9.0.tgz"

OffsetBeginning represents the earliest offset (logical)

-
const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING)
+
const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING)

OffsetEnd represents the latest offset (logical)

-
const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END)
+
const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END)

OffsetInvalid represents an invalid/unspecified offset

-
const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID)
+
const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID)

OffsetStored represents a stored offset

-
const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED)
+
const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED)

PartitionAny represents any partition (for partitioning), or unspecified value (for all other cases)

-
const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA)
+
const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA)

func - + LibraryVersion

-
func LibraryVersion() (int, string)
+
func LibraryVersion() (int, string)

LibraryVersion returns the underlying librdkafka library version as a (version_int, version_str) tuple.

func - + WriteErrorCodes

-
func WriteErrorCodes(f *os.File)
+
func WriteErrorCodes(f *os.File)

WriteErrorCodes writes Go error code constants to file from the librdkafka error codes. This function is not intended for public use.

+

+ type + + ACLBinding + + +

+

+ ACLBinding specifies the operation and permission type for a specific principal +over one or more resources of the same type. Used by `AdminClient.CreateACLs`, +returned by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`. +

+
type ACLBinding struct {
+    Type ResourceType // The resource type.
+    // The resource name, which depends on the resource type.
+    // For ResourceBroker the resource name is the broker id.
+    Name                string
+    ResourcePatternType ResourcePatternType // The resource pattern, relative to the name.
+    Principal           string              // The principal this ACLBinding refers to.
+    Host                string              // The host that the call is allowed to come from.
+    Operation           ACLOperation        // The operation/s specified by this binding.
+    PermissionType      ACLPermissionType   // The permission type for the specified operation.
+}
+
+

+ type + + ACLBindingFilter + + +

+

+ ACLBindingFilter specifies a filter used to return a list of ACL bindings
+ matching some or all of its attributes.
+ Used by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`.

+
type ACLBindingFilter = ACLBinding
+

+ type + + ACLBindingFilters + + +

+

+ ACLBindingFilters is a slice of ACLBindingFilter that also implements
+ the sort interface

+
type ACLBindingFilters []ACLBindingFilter
+

+ type + + ACLBindings + + +

+

+ ACLBindings is a slice of ACLBinding that also implements
+ the sort interface

+
type ACLBindings []ACLBinding
+

+ func (ACLBindings) + + Len + + +

+
func (a ACLBindings) Len() int
+

+ func (ACLBindings) + + Less + + +

+
func (a ACLBindings) Less(i, j int) bool
+

+ func (ACLBindings) + + Swap + + +

+
func (a ACLBindings) Swap(i, j int)
+

+ type + + ACLOperation + + +

+

+ ACLOperation enumerates the different types of ACL operation. +

+
type ACLOperation int
+

+ func + + ACLOperationFromString + + +

+
func ACLOperationFromString(aclOperationString string) (ACLOperation, error)
+

+ ACLOperationFromString translates an ACL operation name to
+ an ACLOperation value.

+

+ func (ACLOperation) + + String + + +

+
func (o ACLOperation) String() string
+

+ String returns the human-readable representation of an ACLOperation +

+

+ type + + ACLPermissionType + + +

+

+ ACLPermissionType enumerates the different types of ACL permission types. +

+
type ACLPermissionType int
+

+ func + + ACLPermissionTypeFromString + + +

+
func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error)
+

+ ACLPermissionTypeFromString translates an ACL permission type name to
+ an ACLPermissionType value.

+

+ func (ACLPermissionType) + + String + + +

+
func (o ACLPermissionType) String() string
+

+ String returns the human-readable representation of an ACLPermissionType +

type - + AdminClient

+ NewAdminClient

-
func NewAdminClient(conf *ConfigMap) (*AdminClient, error)
+
func NewAdminClient(conf *ConfigMap) (*AdminClient, error)

NewAdminClient creates a new AdminClient instance with a new underlying client instance
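A minimal sketch of constructing an AdminClient; the bootstrap.servers value is illustrative and imports are omitted for brevity:

    a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
    if err != nil {
        // handle configuration/instantiation error
    }
    defer a.Close()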

func - + NewAdminClientFromConsumer

-
func NewAdminClientFromConsumer(c *Consumer) (a *AdminClient, err error)
+
func NewAdminClientFromConsumer(c *Consumer) (a *AdminClient, err error)

NewAdminClientFromConsumer derives a new AdminClient from an existing Consumer instance. The AdminClient will use the same configuration and connections as the parent instance.

func - + NewAdminClientFromProducer

-
func NewAdminClientFromProducer(p *Producer) (a *AdminClient, err error)
+
func NewAdminClientFromProducer(p *Producer) (a *AdminClient, err error)

NewAdminClientFromProducer derives a new AdminClient from an existing Producer instance. The AdminClient will use the same configuration and connections as the parent instance.

func (*AdminClient) - + AlterConfigs

-
func (a *AdminClient) AlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error)
+
func (a *AdminClient) AlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error)

AlterConfigs alters/updates cluster resource configuration.

@@ -1478,7 +1859,7 @@

func (*AdminClient) - + Close

+ ClusterID

-
func (a *AdminClient) ClusterID(ctx context.Context) (clusterID string, err error)
+
func (a *AdminClient) ClusterID(ctx context.Context) (clusterID string, err error)

ClusterID returns the cluster ID as reported in broker metadata.

@@ -1512,14 +1893,14 @@

func (*AdminClient) - + ControllerID

-
func (a *AdminClient) ControllerID(ctx context.Context) (controllerID int32, err error)
+
func (a *AdminClient) ControllerID(ctx context.Context) (controllerID int32, err error)

ControllerID returns the broker ID of the current controller as reported in broker metadata.

@@ -1532,29 +1913,53 @@

Requires broker version >= 0.10.0.

+

+ func (*AdminClient) + + CreateACLs + + +

+
func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error)
+

+ CreateACLs creates one or more ACL bindings. +

+

+ Parameters: +

+
* `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+* `aclBindings` - A slice of ACL binding specifications to create.
+* `options` - Create ACLs options
+
+

+ Returns a slice of CreateACLResult with an ErrNoError ErrorCode when the
+ operation was successful, plus an error that is not nil for client-level errors.
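A sketch of creating a single ACL binding, assuming an existing AdminClient `a` and a context `ctx`; the topic name, principal, and host are illustrative:

    results, err := a.CreateACLs(ctx, kafka.ACLBindings{
        {
            Type:                kafka.ResourceTopic,
            Name:                "my-topic",
            ResourcePatternType: kafka.ResourcePatternTypeLiteral,
            Principal:           "User:alice",
            Host:                "*",
            Operation:           kafka.ACLOperationRead,
            PermissionType:      kafka.ACLPermissionTypeAllow,
        },
    })
    if err == nil {
        for _, r := range results {
            if r.Error.Code() != kafka.ErrNoError {
                // per-binding failure
            }
        }
    }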

func (*AdminClient) - + CreatePartitions

-
func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error)
+
func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error)

CreatePartitions creates additional partitions for topics.

func (*AdminClient) - + CreateTopics

-
func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error)
+
func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error)

CreateTopics creates topics in cluster.

@@ -1568,16 +1973,45 @@

Note: TopicSpecification is analogous to NewTopic in the Java Topic Admin API.
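A sketch of creating a topic with an operation timeout, assuming an AdminClient `a` and a context `ctx`; the topic name and counts are illustrative:

    results, err := a.CreateTopics(ctx,
        []kafka.TopicSpecification{{Topic: "my-topic", NumPartitions: 3, ReplicationFactor: 1}},
        kafka.SetAdminOperationTimeout(60*time.Second))
    if err == nil {
        for _, r := range results {
            fmt.Printf("%s: %s\n", r.Topic, r.Error)
        }
    }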

+

+ func (*AdminClient) + + DeleteACLs + + +

+
func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error)
+

+ DeleteACLs deletes ACL bindings matching one or more ACL binding filters. +

+

+ Parameters: +

+
* `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+* `aclBindingFilters` - a slice of ACL binding filters to match ACLs to delete.
+   string attributes match exact values or any string if set to empty string.
+   Enum attributes match exact values or any value if ending with `Any`.
+   If `ResourcePatternType` is set to `ResourcePatternTypeMatch` returns all
+   the ACL bindings with `ResourcePatternTypeLiteral`, `ResourcePatternTypeWildcard`
+   or `ResourcePatternTypePrefixed` pattern type that match the resource name.
+* `options` - Delete ACLs options
+
+

+ Returns a slice of ACLBinding for each filter when the operation was successful,
+ plus an error that is not `nil` for client-level errors.
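A sketch of deleting ACL bindings that match a filter, assuming an AdminClient `a` and a context `ctx`; the filter values are illustrative:

    results, err := a.DeleteACLs(ctx, kafka.ACLBindingFilters{
        {
            Type:                kafka.ResourceTopic,
            Name:                "my-topic",
            ResourcePatternType: kafka.ResourcePatternTypeLiteral,
            Principal:           "User:alice",
            Host:                "*",
            Operation:           kafka.ACLOperationAny,
            PermissionType:      kafka.ACLPermissionTypeAny,
        },
    })
    if err == nil {
        for _, r := range results {
            // r.ACLBindings holds the bindings deleted for this filter
            _ = r.ACLBindings
        }
    }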

func (*AdminClient) - + DeleteTopics

-
func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error)
+
func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error)

DeleteTopics deletes a batch of topics.

@@ -1591,16 +2025,45 @@

Requires broker version >= 0.10.1.0

+

+ func (*AdminClient) + + DescribeACLs + + +

+
func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error)
+

+ DescribeACLs matches ACL bindings by filter. +

+

+ Parameters: +

+
* `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+* `aclBindingFilter` - A filter with attributes that must match.
+   string attributes match exact values or any string if set to empty string.
+   Enum attributes match exact values or any value if ending with `Any`.
+   If `ResourcePatternType` is set to `ResourcePatternTypeMatch` returns all
+   the ACL bindings with `ResourcePatternTypeLiteral`, `ResourcePatternTypeWildcard`
+   or `ResourcePatternTypePrefixed` pattern type that match the resource name.
+* `options` - Describe ACLs options
+
+

+ Returns a slice of ACLBindings when the operation was successful,
+ plus an error that is not `nil` for client-level errors.
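A sketch of listing ACL bindings that match a broad filter, assuming an AdminClient `a` and a context `ctx`; the request timeout value is illustrative:

    result, err := a.DescribeACLs(ctx, kafka.ACLBindingFilter{
        Type:                kafka.ResourceAny,
        ResourcePatternType: kafka.ResourcePatternTypeAny,
        Operation:           kafka.ACLOperationAny,
        PermissionType:      kafka.ACLPermissionTypeAny,
    }, kafka.SetAdminRequestTimeout(30*time.Second))
    if err == nil && result.Error.Code() == kafka.ErrNoError {
        for _, b := range result.ACLBindings {
            fmt.Println(b.Principal, b.Operation, b.Name)
        }
    }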

func (*AdminClient) - + DescribeConfigs

-
func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error)
+
func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error)

DescribeConfigs retrieves configuration for cluster resources.

@@ -1633,14 +2096,14 @@

func (*AdminClient) - + GetMetadata

-
func (a *AdminClient) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
+
func (a *AdminClient) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)

GetMetadata queries broker for cluster and topic metadata. If topic is non-nil only information about that topic is returned, else if

@@ -1650,14 +2113,14 @@

func (*AdminClient) - + SetOAuthBearerToken

-
func (a *AdminClient) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
+
func (a *AdminClient) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error

SetOAuthBearerToken sets the data to be transmitted to a broker during SASL/OAUTHBEARER authentication. It will return nil

@@ -1675,14 +2138,14 @@

func (*AdminClient) - + SetOAuthBearerTokenFailure

-
func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error
+
func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error

SetOAuthBearerTokenFailure sets the error message describing why token retrieval/setting failed; it also schedules a new token refresh event for 10

@@ -1694,20 +2157,20 @@

func (*AdminClient) - + String

-
func (a *AdminClient) String() string
+
func (a *AdminClient) String() string

String returns a human readable name for an AdminClient instance

type - + AdminOption

+ AdminOptionOperationTimeout

+ SetAdminOperationTimeout

-
func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout)
+
func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout)

SetAdminOperationTimeout sets the broker's operation timeout, such as the timeout for CreateTopics to complete the creation of topics on the controller

@@ -1782,7 +2245,7 @@
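As a sketch, the option is passed directly to the admin call it should apply to, assuming an AdminClient `a` and a context `ctx`; the topic name and duration are illustrative:

    results, err := a.DeleteTopics(ctx, []string{"my-topic"},
        kafka.SetAdminOperationTimeout(60*time.Second))
    _ = results
    if err != nil {
        // handle client-level error
    }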

type - + AdminOptionRequestTimeout

+ SetAdminRequestTimeout

-
func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout)
+
func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout)

SetAdminRequestTimeout sets the overall request timeout, including broker lookup, request transmission, operation time on broker, and response. @@ -1825,7 +2288,7 @@

type - + AdminOptionValidateOnly

+ SetAdminValidateOnly

-
func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly)
+
func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly)

SetAdminValidateOnly tells the broker to only validate the request, without performing the requested operation (create topics, etc). @@ -1868,7 +2331,7 @@

type - + AlterConfigsAdminOption

+ AlterOperation

int
+
type AlterOperation int

func (AlterOperation) - + String

-
func (o AlterOperation) String() string
+
func (o AlterOperation) String() string

String returns the human-readable representation of an AlterOperation

type - + AssignedPartitions

+ String

-
func (e AssignedPartitions) String() string
+
func (e AssignedPartitions) String() string

type - + BrokerMetadata

int32
-    Host string
-    Port int
+    ID   int32
+    Host string
+    Port int
 }
 

type - + ConfigEntry

string
+    Name string
     // Value of configuration entry.
-    Value string
+    Value string
     // Operation to perform on the entry.
     Operation AlterOperation
 }
 

func - + StringMapToConfigEntries

-
func StringMapToConfigEntries(stringMap map[string]string, operation AlterOperation) []ConfigEntry
+
func StringMapToConfigEntries(stringMap map[string]string, operation AlterOperation) []ConfigEntry

StringMapToConfigEntries creates a new slice of ConfigEntry objects from the provided string map. The AlterOperation is set on each created entry.
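A sketch of building config entries for an AlterConfigs call, assuming an AdminClient `a` and a context `ctx`; topic and property values are illustrative:

    entries := kafka.StringMapToConfigEntries(map[string]string{
        "cleanup.policy": "compact",
        "retention.ms":   "86400000",
    }, kafka.AlterOperationSet)
    _, err := a.AlterConfigs(ctx, []kafka.ConfigResource{
        {Type: kafka.ResourceTopic, Name: "my-topic", Config: entries},
    })
    if err != nil {
        // handle client-level error
    }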

func (ConfigEntry) - + String

-
func (c ConfigEntry) String() string
+
func (c ConfigEntry) String() string

String returns a human-readable representation of a ConfigEntry.

type - + ConfigEntryResult

string
+    Name string
     // Value of configuration entry.
-    Value string
+    Value string
     // Source indicates the configuration source.
     Source ConfigSource
     // IsReadOnly indicates whether the configuration entry can be altered.
-    IsReadOnly bool
+    IsReadOnly bool
     // IsSensitive indicates whether the configuration entry contains sensitive information, in which case the value will be unset.
-    IsSensitive bool
+    IsSensitive bool
     // IsSynonym indicates whether the configuration entry is a synonym for another configuration property.
-    IsSynonym bool
+    IsSynonym bool
     // Synonyms contains a map of configuration entries that are synonyms to this configuration entry.
-    Synonyms map[string]ConfigEntryResult
+    Synonyms map[string]ConfigEntryResult
 }
 

func (ConfigEntryResult) - + String

-
func (c ConfigEntryResult) String() string
+
func (c ConfigEntryResult) String() string

String returns a human-readable representation of a ConfigEntryResult.

type - + ConfigMap

string]ConfigValue
+
type ConfigMap map[string]ConfigValue

func (ConfigMap) - + Get

-
func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error)
+
func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error)

Get finds the given key in the ConfigMap and returns its value. If the key is not found, `defval` is returned.

@@ -2090,28 +2553,28 @@

func (ConfigMap) - + Set

-
func (m ConfigMap) Set(kv string) error
+
func (m ConfigMap) Set(kv string) error

Set implements flag.Set (command line argument parser) as a convenience for `-X key=value` config.

func (ConfigMap) - + SetKey

-
func (m ConfigMap) SetKey(key string, value ConfigValue) error
+
func (m ConfigMap) SetKey(key string, value ConfigValue) error

SetKey sets configuration property key to value.

@@ -2121,7 +2584,7 @@

type - + ConfigResource

ResourceType
    // Name of resource to set.
-    Name string
+    Name string
    // Config entries to set.
    // Configuration updates are atomic, any configuration property not provided
    // here will be reverted (by the broker) to its default value.

@@ -2145,20 +2608,20 @@

func (ConfigResource) - + String

-
func (c ConfigResource) String() string
+
func (c ConfigResource) String() string

String returns a human-readable representation of a ConfigResource

type - + ConfigResourceResult

ResourceType
    // Name of returned result resource.
-    Name string
+    Name string
    // Error, if any, of returned result resource.
    Error Error
    // Config entries, if any, of returned result resource.
-    Config map[string]ConfigEntryResult
+    Config map[string]ConfigEntryResult
}

func (ConfigResourceResult) - + String

-
func (c ConfigResourceResult) String() string
+
func (c ConfigResourceResult) String() string

String returns a human-readable representation of a ConfigResourceResult.

type - + ConfigSource

int
+
type ConfigSource int

func (ConfigSource) - + String

-
func (t ConfigSource) String() string
+
func (t ConfigSource) String() string

String returns the human-readable representation of a ConfigSource type

type - + ConfigValue

+ Consumer

+ NewConsumer

-
func NewConsumer(conf *ConfigMap) (*Consumer, error)
+
func NewConsumer(conf *ConfigMap) (*Consumer, error)

NewConsumer creates a new high-level Consumer instance.
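A minimal sketch of constructing a consumer; the broker address and group id are illustrative and imports are omitted for brevity:

    c, err := kafka.NewConsumer(&kafka.ConfigMap{
        "bootstrap.servers": "localhost:9092",
        "group.id":          "my-group",
        "auto.offset.reset": "earliest",
    })
    if err != nil {
        // handle configuration error
    }
    defer c.Close()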

@@ -2288,14 +2751,14 @@

func (*Consumer) - + Assign

-
func (c *Consumer) Assign(partitions []TopicPartition) (err error)
+
func (c *Consumer) Assign(partitions []TopicPartition) (err error)

Assign an atomic set of partitions to consume.

@@ -2311,27 +2774,27 @@

func (*Consumer) - + Assignment

-
func (c *Consumer) Assignment() (partitions []TopicPartition, err error)
+
func (c *Consumer) Assignment() (partitions []TopicPartition, err error)

Assignment returns the current partition assignments

func (*Consumer) - + AssignmentLost

-
func (c *Consumer) AssignmentLost() bool
+
func (c *Consumer) AssignmentLost() bool

AssignmentLost returns true if current partition assignment has been lost. This method is only applicable for use with a subscribing consumer when

@@ -2341,28 +2804,28 @@

func (*Consumer) - + Close

-
func (c *Consumer) Close() (err error)
+
func (c *Consumer) Close() (err error)

Close Consumer instance. The object is no longer usable after this call.

func (*Consumer) - + Commit

-
func (c *Consumer) Commit() ([]TopicPartition, error)
+
func (c *Consumer) Commit() ([]TopicPartition, error)

Commit offsets for currently assigned partitions This is a blocking call. @@ -2370,14 +2833,14 @@

func (*Consumer) - + CommitMessage

-
func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error)
+
func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error)

CommitMessage commits offset based on the provided message. This is a blocking call. @@ -2385,14 +2848,14 @@

func (*Consumer) - + CommitOffsets

-
func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error)
+
func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error)

CommitOffsets commits the provided list of offsets This is a blocking call. @@ -2400,20 +2863,20 @@

func (*Consumer) - + Committed

-
func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)
+
func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)

Committed retrieves committed offsets for the given set of partitions

func (*Consumer) - + Events

+ GetConsumerGroupMetadata

-
func (c *Consumer) GetConsumerGroupMetadata() (*ConsumerGroupMetadata, error)
+
func (c *Consumer) GetConsumerGroupMetadata() (*ConsumerGroupMetadata, error)

GetConsumerGroupMetadata returns the consumer's current group metadata. This object should be passed to the transactional producer's @@ -2441,14 +2904,14 @@

func (*Consumer) - + GetMetadata

-
func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
+
func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)

GetMetadata queries broker for cluster and topic metadata. If topic is non-nil only information about that topic is returned, else if @@ -2458,14 +2921,14 @@

func (*Consumer) - + GetRebalanceProtocol

-
func (c *Consumer) GetRebalanceProtocol() string
+
func (c *Consumer) GetRebalanceProtocol() string

GetRebalanceProtocol returns the current consumer group rebalance protocol, which is either "EAGER" or "COOPERATIVE". @@ -2475,14 +2938,14 @@

func (*Consumer) - + GetWatermarkOffsets

-
func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error)
+
func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error)

GetWatermarkOffsets returns the cached low and high offsets for the given topic and partition. The high offset is populated on every fetch response or via calling QueryWatermarkOffsets. @@ -2491,14 +2954,14 @@

func (*Consumer) - + IncrementalAssign

-
func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error)
+
func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error)

IncrementalAssign adds the specified partitions to the current set of partitions to consume. @@ -2515,14 +2978,14 @@

func (*Consumer) - + IncrementalUnassign

-
func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error)
+
func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error)

IncrementalUnassign removes the specified partitions from the current set of partitions to consume. @@ -2535,7 +2998,7 @@

func (*Consumer) - + Logs

+ OffsetsForTimes

-
func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)
+
func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)

OffsetsForTimes looks up offsets by timestamp for the given partitions.

@@ -2579,14 +3042,14 @@

func (*Consumer) - + Pause

-
func (c *Consumer) Pause(partitions []TopicPartition) (err error)
+
func (c *Consumer) Pause(partitions []TopicPartition) (err error)

Pause consumption for the provided list of partitions

@@ -2597,14 +3060,14 @@

func (*Consumer) - + Poll

-
func (c *Consumer) Poll(timeoutMs int) (event Event)
+
func (c *Consumer) Poll(timeoutMs int) (event Event)

Poll the consumer for messages or events.

@@ -2621,14 +3084,14 @@

func (*Consumer) - + Position

-
func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartition, err error)
+
func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartition, err error)

Position returns the current consume position for the given partitions. Typical use is to call Assignment() to get the partition list @@ -2639,27 +3102,27 @@

func (*Consumer) - + QueryWatermarkOffsets

-
func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)
+
func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)

QueryWatermarkOffsets queries the broker for the low and high offsets for the given topic and partition.

func (*Consumer) - + ReadMessage

-
func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error)
+
func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error)

ReadMessage polls the consumer for a message.

@@ -2686,27 +3149,27 @@

func (*Consumer) - + Resume

-
func (c *Consumer) Resume(partitions []TopicPartition) (err error)
+
func (c *Consumer) Resume(partitions []TopicPartition) (err error)

Resume consumption for the provided list of partitions

func (*Consumer) - + Seek

-
func (c *Consumer) Seek(partition TopicPartition, timeoutMs int) error
+
func (c *Consumer) Seek(partition TopicPartition, timeoutMs int) error

Seek seeks the given topic partitions using the offset from the TopicPartition.

@@ -2728,14 +3191,14 @@

func (*Consumer) - + SetOAuthBearerToken

-
func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
+
func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error

SetOAuthBearerToken sets the data to be transmitted to a broker during SASL/OAUTHBEARER authentication. It will return nil

@@ -2753,14 +3216,14 @@

func (*Consumer) - + SetOAuthBearerTokenFailure

-
func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error
+
func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error

SetOAuthBearerTokenFailure sets the error message describing why token retrieval/setting failed; it also schedules a new token refresh event for 10

@@ -2769,17 +3232,31 @@

1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; 2) SASL/OAUTHBEARER is supported but is not configured as the client's authentication mechanism. +

+

+ func (*Consumer) + + StoreMessage + + +

+
func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err error)
+

+ StoreMessage stores offset based on the provided message.
+ This is a convenience method that uses StoreOffsets to do the actual work.
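A sketch of at-least-once processing with StoreMessage, assuming a consumer `c` configured with `enable.auto.offset.store` set to false:

    msg, err := c.ReadMessage(time.Second)
    if err == nil {
        // process msg ...
        if _, err := c.StoreMessage(msg); err != nil {
            // handle store error
        }
    }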

func (*Consumer) - + StoreOffsets

-
func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error)
+
func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error)

StoreOffsets stores the provided list of offsets that will be committed to the offset store according to `auto.commit.interval.ms` or manual

@@ -2792,87 +3269,87 @@

func (*Consumer) - + String

-
func (c *Consumer) String() string
+
func (c *Consumer) String() string

String returns a human readable name for a Consumer instance

func (*Consumer) - + Subscribe

-
func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error
+
func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error

Subscribe to a single topic. This replaces the current subscription.

func (*Consumer) - + SubscribeTopics

-
func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error)
+
func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error)

SubscribeTopics subscribes to the provided list of topics. This replaces the current subscription.
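A sketch of subscribing to multiple topics with an optional rebalance callback (nil here); the topic names are illustrative:

    if err := c.SubscribeTopics([]string{"orders", "payments"}, nil); err != nil {
        // handle subscription error
    }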

func (*Consumer) - + Subscription

-
func (c *Consumer) Subscription() (topics []string, err error)
+
func (c *Consumer) Subscription() (topics []string, err error)

Subscription returns the current subscription as set by Subscribe()

func (*Consumer) - + Unassign

-
func (c *Consumer) Unassign() (err error)
+
func (c *Consumer) Unassign() (err error)

Unassign the current set of partitions to consume.

func (*Consumer) - + Unsubscribe

-
func (c *Consumer) Unsubscribe() (err error)
+
func (c *Consumer) Unsubscribe() (err error)

Unsubscribe from the current subscription, if any.

type - + ConsumerGroupMetadata

+ NewTestConsumerGroupMetadata

-
func NewTestConsumerGroupMetadata(groupID string) (*ConsumerGroupMetadata, error)
+
func NewTestConsumerGroupMetadata(groupID string) (*ConsumerGroupMetadata, error)

NewTestConsumerGroupMetadata creates a new consumer group metadata instance mainly for testing use. Use GetConsumerGroupMetadata() to retrieve the real metadata.

+

+ type + + CreateACLResult + + +

+

+ CreateACLResult provides create ACL error information. +

+
type CreateACLResult struct {
+    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
+    Error Error
+}
+
+

+ type + + CreateACLsAdminOption + + +

+

+ CreateACLsAdminOption - see setter. +

+

+ See SetAdminRequestTimeout +

+
type CreateACLsAdminOption interface {
+    // contains filtered or unexported methods
+}

type - + CreatePartitionsAdminOption

+ CreateTopicsAdminOption

+ DeleteACLsAdminOption + + +

+

+ DeleteACLsAdminOption - see setter. +

+

+ See SetAdminRequestTimeout +

+
type DeleteACLsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + DeleteACLsResult + + +

+

+ DeleteACLsResult provides delete ACLs result or error information. +

+
type DeleteACLsResult = DescribeACLsResult

type - + DeleteTopicsAdminOption

+ DescribeACLsAdminOption + + +

+

+ DescribeACLsAdminOption - see setter. +

+

+ See SetAdminRequestTimeout +

+
type DescribeACLsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type + + DescribeACLsResult + + +

+

+ DescribeACLsResult provides describe ACLs result or error information. +

+
type DescribeACLsResult struct {
+    // Slice of ACL bindings matching the provided filter
+    ACLBindings ACLBindings
+    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
+    Error Error
+}
+

type - + DescribeConfigsAdminOption

+ Error

+ NewError

-
func NewError(code ErrorCode, str string, fatal bool) (err Error)
+
func NewError(code ErrorCode, str string, fatal bool) (err Error)

NewError creates a new Error.

func (Error) - + Code

+ Error

-
func (e Error) Error() string
+
func (e Error) Error() string

Error returns a human readable representation of an Error. Same as Error.String()

func (Error) - + IsFatal

-
func (e Error) IsFatal() bool
+
func (e Error) IsFatal() bool

IsFatal returns true if the error is a fatal error. A fatal error indicates the client instance is no longer operable and

@@ -3047,14 +3627,14 @@

func (Error) - + IsRetriable

-
func (e Error) IsRetriable() bool
+
func (e Error) IsRetriable() bool

IsRetriable returns true if the operation that caused this error may be retried. @@ -3062,27 +3642,27 @@

func (Error) - + String

-
func (e Error) String() string
+
func (e Error) String() string

String returns a human readable representation of an Error
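A sketch of inspecting a returned error, assuming it originated from this package:

    if err != nil {
        if kerr, ok := err.(kafka.Error); ok {
            switch {
            case kerr.IsFatal():
                // client instance no longer usable
            case kerr.IsRetriable():
                // safe to retry the operation
            case kerr.Code() == kafka.ErrAllBrokersDown:
                // connectivity problem
            }
        }
    }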

func (Error) - + TxnRequiresAbort

-
func (e Error) TxnRequiresAbort() bool
+
func (e Error) TxnRequiresAbort() bool

TxnRequiresAbort returns true if the error is an abortable transaction error that requires the application to abort the current transaction with @@ -3092,7 +3672,7 @@

type - + ErrorCode

int
+
type ErrorCode int
const (
     // ErrBadMsg Local: Bad message format
-    ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG)
+    ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG)
     // ErrBadCompression Local: Invalid compressed data
-    ErrBadCompression ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION)
+    ErrBadCompression ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION)
     // ErrDestroy Local: Broker handle destroyed
-    ErrDestroy ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__DESTROY)
+    ErrDestroy ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__DESTROY)
     // ErrFail Local: Communication failure with broker
-    ErrFail ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FAIL)
+    ErrFail ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FAIL)
     // ErrTransport Local: Broker transport failure
-    ErrTransport ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TRANSPORT)
+    ErrTransport ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TRANSPORT)
     // ErrCritSysResource Local: Critical system resource failure
-    ErrCritSysResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE)
+    ErrCritSysResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE)
     // ErrResolve Local: Host resolution failure
-    ErrResolve ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RESOLVE)
+    ErrResolve ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RESOLVE)
     // ErrMsgTimedOut Local: Message timed out
-    ErrMsgTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT)
+    ErrMsgTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT)
     // ErrPartitionEOF Broker: No more messages
-    ErrPartitionEOF ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTITION_EOF)
+    ErrPartitionEOF ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTITION_EOF)
     // ErrUnknownPartition Local: Unknown partition
-    ErrUnknownPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
+    ErrUnknownPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
     // ErrFs Local: File or filesystem error
-    ErrFs ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FS)
+    ErrFs ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FS)
     // ErrUnknownTopic Local: Unknown topic
-    ErrUnknownTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+    ErrUnknownTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
     // ErrAllBrokersDown Local: All broker connections are down
-    ErrAllBrokersDown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)
+    ErrAllBrokersDown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)
     // ErrInvalidArg Local: Invalid argument or configuration
-    ErrInvalidArg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_ARG)
+    ErrInvalidArg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_ARG)
     // ErrTimedOut Local: Timed out
-    ErrTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT)
+    ErrTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT)
     // ErrQueueFull Local: Queue full
-    ErrQueueFull ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__QUEUE_FULL)
+    ErrQueueFull ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__QUEUE_FULL)
     // ErrIsrInsuff Local: ISR count insufficient
-    ErrIsrInsuff ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ISR_INSUFF)
+    ErrIsrInsuff ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ISR_INSUFF)
     // ErrNodeUpdate Local: Broker node update
-    ErrNodeUpdate ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NODE_UPDATE)
+    ErrNodeUpdate ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NODE_UPDATE)
     // ErrSsl Local: SSL error
-    ErrSsl ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__SSL)
+    ErrSsl ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__SSL)
     // ErrWaitCoord Local: Waiting for coordinator
-    ErrWaitCoord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_COORD)
+    ErrWaitCoord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_COORD)
     // ErrUnknownGroup Local: Unknown group
-    ErrUnknownGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP)
+    ErrUnknownGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP)
     // ErrInProgress Local: Operation in progress
-    ErrInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__IN_PROGRESS)
+    ErrInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__IN_PROGRESS)
     // ErrPrevInProgress Local: Previous operation in progress
-    ErrPrevInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS)
+    ErrPrevInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS)
     // ErrExistingSubscription Local: Existing subscription
-    ErrExistingSubscription ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION)
+    ErrExistingSubscription ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION)
     // ErrAssignPartitions Local: Assign partitions
-    ErrAssignPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
+    ErrAssignPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
     // ErrRevokePartitions Local: Revoke partitions
-    ErrRevokePartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS)
+    ErrRevokePartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS)
     // ErrConflict Local: Conflicting use
-    ErrConflict ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CONFLICT)
+    ErrConflict ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CONFLICT)
     // ErrState Local: Erroneous state
-    ErrState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__STATE)
+    ErrState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__STATE)
     // ErrUnknownProtocol Local: Unknown protocol
-    ErrUnknownProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL)
+    ErrUnknownProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL)
     // ErrNotImplemented Local: Not implemented
-    ErrNotImplemented ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED)
+    ErrNotImplemented ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED)
     // ErrAuthentication Local: Authentication failure
-    ErrAuthentication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTHENTICATION)
+    ErrAuthentication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTHENTICATION)
     // ErrNoOffset Local: No offset stored
-    ErrNoOffset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NO_OFFSET)
+    ErrNoOffset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NO_OFFSET)
     // ErrOutdated Local: Outdated
-    ErrOutdated ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__OUTDATED)
+    ErrOutdated ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__OUTDATED)
     // ErrTimedOutQueue Local: Timed out in queue
-    ErrTimedOutQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE)
+    ErrTimedOutQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE)
     // ErrUnsupportedFeature Local: Required feature not supported by broker
-    ErrUnsupportedFeature ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE)
+    ErrUnsupportedFeature ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE)
     // ErrWaitCache Local: Awaiting cache update
-    ErrWaitCache ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_CACHE)
+    ErrWaitCache ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_CACHE)
     // ErrIntr Local: Operation interrupted
-    ErrIntr ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INTR)
+    ErrIntr ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INTR)
     // ErrKeySerialization Local: Key serialization error
-    ErrKeySerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION)
+    ErrKeySerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION)
     // ErrValueSerialization Local: Value serialization error
-    ErrValueSerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION)
+    ErrValueSerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION)
     // ErrKeyDeserialization Local: Key deserialization error
-    ErrKeyDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION)
+    ErrKeyDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION)
     // ErrValueDeserialization Local: Value deserialization error
-    ErrValueDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION)
+    ErrValueDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION)
     // ErrPartial Local: Partial response
-    ErrPartial ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTIAL)
+    ErrPartial ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTIAL)
     // ErrReadOnly Local: Read-only object
-    ErrReadOnly ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__READ_ONLY)
+    ErrReadOnly ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__READ_ONLY)
     // ErrNoent Local: No such entry
-    ErrNoent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOENT)
+    ErrNoent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOENT)
     // ErrUnderflow Local: Read underflow
-    ErrUnderflow ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNDERFLOW)
+    ErrUnderflow ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNDERFLOW)
     // ErrInvalidType Local: Invalid type
-    ErrInvalidType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_TYPE)
+    ErrInvalidType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_TYPE)
     // ErrRetry Local: Retry operation
-    ErrRetry ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RETRY)
+    ErrRetry ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RETRY)
     // ErrPurgeQueue Local: Purged in queue
-    ErrPurgeQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_QUEUE)
+    ErrPurgeQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_QUEUE)
     // ErrPurgeInflight Local: Purged in flight
-    ErrPurgeInflight ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_INFLIGHT)
+    ErrPurgeInflight ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_INFLIGHT)
     // ErrFatal Local: Fatal error
-    ErrFatal ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FATAL)
+    ErrFatal ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FATAL)
     // ErrInconsistent Local: Inconsistent state
-    ErrInconsistent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INCONSISTENT)
+    ErrInconsistent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INCONSISTENT)
     // ErrGaplessGuarantee Local: Gap-less ordering would not be guaranteed if proceeding
-    ErrGaplessGuarantee ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE)
+    ErrGaplessGuarantee ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE)
     // ErrMaxPollExceeded Local: Maximum application poll interval (max.poll.interval.ms) exceeded
-    ErrMaxPollExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED)
+    ErrMaxPollExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED)
     // ErrUnknownBroker Local: Unknown broker
-    ErrUnknownBroker ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_BROKER)
+    ErrUnknownBroker ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_BROKER)
     // ErrNotConfigured Local: Functionality not configured
-    ErrNotConfigured ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_CONFIGURED)
+    ErrNotConfigured ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_CONFIGURED)
     // ErrFenced Local: This instance has been fenced by a newer instance
-    ErrFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FENCED)
+    ErrFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FENCED)
     // ErrApplication Local: Application generated error
-    ErrApplication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__APPLICATION)
+    ErrApplication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__APPLICATION)
     // ErrAssignmentLost Local: Group partition assignment lost
-    ErrAssignmentLost ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST)
+    ErrAssignmentLost ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST)
     // ErrNoop Local: No operation performed
-    ErrNoop ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOOP)
+    ErrNoop ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOOP)
     // ErrAutoOffsetReset Local: No offset to automatically reset to
-    ErrAutoOffsetReset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET)
+    ErrAutoOffsetReset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET)
     // ErrUnknown Unknown broker error
-    ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN)
+    ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN)
     // ErrNoError Success
-    ErrNoError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_ERROR)
+    ErrNoError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_ERROR)
     // ErrOffsetOutOfRange Broker: Offset out of range
-    ErrOffsetOutOfRange ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE)
+    ErrOffsetOutOfRange ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE)
     // ErrInvalidMsg Broker: Invalid message
-    ErrInvalidMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG)
+    ErrInvalidMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG)
     // ErrUnknownTopicOrPart Broker: Unknown topic or partition
-    ErrUnknownTopicOrPart ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
+    ErrUnknownTopicOrPart ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
     // ErrInvalidMsgSize Broker: Invalid message size
-    ErrInvalidMsgSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE)
+    ErrInvalidMsgSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE)
     // ErrLeaderNotAvailable Broker: Leader not available
-    ErrLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
+    ErrLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
     // ErrNotLeaderForPartition Broker: Not leader for partition
-    ErrNotLeaderForPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION)
+    ErrNotLeaderForPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION)
     // ErrRequestTimedOut Broker: Request timed out
-    ErrRequestTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT)
+    ErrRequestTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT)
     // ErrBrokerNotAvailable Broker: Broker not available
-    ErrBrokerNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE)
+    ErrBrokerNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE)
     // ErrReplicaNotAvailable Broker: Replica not available
-    ErrReplicaNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE)
+    ErrReplicaNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE)
     // ErrMsgSizeTooLarge Broker: Message size too large
-    ErrMsgSizeTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
+    ErrMsgSizeTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
     // ErrStaleCtrlEpoch Broker: StaleControllerEpochCode
-    ErrStaleCtrlEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH)
+    ErrStaleCtrlEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH)
     // ErrOffsetMetadataTooLarge Broker: Offset metadata string too large
-    ErrOffsetMetadataTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE)
+    ErrOffsetMetadataTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE)
     // ErrNetworkException Broker: Broker disconnected before response received
-    ErrNetworkException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION)
+    ErrNetworkException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION)
     // ErrCoordinatorLoadInProgress Broker: Coordinator load in progress
-    ErrCoordinatorLoadInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS)
+    ErrCoordinatorLoadInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS)
     // ErrCoordinatorNotAvailable Broker: Coordinator not available
-    ErrCoordinatorNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE)
+    ErrCoordinatorNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE)
     // ErrNotCoordinator Broker: Not coordinator
-    ErrNotCoordinator ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR)
+    ErrNotCoordinator ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR)
     // ErrTopicException Broker: Invalid topic
-    ErrTopicException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION)
+    ErrTopicException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION)
     // ErrRecordListTooLarge Broker: Message batch larger than configured server segment size
-    ErrRecordListTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE)
+    ErrRecordListTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE)
     // ErrNotEnoughReplicas Broker: Not enough in-sync replicas
-    ErrNotEnoughReplicas ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS)
+    ErrNotEnoughReplicas ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS)
     // ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas
-    ErrNotEnoughReplicasAfterAppend ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND)
+    ErrNotEnoughReplicasAfterAppend ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND)
     // ErrInvalidRequiredAcks Broker: Invalid required acks value
-    ErrInvalidRequiredAcks ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS)
+    ErrInvalidRequiredAcks ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS)
     // ErrIllegalGeneration Broker: Specified group generation id is not valid
-    ErrIllegalGeneration ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION)
+    ErrIllegalGeneration ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION)
     // ErrInconsistentGroupProtocol Broker: Inconsistent group protocol
-    ErrInconsistentGroupProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL)
+    ErrInconsistentGroupProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL)
     // ErrInvalidGroupID Broker: Invalid group.id
-    ErrInvalidGroupID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID)
+    ErrInvalidGroupID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID)
     // ErrUnknownMemberID Broker: Unknown member
-    ErrUnknownMemberID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)
+    ErrUnknownMemberID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)
     // ErrInvalidSessionTimeout Broker: Invalid session timeout
-    ErrInvalidSessionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT)
+    ErrInvalidSessionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT)
     // ErrRebalanceInProgress Broker: Group rebalance in progress
-    ErrRebalanceInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS)
+    ErrRebalanceInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS)
     // ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid
-    ErrInvalidCommitOffsetSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE)
+    ErrInvalidCommitOffsetSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE)
     // ErrTopicAuthorizationFailed Broker: Topic authorization failed
-    ErrTopicAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED)
+    ErrTopicAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED)
     // ErrGroupAuthorizationFailed Broker: Group authorization failed
-    ErrGroupAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED)
+    ErrGroupAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED)
     // ErrClusterAuthorizationFailed Broker: Cluster authorization failed
-    ErrClusterAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED)
+    ErrClusterAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED)
     // ErrInvalidTimestamp Broker: Invalid timestamp
-    ErrInvalidTimestamp ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP)
+    ErrInvalidTimestamp ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP)
     // ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism
-    ErrUnsupportedSaslMechanism ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM)
+    ErrUnsupportedSaslMechanism ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM)
     // ErrIllegalSaslState Broker: Request not valid in current SASL state
-    ErrIllegalSaslState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE)
+    ErrIllegalSaslState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE)
     // ErrUnsupportedVersion Broker: API version not supported
-    ErrUnsupportedVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION)
+    ErrUnsupportedVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION)
     // ErrTopicAlreadyExists Broker: Topic already exists
-    ErrTopicAlreadyExists ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS)
+    ErrTopicAlreadyExists ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS)
     // ErrInvalidPartitions Broker: Invalid number of partitions
-    ErrInvalidPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS)
+    ErrInvalidPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS)
     // ErrInvalidReplicationFactor Broker: Invalid replication factor
-    ErrInvalidReplicationFactor ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR)
+    ErrInvalidReplicationFactor ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR)
     // ErrInvalidReplicaAssignment Broker: Invalid replica assignment
-    ErrInvalidReplicaAssignment ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT)
+    ErrInvalidReplicaAssignment ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT)
     // ErrInvalidConfig Broker: Configuration is invalid
-    ErrInvalidConfig ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_CONFIG)
+    ErrInvalidConfig ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_CONFIG)
     // ErrNotController Broker: Not controller for cluster
-    ErrNotController ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER)
+    ErrNotController ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER)
     // ErrInvalidRequest Broker: Invalid request
-    ErrInvalidRequest ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUEST)
+    ErrInvalidRequest ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUEST)
     // ErrUnsupportedForMessageFormat Broker: Message format on broker does not support request
-    ErrUnsupportedForMessageFormat ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT)
+    ErrUnsupportedForMessageFormat ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT)
     // ErrPolicyViolation Broker: Policy violation
-    ErrPolicyViolation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION)
+    ErrPolicyViolation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION)
     // ErrOutOfOrderSequenceNumber Broker: Broker received an out of order sequence number
-    ErrOutOfOrderSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER)
+    ErrOutOfOrderSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER)
     // ErrDuplicateSequenceNumber Broker: Broker received a duplicate sequence number
-    ErrDuplicateSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER)
+    ErrDuplicateSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER)
     // ErrInvalidProducerEpoch Broker: Producer attempted an operation with an old epoch
-    ErrInvalidProducerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH)
+    ErrInvalidProducerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH)
     // ErrInvalidTxnState Broker: Producer attempted a transactional operation in an invalid state
-    ErrInvalidTxnState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE)
+    ErrInvalidTxnState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE)
     // ErrInvalidProducerIDMapping Broker: Producer attempted to use a producer id which is not currently assigned to its transactional id
-    ErrInvalidProducerIDMapping ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING)
+    ErrInvalidProducerIDMapping ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING)
     // ErrInvalidTransactionTimeout Broker: Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms
-    ErrInvalidTransactionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT)
+    ErrInvalidTransactionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT)
     // ErrConcurrentTransactions Broker: Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing
-    ErrConcurrentTransactions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS)
+    ErrConcurrentTransactions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS)
     // ErrTransactionCoordinatorFenced Broker: Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer
-    ErrTransactionCoordinatorFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED)
+    ErrTransactionCoordinatorFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED)
     // ErrTransactionalIDAuthorizationFailed Broker: Transactional Id authorization failed
-    ErrTransactionalIDAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED)
+    ErrTransactionalIDAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED)
     // ErrSecurityDisabled Broker: Security features are disabled
-    ErrSecurityDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED)
+    ErrSecurityDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED)
     // ErrOperationNotAttempted Broker: Operation not attempted
-    ErrOperationNotAttempted ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED)
+    ErrOperationNotAttempted ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED)
     // ErrKafkaStorageError Broker: Disk error when trying to access log file on disk
-    ErrKafkaStorageError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR)
+    ErrKafkaStorageError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR)
     // ErrLogDirNotFound Broker: The user-specified log directory is not found in the broker config
-    ErrLogDirNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND)
+    ErrLogDirNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND)
     // ErrSaslAuthenticationFailed Broker: SASL Authentication failed
-    ErrSaslAuthenticationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED)
+    ErrSaslAuthenticationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED)
     // ErrUnknownProducerID Broker: Unknown Producer Id
-    ErrUnknownProducerID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID)
+    ErrUnknownProducerID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID)
     // ErrReassignmentInProgress Broker: Partition reassignment is in progress
-    ErrReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS)
+    ErrReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS)
     // ErrDelegationTokenAuthDisabled Broker: Delegation Token feature is not enabled
-    ErrDelegationTokenAuthDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED)
+    ErrDelegationTokenAuthDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED)
     // ErrDelegationTokenNotFound Broker: Delegation Token is not found on server
-    ErrDelegationTokenNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND)
+    ErrDelegationTokenNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND)
     // ErrDelegationTokenOwnerMismatch Broker: Specified Principal is not valid Owner/Renewer
-    ErrDelegationTokenOwnerMismatch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH)
+    ErrDelegationTokenOwnerMismatch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH)
     // ErrDelegationTokenRequestNotAllowed Broker: Delegation Token requests are not allowed on this connection
-    ErrDelegationTokenRequestNotAllowed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED)
+    ErrDelegationTokenRequestNotAllowed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED)
     // ErrDelegationTokenAuthorizationFailed Broker: Delegation Token authorization failed
-    ErrDelegationTokenAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED)
+    ErrDelegationTokenAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED)
     // ErrDelegationTokenExpired Broker: Delegation Token is expired
-    ErrDelegationTokenExpired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED)
+    ErrDelegationTokenExpired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED)
     // ErrInvalidPrincipalType Broker: Supplied principalType is not supported
-    ErrInvalidPrincipalType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE)
+    ErrInvalidPrincipalType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE)
     // ErrNonEmptyGroup Broker: The group is not empty
-    ErrNonEmptyGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP)
+    ErrNonEmptyGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP)
     // ErrGroupIDNotFound Broker: The group id does not exist
-    ErrGroupIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND)
+    ErrGroupIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND)
     // ErrFetchSessionIDNotFound Broker: The fetch session ID was not found
-    ErrFetchSessionIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND)
+    ErrFetchSessionIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND)
     // ErrInvalidFetchSessionEpoch Broker: The fetch session epoch is invalid
-    ErrInvalidFetchSessionEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH)
+    ErrInvalidFetchSessionEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH)
     // ErrListenerNotFound Broker: No matching listener
-    ErrListenerNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND)
+    ErrListenerNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND)
     // ErrTopicDeletionDisabled Broker: Topic deletion is disabled
-    ErrTopicDeletionDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED)
+    ErrTopicDeletionDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED)
     // ErrFencedLeaderEpoch Broker: Leader epoch is older than broker epoch
-    ErrFencedLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH)
+    ErrFencedLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH)
     // ErrUnknownLeaderEpoch Broker: Leader epoch is newer than broker epoch
-    ErrUnknownLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH)
+    ErrUnknownLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH)
     // ErrUnsupportedCompressionType Broker: Unsupported compression type
-    ErrUnsupportedCompressionType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE)
+    ErrUnsupportedCompressionType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE)
     // ErrStaleBrokerEpoch Broker: Broker epoch has changed
-    ErrStaleBrokerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH)
+    ErrStaleBrokerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH)
     // ErrOffsetNotAvailable Broker: Leader high watermark is not caught up
-    ErrOffsetNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE)
+    ErrOffsetNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE)
     // ErrMemberIDRequired Broker: Group member needs a valid member ID
-    ErrMemberIDRequired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED)
+    ErrMemberIDRequired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED)
     // ErrPreferredLeaderNotAvailable Broker: Preferred leader was not available
-    ErrPreferredLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE)
+    ErrPreferredLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE)
     // ErrGroupMaxSizeReached Broker: Consumer group has reached maximum size
-    ErrGroupMaxSizeReached ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED)
+    ErrGroupMaxSizeReached ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED)
     // ErrFencedInstanceID Broker: Static consumer fenced by other consumer with same group.instance.id
-    ErrFencedInstanceID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID)
+    ErrFencedInstanceID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID)
     // ErrEligibleLeadersNotAvailable Broker: Eligible partition leaders are not available
-    ErrEligibleLeadersNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE)
+    ErrEligibleLeadersNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE)
     // ErrElectionNotNeeded Broker: Leader election not needed for topic partition
-    ErrElectionNotNeeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED)
+    ErrElectionNotNeeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED)
     // ErrNoReassignmentInProgress Broker: No partition reassignment is in progress
-    ErrNoReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS)
+    ErrNoReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS)
     // ErrGroupSubscribedToTopic Broker: Deleting offsets of a topic while the consumer group is subscribed to it
-    ErrGroupSubscribedToTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC)
+    ErrGroupSubscribedToTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC)
     // ErrInvalidRecord Broker: Broker failed to validate record
-    ErrInvalidRecord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_RECORD)
+    ErrInvalidRecord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_RECORD)
     // ErrUnstableOffsetCommit Broker: There are unstable offsets that need to be cleared
-    ErrUnstableOffsetCommit ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT)
+    ErrUnstableOffsetCommit ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT)
     // ErrThrottlingQuotaExceeded Broker: Throttling quota has been exceeded
-    ErrThrottlingQuotaExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED)
+    ErrThrottlingQuotaExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED)
     // ErrProducerFenced Broker: There is a newer producer with the same transactionalId which fences the current one
-    ErrProducerFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED)
+    ErrProducerFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED)
     // ErrResourceNotFound Broker: Request illegally referred to resource that does not exist
-    ErrResourceNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND)
+    ErrResourceNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND)
     // ErrDuplicateResource Broker: Request illegally referred to the same resource twice
-    ErrDuplicateResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE)
+    ErrDuplicateResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE)
     // ErrUnacceptableCredential Broker: Requested credential would not meet criteria for acceptability
-    ErrUnacceptableCredential ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL)
+    ErrUnacceptableCredential ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL)
     // ErrInconsistentVoterSet Broker: Indicates that the either the sender or recipient of a voter-only request is not one of the expected voters
-    ErrInconsistentVoterSet ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET)
+    ErrInconsistentVoterSet ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET)
     // ErrInvalidUpdateVersion Broker: Invalid update version
-    ErrInvalidUpdateVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION)
+    ErrInvalidUpdateVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION)
     // ErrFeatureUpdateFailed Broker: Unable to update finalized features due to server error
-    ErrFeatureUpdateFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED)
+    ErrFeatureUpdateFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED)
     // ErrPrincipalDeserializationFailure Broker: Request principal deserialization failed during forwarding
-    ErrPrincipalDeserializationFailure ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE)
+    ErrPrincipalDeserializationFailure ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE)
 )

func (ErrorCode) String

func (c ErrorCode) String() string

String returns a human readable representation of an error code

type Event

type Event interface {
    String() string
}

type Handle

type Handle interface {
    SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error

    // SetOAuthBearerTokenFailure sets the error message describing why token
    // retrieval/setting failed; it also schedules a new token refresh event for 10
@@ -3485,12 +4065,12 @@
    // 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
    // 2) SASL/OAUTHBEARER is supported but is not configured as the client's
    // authentication mechanism.
    SetOAuthBearerTokenFailure(errstr string) error
    // contains filtered or unexported methods
}

type LogEvent

type LogEvent struct {
    Name      string    // Name of client instance
    Tag       string    // Log tag that provides context to the log Message (e.g., "METADATA" or "GRPCOORD")
    Message   string    // Log message
    Level     int       // Log syslog level, lower is more critical.
    Timestamp time.Time // Log timestamp
}

func (LogEvent) String

func (logEvent LogEvent) String() string

type Message

type Message struct {
    TopicPartition TopicPartition
    Value          []byte
    Key            []byte
    Timestamp      time.Time
    TimestampType  TimestampType
    Opaque         interface{}
    Headers        []Header
}
@@ -3586,21 +4166,21 @@

func (*Message) String

func (m *Message) String() string

String returns a human readable representation of a Message. Key and payload are not represented.

type Metadata

type Metadata struct {
    Brokers []BrokerMetadata
    Topics  map[string]TopicMetadata

    OriginatingBroker BrokerMetadata
}
 
type MockCluster

MockCluster represents a Kafka mock cluster instance which can be used
for testing.

type MockCluster struct {
    // contains filtered or unexported fields
}

func NewMockCluster

func NewMockCluster(brokerCount int) (*MockCluster, error)

NewMockCluster provides a mock Kafka cluster with a configurable
number of brokers that support a reasonable subset of Kafka protocol
operations, error injection, etc.

Mock clusters provide localhost listeners that can be used as the bootstrap
servers by multiple Kafka client instances.

Currently supported functionality:
- Producer
- Idempotent Producer
- Transactional Producer
- Low-level consumer
- High-level balanced consumer groups with offset commits
- Topic Metadata and auto creation

Warning: THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL.

func (*MockCluster) BootstrapServers

func (mc *MockCluster) BootstrapServers() string

BootstrapServers returns the bootstrap.servers property for this MockCluster

func (*MockCluster) Close

func (mc *MockCluster) Close()

Close and destroy the MockCluster
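A minimal sketch of how the mock cluster can back a test, using only the methods
documented above; the topic name and message payload are illustrative, and the
v1 module path is assumed:

package main

import (
    "fmt"

    "github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
    // Start a one-broker mock cluster; no external Kafka deployment is required.
    mc, err := kafka.NewMockCluster(1)
    if err != nil {
        panic(err)
    }
    defer mc.Close()

    // Any regular client can use the mock cluster's listeners as bootstrap servers.
    p, err := kafka.NewProducer(&kafka.ConfigMap{
        "bootstrap.servers": mc.BootstrapServers(),
    })
    if err != nil {
        panic(err)
    }
    defer p.Close()

    topic := "test"
    deliveryChan := make(chan kafka.Event, 1)
    err = p.Produce(&kafka.Message{
        TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
        Value:          []byte("hello mock cluster"),
    }, deliveryChan)
    if err != nil {
        panic(err)
    }

    // The delivery report arrives on deliveryChan just as with a real cluster.
    m := (<-deliveryChan).(*kafka.Message)
    fmt.Printf("delivered to %v\n", m.TopicPartition)
}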

type OAuthBearerToken

type OAuthBearerToken struct {
    TokenValue string
    // Metadata about the token indicating when it expires (local time);
    // it must represent a time in the future
    Expiration time.Time
    // Metadata about the token indicating the Kafka principal name
    // to which it applies (for example, "admin")
    Principal string
    // SASL extensions, if any, to be communicated to the broker during
    // authentication (all keys and values of which must meet the regular
    // expressions defined at https://tools.ietf.org/html/rfc7628#section-3.1,
    // and it must not contain the reserved "auth" key)
    Extensions map[string]string
}
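A hedged sketch of populating this struct from an application's token refresh
handler; the token value, lifetime and principal are placeholders, and a real
application would obtain them from its OAuth/OIDC provider:

import (
    "time"

    "github.com/confluentinc/confluent-kafka-go/kafka"
)

// refreshOAuthToken pushes a placeholder token to librdkafka via the Handle
// methods documented above.
func refreshOAuthToken(p *kafka.Producer) {
    token := kafka.OAuthBearerToken{
        TokenValue: "<jws-compact-serialization>",    // placeholder value
        Expiration: time.Now().Add(30 * time.Minute), // must lie in the future
        Principal:  "admin",
        Extensions: map[string]string{},
    }
    if err := p.SetOAuthBearerToken(token); err != nil {
        // Report the failure so a new token refresh event is scheduled.
        p.SetOAuthBearerTokenFailure(err.Error())
    }
}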

type OAuthBearerTokenRefresh

type OAuthBearerTokenRefresh struct {
    Config string
}

func (OAuthBearerTokenRefresh) String

func (o OAuthBearerTokenRefresh) String() string

type Offset

type Offset int64

func NewOffset

func NewOffset(offset interface{}) (Offset, error)

NewOffset creates a new Offset using the provided logical string, or an
absolute int64 offset value.
@@ -3706,7 +4360,7 @@

func OffsetTail

func (*Offset) Set

func (o *Offset) Set(offset interface{}) error

Set offset value, see NewOffset()

func (Offset) String

func (o Offset) String() string
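For illustration, a small sketch of the two ways an Offset can be built; the
logical names "beginning" and "latest" are assumed spellings of the logical
offsets rather than something stated in this hunk:

import "github.com/confluentinc/confluent-kafka-go/kafka"

// offsetExamples shows logical and absolute offset construction.
func offsetExamples() {
    begin, err := kafka.NewOffset("beginning") // logical offset from a string
    if err != nil {
        panic(err)
    }
    exact, _ := kafka.NewOffset(int64(42)) // absolute offset from an int64

    var o kafka.Offset
    _ = o.Set("latest") // Set accepts the same inputs as NewOffset, in place
    _ = begin
    _ = exact
}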

type OffsetsCommitted

type OffsetsCommitted struct {
    Error   error
    Offsets []TopicPartition
}

func (OffsetsCommitted) String

func (o OffsetsCommitted) String() string

type PartitionEOF

type PartitionEOF TopicPartition

func (PartitionEOF) String

func (p PartitionEOF) String() string

type PartitionMetadata

type PartitionMetadata struct {
    ID       int32
    Error    Error
    Leader   int32
    Replicas []int32
    Isrs     []int32
}

type PartitionsSpecification

type PartitionsSpecification struct {
    Topic string
    // New partition count for topic, must be higher than current partition count.
    IncreaseTo int
    // (Optional) Explicit replica assignment. The outer array is
    // indexed by the new partition index (i.e., 0 for the first added
    // partition), while the inner per-partition array
    // contains the replica broker ids. The first broker in each
    // broker id list will be the preferred replica.
    ReplicaAssignment [][]int32
}

type Producer

func NewProducer

func NewProducer(conf *ConfigMap) (*Producer, error)

NewProducer creates a new high-level Producer instance.
@@ -3888,14 +4542,14 @@

func (*Producer) AbortTransaction

func (p *Producer) AbortTransaction(ctx context.Context) error

AbortTransaction aborts the ongoing transaction.
@@ -3933,14 +4587,14 @@

func (*Producer) BeginTransaction

func (p *Producer) BeginTransaction() error

BeginTransaction starts a new transaction.
@@ -3970,7 +4624,7 @@

func (*Producer) Close

func (*Producer) CommitTransaction

func (p *Producer) CommitTransaction(ctx context.Context) error

CommitTransaction commits the current transaction.
@@ -4034,7 +4688,7 @@

func (*Producer) Events

func (*Producer) Flush

func (p *Producer) Flush(timeoutMs int) int

Flush and wait for outstanding messages and requests to complete delivery.
Includes messages on ProduceChannel.
@@ -4063,27 +4717,27 @@

func (*Producer) GetFatalError

func (p *Producer) GetFatalError() error

GetFatalError returns an Error object if the client instance has raised a fatal error, else nil.

func (*Producer) GetMetadata

func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)

GetMetadata queries broker for cluster and topic metadata. If topic is
non-nil only information about that topic is returned, else if
@@ -4093,14 +4747,14 @@

func (*Producer) InitTransactions

func (p *Producer) InitTransactions(ctx context.Context) error

InitTransactions Initializes transactions for the producer instance.
@@ -4148,14 +4802,14 @@

func (*Producer) Len

func (p *Producer) Len() int

Len returns the number of messages and requests waiting to be transmitted to
the broker as well as delivery reports queued for the application.
@@ -4163,7 +4817,7 @@

func (*Producer) Logs

func (*Producer) OffsetsForTimes

func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)

OffsetsForTimes looks up offsets by timestamp for the given partitions.
@@ -4207,14 +4861,14 @@

func (*Producer) Produce

func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error

Produce single message. This is an asynchronous call that enqueues the message on the internal
@@ -4229,7 +4883,7 @@

func (*Producer) ProduceChannel

func (*Producer) Purge

func (p *Producer) Purge(flags int) error

Purge messages currently handled by this producer instance.
@@ -4283,28 +4937,28 @@

func (*Producer) QueryWatermarkOffsets

func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)

QueryWatermarkOffsets returns the broker's low and high offsets for the given topic and partition.

func (*Producer) SendOffsetsToTransaction

func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []TopicPartition, consumerMetadata *ConsumerGroupMetadata) error

SendOffsetsToTransaction sends a list of topic partition offsets to the
consumer group coordinator for `consumerMetadata`, and marks the offsets
@@ -4352,14 +5006,14 @@
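Taken together, the transactional methods above support a consume-transform-produce
cycle. The following is a hedged sketch only: error handling is abbreviated,
InitTransactions would normally run once at startup, and the messages, offsets
and group metadata are assumed to come from an accompanying consumer:

import (
    "context"

    "github.com/confluentinc/confluent-kafka-go/kafka"
)

// produceInTransaction sketches one transactional produce-and-commit cycle.
func produceInTransaction(ctx context.Context, p *kafka.Producer,
    msgs []*kafka.Message, offsets []kafka.TopicPartition,
    groupMetadata *kafka.ConsumerGroupMetadata) error {

    // Normally called once, right after creating the transactional producer.
    if err := p.InitTransactions(ctx); err != nil {
        return err
    }

    if err := p.BeginTransaction(); err != nil {
        return err
    }

    for _, m := range msgs {
        if err := p.Produce(m, nil); err != nil {
            p.AbortTransaction(ctx)
            return err
        }
    }

    // Commit the consumed offsets as part of the same transaction.
    if err := p.SendOffsetsToTransaction(ctx, offsets, groupMetadata); err != nil {
        p.AbortTransaction(ctx)
        return err
    }

    return p.CommitTransaction(ctx)
}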

func (*Producer) SetOAuthBearerToken

func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error

SetOAuthBearerToken sets the data to be transmitted
to a broker during SASL/OAUTHBEARER authentication. It will return nil
@@ -4377,14 +5031,14 @@

func (*Producer) SetOAuthBearerTokenFailure

func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error

SetOAuthBearerTokenFailure sets the error message describing why token
retrieval/setting failed; it also schedules a new token refresh event for 10
@@ -4396,34 +5050,34 @@

func (*Producer) String

func (p *Producer) String() string

String returns a human readable name for a Producer instance

func (*Producer) TestFatalError

func (p *Producer) TestFatalError(code ErrorCode, str string) ErrorCode

TestFatalError triggers a fatal error in the underlying client. This is to be used strictly for testing purposes.

type RebalanceCb

type RebalanceCb func(*Consumer, Event) error

type ResourcePatternType

ResourcePatternType enumerates the different types of Kafka resource patterns.

type ResourcePatternType int

func ResourcePatternTypeFromString

func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error)

ResourcePatternTypeFromString translates a resource pattern type name to
a ResourcePatternType value.

func (ResourcePatternType) String

func (t ResourcePatternType) String() string

String returns the human-readable representation of a ResourcePatternType
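ResourcePatternType values are consumed by the ACL bindings and binding filters
used with the new Admin API ACL calls elsewhere in this change. A hedged sketch,
with the topic name and principal as placeholders:

import "github.com/confluentinc/confluent-kafka-go/kafka"

// aclPatternExamples builds one concrete binding and one broad filter.
func aclPatternExamples() (kafka.ACLBinding, kafka.ACLBindingFilter) {
    binding := kafka.ACLBinding{
        Type:                kafka.ResourceTopic,
        Name:                "orders", // illustrative topic name
        ResourcePatternType: kafka.ResourcePatternTypeLiteral,
        Principal:           "User:alice", // illustrative principal
        Host:                "*",
        Operation:           kafka.ACLOperationRead,
        PermissionType:      kafka.ACLPermissionTypeAllow,
    }
    // A filter that matches every binding, regardless of pattern type.
    filter := kafka.ACLBindingFilter{
        Type:                kafka.ResourceAny,
        ResourcePatternType: kafka.ResourcePatternTypeAny,
        Operation:           kafka.ACLOperationAny,
        PermissionType:      kafka.ACLPermissionTypeAny,
    }
    return binding, filter
}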

type ResourceType

type ResourceType int

func ResourceTypeFromString

func ResourceTypeFromString(typeString string) (ResourceType, error)

ResourceTypeFromString translates a resource type name/string to a ResourceType value.

func (ResourceType) String

func (t ResourceType) String() string

String returns the human-readable representation of a ResourceType

type RevokedPartitions

func (RevokedPartitions) String

func (e RevokedPartitions) String() string

type Stats

func (Stats) String

func (e Stats) String() string

type TimestampType

type TimestampType int

func (TimestampType) String

func (t TimestampType) String() string

type TopicMetadata

type TopicMetadata struct {
    Topic      string
    Partitions []PartitionMetadata
    Error      Error
}

type TopicPartition

type TopicPartition struct {
    Topic     *string
    Partition int32
    Offset    Offset
    Metadata  *string
    Error     error
}

func (TopicPartition) String

func (p TopicPartition) String() string

type TopicPartitions

type TopicPartitions []TopicPartition

func (TopicPartitions) Len

func (tps TopicPartitions) Len() int

func (TopicPartitions) Less

func (tps TopicPartitions) Less(i, j int) bool

func (TopicPartitions) Swap

func (tps TopicPartitions) Swap(i, j int)
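Since Len, Less and Swap satisfy sort.Interface, a partition list can be ordered
with the standard library; a minimal sketch with illustrative topic names (the
exact ordering criteria are not shown in this hunk):

import (
    "sort"

    "github.com/confluentinc/confluent-kafka-go/kafka"
)

// sortPartitions orders a partition list in place using sort.Sort.
func sortPartitions() kafka.TopicPartitions {
    topicA, topicB := "a-topic", "b-topic"
    parts := kafka.TopicPartitions{
        {Topic: &topicB, Partition: 1},
        {Topic: &topicA, Partition: 3},
        {Topic: &topicA, Partition: 0},
    }
    sort.Sort(parts)
    return parts
}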

type TopicResult

type TopicResult struct {
    Topic string
    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
    Error Error
}

func (TopicResult) String

func (t TopicResult) String() string

String returns a human-readable representation of a TopicResult.

type TopicSpecification

type TopicSpecification struct {
    Topic string
    // Number of partitions in topic.
    NumPartitions int
    // Default replication factor for the topic's partitions, or zero
    // if an explicit ReplicaAssignment is set.
    ReplicationFactor int
    // (Optional) Explicit replica assignment. The outer array is
    // indexed by the partition number, while the inner per-partition array
    // contains the replica broker ids. The first broker in each
    // broker id list will be the preferred replica.
    ReplicaAssignment [][]int32
    // Topic configuration.
    Config map[string]string
}
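As a hedged illustration of how these two types meet, the sketch below builds a
TopicSpecification and checks each TopicResult; the AdminClient, the CreateTopics
call, and the topic settings are assumptions drawn from the admin API rather than
from this hunk:

import (
    "context"
    "fmt"

    "github.com/confluentinc/confluent-kafka-go/kafka"
)

// createTopic sketches topic creation via the admin client (assumed API).
func createTopic(ctx context.Context, a *kafka.AdminClient) error {
    spec := kafka.TopicSpecification{
        Topic:             "orders", // illustrative name
        NumPartitions:     6,
        ReplicationFactor: 3,
        Config: map[string]string{
            "cleanup.policy": "compact", // illustrative setting
        },
    }
    results, err := a.CreateTopics(ctx, []kafka.TopicSpecification{spec})
    if err != nil {
        return err
    }
    for _, res := range results {
        // Per the TopicResult docs, check Error.Code() != ErrNoError.
        if res.Error.Code() != kafka.ErrNoError {
            return fmt.Errorf("create %s: %v", res.Topic, res.Error)
        }
    }
    return nil
}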
 
diff --git a/kafka/build_darwin.go b/kafka/build_darwin.go index d8886f5e4..dd34cc11a 100644 --- a/kafka/build_darwin.go +++ b/kafka/build_darwin.go @@ -6,8 +6,8 @@ package kafka // #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB -// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin.a -lz -lm -lsasl2 -ldl -lpthread +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_darwin.a -lm -lsasl2 -ldl -lpthread -framework CoreFoundation -framework SystemConfiguration import "C" // LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client -const LibrdkafkaLinkInfo = "static darwin from librdkafka-static-bundle-v1.7.0.tgz" +const LibrdkafkaLinkInfo = "static darwin from librdkafka-static-bundle-v1.9.0.tgz" diff --git a/kafka/build_glibc_linux.go b/kafka/build_glibc_linux.go index 26cf9e95b..9a03db873 100644 --- a/kafka/build_glibc_linux.go +++ b/kafka/build_glibc_linux.go @@ -10,4 +10,4 @@ package kafka import "C" // LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client -const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.7.0.tgz" +const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.9.0.tgz" diff --git a/kafka/build_musl_linux.go b/kafka/build_musl_linux.go index 8d8ddc4f0..4ef601edd 100644 --- a/kafka/build_musl_linux.go +++ b/kafka/build_musl_linux.go @@ -10,4 +10,4 @@ package kafka import "C" // LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client -const LibrdkafkaLinkInfo = "static musl_linux from librdkafka-static-bundle-v1.7.0.tgz" +const LibrdkafkaLinkInfo = "static musl_linux from librdkafka-static-bundle-v1.9.0.tgz" diff --git a/kafka/build_windows.go b/kafka/build_windows.go index 656827afb..53f47d424 100644 --- a/kafka/build_windows.go +++ b/kafka/build_windows.go @@ -10,4 +10,4 @@ package kafka import "C" // LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client -const LibrdkafkaLinkInfo = "static windows from librdkafka-static-bundle-v1.7.0.tgz" +const LibrdkafkaLinkInfo = "static windows from librdkafka-static-bundle-v1.9.0.tgz" diff --git a/kafka/config_test.go b/kafka/config_test.go index 60093e496..67ee724ac 100644 --- a/kafka/config_test.go +++ b/kafka/config_test.go @@ -1,3 +1,5 @@ +package kafka + /** * Copyright 2016 Confluent Inc. * @@ -14,8 +16,6 @@ * limitations under the License. */ -package kafka - import ( "fmt" "testing" diff --git a/kafka/consumer.go b/kafka/consumer.go index e58ebb56b..156f34d30 100644 --- a/kafka/consumer.go +++ b/kafka/consumer.go @@ -292,6 +292,20 @@ func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []Topic return storedOffsets, nil } +// StoreMessage stores offset based on the provided message. +// This is a convenience method that uses StoreOffsets to do the actual work. +func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err error) { + if m.TopicPartition.Error != nil { + return nil, newErrorFromString(ErrInvalidArg, "Can't store errored message") + } + if m.TopicPartition.Offset < 0 { + return nil, newErrorFromString(ErrInvalidArg, "Can't store message with offset less than 0") + } + offsets := []TopicPartition{m.TopicPartition} + offsets[0].Offset++ + return c.StoreOffsets(offsets) +} + // Seek seeks the given topic partitions using the offset from the TopicPartition. 
// // If timeoutMs is not 0 the call will wait this long for the @@ -410,30 +424,16 @@ func (c *Consumer) Close() (err error) { close(c.events) } - // librdkafka's rd_kafka_consumer_close() will block - // and trigger the rebalance_cb() if one is set, if not, which is the - // case with the Go client since it registers EVENTs rather than callbacks, - // librdkafka will shortcut the rebalance_cb and do a forced unassign. - // But we can't have that since the application might need the final RevokePartitions - // before shutting down. So we trigger an Unsubscribe() first, wait for that to - // propagate (in the Poll loop below), and then close the consumer. - c.Unsubscribe() + C.rd_kafka_consumer_close_queue(c.handle.rk, c.handle.rkq) - // Poll for rebalance events - for { - c.Poll(10 * 1000) - if int(C.rd_kafka_queue_length(c.handle.rkq)) == 0 { - break - } + for C.rd_kafka_consumer_closed(c.handle.rk) != 1 { + c.Poll(100) } // Destroy our queue C.rd_kafka_queue_destroy(c.handle.rkq) c.handle.rkq = nil - // Close the consumer - C.rd_kafka_consumer_close(c.handle.rk) - c.handle.cleanup() C.rd_kafka_destroy(c.handle.rk) diff --git a/kafka/consumer_performance_test.go b/kafka/consumer_performance_test.go index 49edbaea5..e77cf22dc 100644 --- a/kafka/consumer_performance_test.go +++ b/kafka/consumer_performance_test.go @@ -1,3 +1,5 @@ +package kafka + /** * Copyright 2016 Confluent Inc. * @@ -14,8 +16,6 @@ * limitations under the License. */ -package kafka - import ( "fmt" "math/rand" diff --git a/kafka/consumer_test.go b/kafka/consumer_test.go index d7cacc3ed..7f5ea38d0 100644 --- a/kafka/consumer_test.go +++ b/kafka/consumer_test.go @@ -1,3 +1,5 @@ +package kafka + /** * Copyright 2016 Confluent Inc. * @@ -14,14 +16,14 @@ * limitations under the License. */ -package kafka - import ( "fmt" "os" "reflect" "sort" "strings" + "sync" + "sync/atomic" "testing" "time" ) @@ -85,6 +87,16 @@ func TestConsumerAPIs(t *testing.T) { t.Errorf("StoreOffsets(empty) failed: %s", err) } + // test StoreMessage doesn't fail either + stored, err = c.StoreMessage(&Message{TopicPartition: TopicPartition{Topic: &topic, Partition: 0, Offset: 1}}) + if err != nil && err.(Error).Code() != ErrUnknownPartition { + t.Errorf("StoreMessage() failed: %s", err) + toppar := stored[0] + if toppar.Error != nil && toppar.Error.(Error).Code() == ErrUnknownPartition { + t.Errorf("StoreMessage() TopicPartition error: %s", toppar.Error) + } + } + topic1 := "gotest1" topic2 := "gotest2" err = c.Assign([]TopicPartition{{Topic: &topic1, Partition: 2}, @@ -419,3 +431,280 @@ func TestConsumerLog(t *testing.T) { } } } + +func wrapRebalanceCb(assignedEvents *int32, revokedEvents *int32, t *testing.T) func(c *Consumer, event Event) error { + return func(c *Consumer, event Event) error { + switch ev := event.(type) { + case AssignedPartitions: + atomic.AddInt32(assignedEvents, 1) + + t.Logf("%v, %s rebalance: %d new partition(s) assigned: %v\n", + c, c.GetRebalanceProtocol(), len(ev.Partitions), + ev.Partitions) + err := c.Assign(ev.Partitions) + if err != nil { + panic(err) + } + + case RevokedPartitions: + atomic.AddInt32(revokedEvents, 1) + + t.Logf("%v, %s rebalance: %d partition(s) revoked: %v\n", + c, c.GetRebalanceProtocol(), len(ev.Partitions), + ev.Partitions) + if c.AssignmentLost() { + // Our consumer has been kicked out of the group and the + // entire assignment is thus lost. + t.Logf("%v, Current assignment lost!\n", c) + } + + // The client automatically calls Unassign() unless + // the callback has already called that method. 
+ } + return nil + } +} + +func testPoll(c *Consumer, doneChan chan bool, t *testing.T, wg *sync.WaitGroup) { + defer wg.Done() + + run := true + for run { + select { + case <-doneChan: + run = false + + default: + ev := c.Poll(100) + if ev == nil { + continue + } + switch e := ev.(type) { + case *Message: + t.Logf("Message on %s:\n%s\n", + e.TopicPartition, string(e.Value)) + if e.Headers != nil { + t.Logf("Headers: %v\n", e.Headers) + } + + case Error: + // Errors should generally be + // considered informational, the client + // will try to automatically recover. + t.Logf("Error: %v: %v for "+ + "consumer %v\n", e.Code(), e, c) + + default: + t.Logf("Ignored %v for consumer %v\n", + e, c) + } + } + } +} + +// TestConsumerCloseForStaticMember verifies the rebalance +// for static membership. +// According to KIP-345, the consumer group will not trigger rebalance unless +// 1) A new member joins +// 2) A leader rejoins (possibly due to topic assignment change) +// 3) An existing member offline time is over session timeout +// 4) Broker receives a leave group request containing alistof +// `group.instance.id`s (details later) +// +// This test uses 3 consumers while each consumer joins after the assignment +// finished for the previous consumers. +// The expected behavior for these consumers are: +// 1) First consumer joins, AssignedPartitions happens. Assign all the +// partitions to it. +// 2) Second consumer joins, RevokedPartitions happens from the first consumer, +// then AssignedPartitions happens to both consumers. +// 3) Third consumer joins, RevokedPartitions happens from the previous two +// consumers, then AssignedPartitions happens to all the three consumers. +// 4) Close the second consumer, revoke its assignments will happen, but it +// should not notice other consumers. +// 5) Rejoin the second consumer, rebalance should not happen to all the other +// consumers since it's not the leader, AssignedPartitions only happened +// to this consumer to assign the partitions. +// 6) Close the third consumer, revoke its assignments will happen, but it +// should not notice other consumers. +// 7) Close the rejoined consumer, revoke its assignments will happen, +// but it should not notice other consumers. +// 8) Close the first consumer, revoke its assignments will happen. +// +// The total number of AssignedPartitions for the first consumer is 3, +// and the total number of RevokedPartitions for the first consumer is 3. +// The total number of AssignedPartitions for the second consumer is 2, +// and the total number of RevokedPartitions for the second consumer is 2. +// The total number of AssignedPartitions for the third consumer is 1, +// and the total number of RevokedPartitions for the third consumer is 1. +// The total number of AssignedPartitions for the rejoined consumer +// (originally second consumer) is 1, +// and the total number of RevokedPartitions for the rejoined consumer +// (originally second consumer) is 1. 
+func TestConsumerCloseForStaticMember(t *testing.T) { + if !testconfRead() { + t.Skipf("Missing testconf.json") + } + broker := testconf.Brokers + topic := createTestTopic(t, "staticMembership", 3, 1) + + var assignedEvents1 int32 + var revokedEvents1 int32 + + var assignedEvents2 int32 + var revokedEvents2 int32 + + var assignedEvents3 int32 + var revokedEvents3 int32 + + var assignedEvents4 int32 + var revokedEvents4 int32 + + conf1 := ConfigMap{ + "bootstrap.servers": broker, + "group.id": "rebalance", + "session.timeout.ms": "6000", + "max.poll.interval.ms": "10000", + "group.instance.id": "staticmember1", + } + c1, err := NewConsumer(&conf1) + + conf2 := ConfigMap{ + "bootstrap.servers": broker, + "group.id": "rebalance", + "session.timeout.ms": "6000", + "max.poll.interval.ms": "10000", + "group.instance.id": "staticmember2", + } + c2, err := NewConsumer(&conf2) + if err != nil { + t.Fatalf("%s", err) + } + + conf3 := ConfigMap{ + "bootstrap.servers": broker, + "group.id": "rebalance", + "session.timeout.ms": "6000", + "max.poll.interval.ms": "10000", + "group.instance.id": "staticmember3", + } + + c3, err := NewConsumer(&conf3) + if err != nil { + t.Fatalf("%s", err) + } + wrapRebalancecb1 := wrapRebalanceCb(&assignedEvents1, &revokedEvents1, t) + err = c1.Subscribe(topic, wrapRebalancecb1) + if err != nil { + t.Fatalf("Failed to subscribe to topic %s: %s\n", topic, err) + } + + wg := sync.WaitGroup{} + doneChan := make(chan bool, 3) + + wg.Add(1) + go testPoll(c1, doneChan, t, &wg) + testConsumerWaitAssignment(c1, t) + + closeChan := make(chan bool) + wrapRebalancecb2 := wrapRebalanceCb(&assignedEvents2, &revokedEvents2, t) + err = c2.Subscribe(topic, wrapRebalancecb2) + if err != nil { + t.Fatalf("Failed to subscribe to topic %s: %s\n", topic, err) + } + wg.Add(1) + go testPoll(c2, closeChan, t, &wg) + testConsumerWaitAssignment(c2, t) + + wrapRebalancecb3 := wrapRebalanceCb(&assignedEvents3, &revokedEvents3, t) + err = c3.Subscribe(topic, wrapRebalancecb3) + if err != nil { + t.Fatalf("Failed to subscribe to topic %s: %s\n", topic, err) + } + wg.Add(1) + go testPoll(c3, doneChan, t, &wg) + testConsumerWaitAssignment(c3, t) + + closeChan <- true + close(closeChan) + c2.Close() + + c2, err = NewConsumer(&conf2) + if err != nil { + t.Fatalf("%s", err) + } + + wrapRebalancecb4 := wrapRebalanceCb(&assignedEvents4, &revokedEvents4, t) + err = c2.Subscribe(topic, wrapRebalancecb4) + if err != nil { + t.Fatalf("Failed to subscribe to topic %s: %s\n", topic, err) + } + + wg.Add(1) + go testPoll(c2, doneChan, t, &wg) + testConsumerWaitAssignment(c2, t) + + doneChan <- true + close(doneChan) + + c3.Close() + c2.Close() + c1.Close() + + wg.Wait() + + // Wait 2 * session.timeout.ms to make sure no revokedEvents happens + time.Sleep(2 * 6000 * time.Millisecond) + + if atomic.LoadInt32(&assignedEvents1) != 3 { + t.Fatalf("3 assignedEvents are Expected to happen for the first consumer, but %d happened\n", + atomic.LoadInt32(&assignedEvents1)) + } + + if atomic.LoadInt32(&revokedEvents1) != 3 { + t.Fatalf("3 revokedEvents are Expected to happen for the first consumer, but %d happened\n", + atomic.LoadInt32(&revokedEvents1)) + } + + if atomic.LoadInt32(&assignedEvents2) != 2 { + t.Fatalf("2 assignedEvents are Expected to happen for the second consumer, but %d happened\n", + atomic.LoadInt32(&assignedEvents2)) + } + if atomic.LoadInt32(&revokedEvents2) != 2 { + t.Fatalf("2 revokedEvents is Expected to happen for the second consumer, but %d happened\n", + atomic.LoadInt32(&revokedEvents2)) + } + + 
if atomic.LoadInt32(&assignedEvents3) != 1 { + t.Fatalf("1 assignedEvents is Expected to happen for the third consumer, but %d happened\n", + atomic.LoadInt32(&assignedEvents3)) + } + if atomic.LoadInt32(&revokedEvents3) != 1 { + t.Fatalf("1 revokedEvents is Expected to happen for the third consumer, but %d happened\n", + atomic.LoadInt32(&revokedEvents3)) + } + + if atomic.LoadInt32(&assignedEvents4) != 1 { + t.Fatalf("1 assignedEvents is Expected to happen for the rejoined consumer(originally second consumer), but %d happened\n", + atomic.LoadInt32(&assignedEvents4)) + } + if atomic.LoadInt32(&revokedEvents4) != 1 { + t.Fatalf("1 revokedEvents is Expected to happen for the rejoined consumer(originally second consumer), but %d happened\n", + atomic.LoadInt32(&revokedEvents4)) + } +} + +func testConsumerWaitAssignment(c *Consumer, t *testing.T) { + run := true + for run { + assignment, err := c.Assignment() + if err != nil { + t.Fatalf("Assignment failed: %s\n", err) + } + + if len(assignment) != 0 { + t.Logf("%v Assigned partitions are: %v\n", c, assignment) + run = false + } + } +} diff --git a/kafka/context.go b/kafka/context.go index 85709be0f..b0dfd0e44 100644 --- a/kafka/context.go +++ b/kafka/context.go @@ -1,3 +1,5 @@ +package kafka + /** * Copyright 2019 Confluent Inc. * @@ -14,8 +16,6 @@ * limitations under the License. */ -package kafka - import ( "context" "time" diff --git a/kafka/error.go b/kafka/error.go index 1827f43e1..193e7ea3e 100644 --- a/kafka/error.go +++ b/kafka/error.go @@ -67,10 +67,8 @@ func newCErrorFromString(code C.rd_kafka_resp_err_t, str string) (err Error) { return newErrorFromString(ErrorCode(code), str) } -// newErrorFromCError creates a new Error instance and destroys -// the passed cError. -func newErrorFromCErrorDestroy(cError *C.rd_kafka_error_t) Error { - defer C.rd_kafka_error_destroy(cError) +// newErrorFromCError creates a new Error instance +func newErrorFromCError(cError *C.rd_kafka_error_t) Error { return Error{ code: ErrorCode(C.rd_kafka_error_code(cError)), str: C.GoString(C.rd_kafka_error_string(cError)), @@ -80,6 +78,13 @@ func newErrorFromCErrorDestroy(cError *C.rd_kafka_error_t) Error { } } +// newErrorFromCErrorDestroy creates a new Error instance and destroys +// the passed cError. +func newErrorFromCErrorDestroy(cError *C.rd_kafka_error_t) Error { + defer C.rd_kafka_error_destroy(cError) + return newErrorFromCError(cError) +} + // Error returns a human readable representation of an Error // Same as Error.String() func (e Error) Error() string { diff --git a/kafka/error_test.go b/kafka/error_test.go index d405ba749..414fd59e8 100644 --- a/kafka/error_test.go +++ b/kafka/error_test.go @@ -1,3 +1,5 @@ +package kafka + /** * Copyright 2019 Confluent Inc. * @@ -14,8 +16,6 @@ * limitations under the License. */ -package kafka - import ( "strings" "testing" diff --git a/kafka/event.go b/kafka/event.go index d7f0b6189..6357ad858 100644 --- a/kafka/event.go +++ b/kafka/event.go @@ -251,6 +251,8 @@ out: select { case *ch <- msg: case <-termChan: + retval = nil + term = true break out } diff --git a/kafka/event_test.go b/kafka/event_test.go index 0c172e0b8..355ae2145 100644 --- a/kafka/event_test.go +++ b/kafka/event_test.go @@ -1,3 +1,5 @@ +package kafka + /** * Copyright 2016 Confluent Inc. * @@ -14,8 +16,6 @@ * limitations under the License. 
*/ -package kafka - import ( "testing" ) diff --git a/kafka/generated_errors.go b/kafka/generated_errors.go index 96ed1eea0..568c831e8 100644 --- a/kafka/generated_errors.go +++ b/kafka/generated_errors.go @@ -1,6 +1,6 @@ package kafka -// Copyright 2016-2021 Confluent Inc. -// AUTOMATICALLY GENERATED ON 2021-05-10 11:33:08.588919179 +0200 CEST m=+0.000341587 USING librdkafka 1.7.0-dirty +// Copyright 2016-2022 Confluent Inc. +// AUTOMATICALLY GENERATED ON 2022-06-16 11:17:24.861602 -0700 PDT m=+0.000650282 USING librdkafka 1.9.0 /* #include "select_rdkafka.h" diff --git a/kafka/header_test.go b/kafka/header_test.go index f365cb912..6c0111941 100644 --- a/kafka/header_test.go +++ b/kafka/header_test.go @@ -1,3 +1,5 @@ +package kafka + /** * Copyright 2016 Confluent Inc. * @@ -14,8 +16,6 @@ * limitations under the License. */ -package kafka - import ( "testing" ) diff --git a/kafka/integration_test.go b/kafka/integration_test.go index b0df51535..ebb26c32b 100644 --- a/kafka/integration_test.go +++ b/kafka/integration_test.go @@ -1,3 +1,5 @@ +package kafka + /** * Copyright 2016 Confluent Inc. * @@ -14,8 +16,6 @@ * limitations under the License. */ -package kafka - import ( "context" "encoding/binary" @@ -24,6 +24,7 @@ import ( "path" "reflect" "runtime" + "sort" "testing" "time" ) @@ -1654,3 +1655,197 @@ func TestAdminClient_ControllerID(t *testing.T) { t.Logf("ControllerID: %d\n", controllerID) } + +func TestAdminACLs(t *testing.T) { + if !testconfRead() { + t.Skipf("Missing testconf.json") + } + + rand.Seed(time.Now().Unix()) + topic := testconf.Topic + group := testconf.GroupID + noError := NewError(ErrNoError, "", false) + unknownError := NewError(ErrUnknown, "Unknown broker error", false) + var expectedCreateACLs []CreateACLResult + var expectedDescribeACLs DescribeACLsResult + var expectedDeleteACLs []DeleteACLsResult + var ctx context.Context + var cancel context.CancelFunc + + a := createAdminClient(t) + defer a.Close() + + maxDuration, err := time.ParseDuration("30s") + if err != nil { + t.Fatalf("%s", err) + } + requestTimeout, err := time.ParseDuration("20s") + if err != nil { + t.Fatalf("%s", err) + } + + checkExpectedResult := func(expected interface{}, result interface{}) { + if !reflect.DeepEqual(result, expected) { + t.Fatalf("Expected result to deep equal to %v, but found %v", expected, result) + } + } + + // Create ACLs + t.Logf("Creating ACLs\n") + newACLs := ACLBindings{ + { + Type: ResourceTopic, + Name: topic, + ResourcePatternType: ResourcePatternTypeLiteral, + Principal: "User:test-user-1", + Host: "*", + Operation: ACLOperationRead, + PermissionType: ACLPermissionTypeAllow, + }, + { + Type: ResourceTopic, + Name: topic, + ResourcePatternType: ResourcePatternTypePrefixed, + Principal: "User:test-user-2", + Host: "*", + Operation: ACLOperationWrite, + PermissionType: ACLPermissionTypeDeny, + }, + { + Type: ResourceGroup, + Name: group, + ResourcePatternType: ResourcePatternTypePrefixed, + Principal: "User:test-user-2", + Host: "*", + Operation: ACLOperationAll, + PermissionType: ACLPermissionTypeAllow, + }, + } + + invalidACLs := ACLBindings{ + { + Type: ResourceTopic, + Name: topic, + ResourcePatternType: ResourcePatternTypeLiteral, + // Principal must be in the form "{principalType}:{principalName}" + // Broker returns ErrUnknown in this case + Principal: "wrong-principal", + Host: "*", + Operation: ACLOperationRead, + PermissionType: ACLPermissionTypeAllow, + }, + } + + aclBindingFilters := ACLBindingFilters{ + { + Type: ResourceAny, + ResourcePatternType: 
ResourcePatternTypeAny, + Operation: ACLOperationAny, + PermissionType: ACLPermissionTypeAny, + }, + { + Type: ResourceAny, + ResourcePatternType: ResourcePatternTypePrefixed, + Operation: ACLOperationAny, + PermissionType: ACLPermissionTypeAny, + }, + { + Type: ResourceTopic, + ResourcePatternType: ResourcePatternTypeAny, + Operation: ACLOperationAny, + PermissionType: ACLPermissionTypeAny, + }, + { + Type: ResourceGroup, + ResourcePatternType: ResourcePatternTypeAny, + Operation: ACLOperationAny, + PermissionType: ACLPermissionTypeAny, + }, + } + + // CreateACLs should be idempotent + for n := 0; n < 2; n++ { + ctx, cancel = context.WithTimeout(context.Background(), maxDuration) + defer cancel() + + resultCreateACLs, err := a.CreateACLs(ctx, newACLs, SetAdminRequestTimeout(requestTimeout)) + if err != nil { + t.Fatalf("CreateACLs() failed: %s", err) + } + expectedCreateACLs = []CreateACLResult{{Error: noError}, {Error: noError}, {Error: noError}} + checkExpectedResult(expectedCreateACLs, resultCreateACLs) + } + + // CreateACLs with server side validation errors + ctx, cancel = context.WithTimeout(context.Background(), maxDuration) + defer cancel() + + resultCreateACLs, err := a.CreateACLs(ctx, invalidACLs, SetAdminRequestTimeout(requestTimeout)) + if err != nil { + t.Fatalf("CreateACLs() failed: %s", err) + } + expectedCreateACLs = []CreateACLResult{{Error: unknownError}} + checkExpectedResult(expectedCreateACLs, resultCreateACLs) + + // DescribeACLs must return the three ACLs + ctx, cancel = context.WithTimeout(context.Background(), maxDuration) + defer cancel() + resultDescribeACLs, err := a.DescribeACLs(ctx, aclBindingFilters[0], SetAdminRequestTimeout(requestTimeout)) + expectedDescribeACLs = DescribeACLsResult{ + Error: noError, + ACLBindings: newACLs, + } + if err != nil { + t.Fatalf("%s", err) + } + sort.Sort(&resultDescribeACLs.ACLBindings) + checkExpectedResult(expectedDescribeACLs, *resultDescribeACLs) + + // Delete the ACLs with ResourcePatternTypePrefixed + ctx, cancel = context.WithTimeout(context.Background(), maxDuration) + defer cancel() + resultDeleteACLs, err := a.DeleteACLs(ctx, aclBindingFilters[1:2], SetAdminRequestTimeout(requestTimeout)) + expectedDeleteACLs = []DeleteACLsResult{ + { + Error: noError, + ACLBindings: newACLs[1:3], + }, + } + if err != nil { + t.Fatalf("%s", err) + } + sort.Sort(&resultDeleteACLs[0].ACLBindings) + checkExpectedResult(expectedDeleteACLs, resultDeleteACLs) + + // Delete the ACLs with ResourceTopic and ResourceGroup + ctx, cancel = context.WithTimeout(context.Background(), maxDuration) + defer cancel() + resultDeleteACLs, err = a.DeleteACLs(ctx, aclBindingFilters[2:4], SetAdminRequestTimeout(requestTimeout)) + expectedDeleteACLs = []DeleteACLsResult{ + { + Error: noError, + ACLBindings: newACLs[0:1], + }, + { + Error: noError, + ACLBindings: ACLBindings{}, + }, + } + if err != nil { + t.Fatalf("%s", err) + } + checkExpectedResult(expectedDeleteACLs, resultDeleteACLs) + + // All the ACLs should have been deleted + ctx, cancel = context.WithTimeout(context.Background(), maxDuration) + defer cancel() + resultDescribeACLs, err = a.DescribeACLs(ctx, aclBindingFilters[0], SetAdminRequestTimeout(requestTimeout)) + expectedDescribeACLs = DescribeACLsResult{ + Error: noError, + ACLBindings: ACLBindings{}, + } + if err != nil { + t.Fatalf("%s", err) + } + checkExpectedResult(expectedDescribeACLs, *resultDescribeACLs) +} diff --git a/kafka/kafka.go b/kafka/kafka.go index 20dc30a89..254edbdbd 100644 --- a/kafka/kafka.go +++ b/kafka/kafka.go @@ 
-20,7 +20,7 @@ // // High-level Consumer // -// * Decide if you want to read messages and events by calling `.Poll()` or +// * Decide if you want to read messages and events by calling `.Poll()` or // the deprecated option of using the `.Events()` channel. (If you want to use // `.Events()` channel then set `"go.events.channel.enable": true`). // diff --git a/kafka/kafka_test.go b/kafka/kafka_test.go index e268f7eb6..9659c812e 100644 --- a/kafka/kafka_test.go +++ b/kafka/kafka_test.go @@ -1,3 +1,5 @@ +package kafka + /** * Copyright 2016 Confluent Inc. * @@ -14,8 +16,6 @@ * limitations under the License. */ -package kafka - import ( "testing" ) diff --git a/kafka/librdkafka_vendor/LICENSES.txt b/kafka/librdkafka_vendor/LICENSES.txt index f2aa57d07..1ab8a1dd4 100644 --- a/kafka/librdkafka_vendor/LICENSES.txt +++ b/kafka/librdkafka_vendor/LICENSES.txt @@ -27,6 +27,32 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +LICENSE.cjson +-------------------------------------------------------------- +For cJSON.c and cJSON.h: + +Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + LICENSE.crc32c -------------------------------------------------------------- # For src/crc32c.c copied (with modifications) from diff --git a/kafka/librdkafka_vendor/bundle-import.sh b/kafka/librdkafka_vendor/bundle-import.sh index 2f7aedab2..5394fac69 100755 --- a/kafka/librdkafka_vendor/bundle-import.sh +++ b/kafka/librdkafka_vendor/bundle-import.sh @@ -14,28 +14,30 @@ usage() { } +# Parse dynamic libraries from linker command line. +# Will print a list matching -lfoo and -framework X.. parse_dynlibs() { - # Parse dynamic libraries from pkg-config file, - # both the ones specified with Libs: but also through Requires: - local pc=$1 local libs= - local req= - local n= - for req in $(grep ^Requires: $pc | sed -e 's/^Requires://'); do - n=$(pkg-config --libs $req) - if [[ $n == -l* ]]; then - libs="${libs} $n" - fi - done - for n in $(grep ^Libs: $pc); do - if [[ $n == -l* ]]; then - libs="${libs} $n" + while [[ $# -gt 0 ]]; do + if [[ $1 == -l* ]]; then + libs="${libs} $1" + elif [[ $1 == -framework ]]; then + libs="${libs} $1 $2" + shift # remove one (extra) arg fi + shift # remove one arg done echo "$libs" } +# Parse dynamic library dependecies from pkg-config file and print +# them to stdout. 
+parse_pc_dynlibs() { + local pc=$1 + parse_dynlibs $(sed -n 's/^Libs: \(..*\)/\1/p' "$pc") +} + setup_build() { # Copies static library from the temp directory into final location, # extracts dynamic lib list from the pkg-config file, @@ -54,7 +56,7 @@ setup_build() { build_tag="// +build musl" fi - local dynlibs=$(parse_dynlibs $pc) + local dynlibs=$(parse_pc_dynlibs $pc) echo "Copying $apath to $dpath" cp "$apath" "$dpath" diff --git a/kafka/librdkafka_vendor/import.sh b/kafka/librdkafka_vendor/import.sh index 702116997..7f920f3a9 100755 --- a/kafka/librdkafka_vendor/import.sh +++ b/kafka/librdkafka_vendor/import.sh @@ -51,12 +51,15 @@ fi curr_branch=$(git symbolic-ref HEAD 2>/dev/null | cut -d"/" -f 3-) uncommitted=$(git status --untracked-files=no --porcelain) -if [[ $devel != 1 ]] && ( [[ $curr_branch != master ]] || [[ ! -z $uncommitted ]] ); then +if [[ ! -z $uncommitted ]]; then + echo "Error: This script must be run on a clean branch with no uncommitted changes" + echo "Uncommitted files:" + echo "$uncommitted" + exit 1 +fi + +if [[ $devel != 1 ]] && [[ $curr_branch != master ]] ; then echo "Error: This script must be run on an up-to-date, clean, master branch" - if [[ ! -z $uncommitted ]]; then - echo "Uncommitted files:" - echo "$uncommitted" - fi exit 1 fi diff --git a/kafka/librdkafka_vendor/librdkafka_darwin.a b/kafka/librdkafka_vendor/librdkafka_darwin.a index 4a036842e..1d88064d6 100644 Binary files a/kafka/librdkafka_vendor/librdkafka_darwin.a and b/kafka/librdkafka_vendor/librdkafka_darwin.a differ diff --git a/kafka/librdkafka_vendor/librdkafka_glibc_linux.a b/kafka/librdkafka_vendor/librdkafka_glibc_linux.a index 9a059c19e..a08a8facb 100644 Binary files a/kafka/librdkafka_vendor/librdkafka_glibc_linux.a and b/kafka/librdkafka_vendor/librdkafka_glibc_linux.a differ diff --git a/kafka/librdkafka_vendor/librdkafka_musl_linux.a b/kafka/librdkafka_vendor/librdkafka_musl_linux.a index ac017a510..196ed79b0 100644 Binary files a/kafka/librdkafka_vendor/librdkafka_musl_linux.a and b/kafka/librdkafka_vendor/librdkafka_musl_linux.a differ diff --git a/kafka/librdkafka_vendor/librdkafka_windows.a b/kafka/librdkafka_vendor/librdkafka_windows.a index 994cff199..bf7cc8fac 100644 Binary files a/kafka/librdkafka_vendor/librdkafka_windows.a and b/kafka/librdkafka_vendor/librdkafka_windows.a differ diff --git a/kafka/librdkafka_vendor/rdkafka.h b/kafka/librdkafka_vendor/rdkafka.h index 96701d62b..04ea7fc5d 100644 --- a/kafka/librdkafka_vendor/rdkafka.h +++ b/kafka/librdkafka_vendor/rdkafka.h @@ -60,13 +60,13 @@ extern "C" { #ifndef WIN32_MEAN_AND_LEAN #define WIN32_MEAN_AND_LEAN #endif -#include /* for sockaddr, .. */ +#include /* for sockaddr, .. */ #ifndef _SSIZE_T_DEFINED #define _SSIZE_T_DEFINED typedef SSIZE_T ssize_t; #endif #define RD_UNUSED -#define RD_INLINE __inline +#define RD_INLINE __inline #define RD_DEPRECATED __declspec(deprecated) #define RD_FORMAT(...) #undef RD_EXPORT @@ -92,7 +92,7 @@ typedef SSIZE_T ssize_t; #define RD_DEPRECATED __attribute__((deprecated)) #if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) -#define RD_FORMAT(...) __attribute__((format (__VA_ARGS__))) +#define RD_FORMAT(...) __attribute__((format(__VA_ARGS__))) #else #define RD_FORMAT(...) 
#endif @@ -109,29 +109,36 @@ typedef SSIZE_T ssize_t; * @returns \p RET */ #if LIBRDKAFKA_TYPECHECKS -#define _LRK_TYPECHECK(RET,TYPE,ARG) \ - ({ if (0) { TYPE __t RD_UNUSED = (ARG); } RET; }) - -#define _LRK_TYPECHECK2(RET,TYPE,ARG,TYPE2,ARG2) \ - ({ \ - if (0) { \ - TYPE __t RD_UNUSED = (ARG); \ - TYPE2 __t2 RD_UNUSED = (ARG2); \ - } \ - RET; }) - -#define _LRK_TYPECHECK3(RET,TYPE,ARG,TYPE2,ARG2,TYPE3,ARG3) \ - ({ \ - if (0) { \ - TYPE __t RD_UNUSED = (ARG); \ - TYPE2 __t2 RD_UNUSED = (ARG2); \ - TYPE3 __t3 RD_UNUSED = (ARG3); \ - } \ - RET; }) +#define _LRK_TYPECHECK(RET, TYPE, ARG) \ + ({ \ + if (0) { \ + TYPE __t RD_UNUSED = (ARG); \ + } \ + RET; \ + }) + +#define _LRK_TYPECHECK2(RET, TYPE, ARG, TYPE2, ARG2) \ + ({ \ + if (0) { \ + TYPE __t RD_UNUSED = (ARG); \ + TYPE2 __t2 RD_UNUSED = (ARG2); \ + } \ + RET; \ + }) + +#define _LRK_TYPECHECK3(RET, TYPE, ARG, TYPE2, ARG2, TYPE3, ARG3) \ + ({ \ + if (0) { \ + TYPE __t RD_UNUSED = (ARG); \ + TYPE2 __t2 RD_UNUSED = (ARG2); \ + TYPE3 __t3 RD_UNUSED = (ARG3); \ + } \ + RET; \ + }) #else -#define _LRK_TYPECHECK(RET,TYPE,ARG) (RET) -#define _LRK_TYPECHECK2(RET,TYPE,ARG,TYPE2,ARG2) (RET) -#define _LRK_TYPECHECK3(RET,TYPE,ARG,TYPE2,ARG2,TYPE3,ARG3) (RET) +#define _LRK_TYPECHECK(RET, TYPE, ARG) (RET) +#define _LRK_TYPECHECK2(RET, TYPE, ARG, TYPE2, ARG2) (RET) +#define _LRK_TYPECHECK3(RET, TYPE, ARG, TYPE2, ARG2, TYPE3, ARG3) (RET) #endif /* @endcond */ @@ -158,7 +165,7 @@ typedef SSIZE_T ssize_t; * @remark This value should only be used during compile time, * for runtime checks of version use rd_kafka_version() */ -#define RD_KAFKA_VERSION 0x010700ff +#define RD_KAFKA_VERSION 0x010900ff /** * @brief Returns the librdkafka version as integer. @@ -177,7 +184,7 @@ int rd_kafka_version(void); * @returns Version string */ RD_EXPORT -const char *rd_kafka_version_str (void); +const char *rd_kafka_version_str(void); /**@}*/ @@ -198,8 +205,8 @@ const char *rd_kafka_version_str (void); * @sa rd_kafka_new() */ typedef enum rd_kafka_type_t { - RD_KAFKA_PRODUCER, /**< Producer client */ - RD_KAFKA_CONSUMER /**< Consumer client */ + RD_KAFKA_PRODUCER, /**< Producer client */ + RD_KAFKA_CONSUMER /**< Consumer client */ } rd_kafka_type_t; @@ -209,9 +216,9 @@ typedef enum rd_kafka_type_t { * @sa rd_kafka_message_timestamp() */ typedef enum rd_kafka_timestamp_type_t { - RD_KAFKA_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ - RD_KAFKA_TIMESTAMP_CREATE_TIME, /**< Message creation time */ - RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME /**< Log append time */ + RD_KAFKA_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */ + RD_KAFKA_TIMESTAMP_CREATE_TIME, /**< Message creation time */ + RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME /**< Log append time */ } rd_kafka_timestamp_type_t; @@ -232,8 +239,10 @@ const char *rd_kafka_get_debug_contexts(void); * linking another version of the library. * Use rd_kafka_get_debug_contexts() instead. 
*/ -#define RD_KAFKA_DEBUG_CONTEXTS \ - "all,generic,broker,topic,metadata,feature,queue,msg,protocol,cgrp,security,fetch,interceptor,plugin,consumer,admin,eos,mock,assignor,conf" +#define RD_KAFKA_DEBUG_CONTEXTS \ + "all,generic,broker,topic,metadata,feature,queue,msg,protocol,cgrp," \ + "security,fetch,interceptor,plugin,consumer,admin,eos,mock,assignor," \ + "conf" /* @cond NO_DOC */ @@ -246,10 +255,11 @@ typedef struct rd_kafka_queue_s rd_kafka_queue_t; typedef struct rd_kafka_op_s rd_kafka_event_t; typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t; typedef struct rd_kafka_consumer_group_metadata_s -rd_kafka_consumer_group_metadata_t; + rd_kafka_consumer_group_metadata_t; typedef struct rd_kafka_error_s rd_kafka_error_t; typedef struct rd_kafka_headers_s rd_kafka_headers_t; typedef struct rd_kafka_group_result_s rd_kafka_group_result_t; +typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t; /* @endcond */ @@ -266,80 +276,80 @@ typedef struct rd_kafka_group_result_s rd_kafka_group_result_t; * @sa Use rd_kafka_err2str() to translate an error code a human readable string */ typedef enum { - /* Internal errors to rdkafka: */ - /** Begin internal error codes */ - RD_KAFKA_RESP_ERR__BEGIN = -200, - /** Received message is incorrect */ - RD_KAFKA_RESP_ERR__BAD_MSG = -199, - /** Bad/unknown compression */ - RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198, - /** Broker is going away */ - RD_KAFKA_RESP_ERR__DESTROY = -197, - /** Generic failure */ - RD_KAFKA_RESP_ERR__FAIL = -196, - /** Broker transport failure */ - RD_KAFKA_RESP_ERR__TRANSPORT = -195, - /** Critical system resource */ - RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194, - /** Failed to resolve broker */ - RD_KAFKA_RESP_ERR__RESOLVE = -193, - /** Produced message timed out*/ - RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192, - /** Reached the end of the topic+partition queue on - * the broker. Not really an error. - * This event is disabled by default, - * see the `enable.partition.eof` configuration property. */ - RD_KAFKA_RESP_ERR__PARTITION_EOF = -191, - /** Permanent: Partition does not exist in cluster. */ - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190, - /** File or filesystem error */ - RD_KAFKA_RESP_ERR__FS = -189, - /** Permanent: Topic does not exist in cluster. */ - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188, - /** All broker connections are down. */ - RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187, - /** Invalid argument, or invalid configuration */ - RD_KAFKA_RESP_ERR__INVALID_ARG = -186, - /** Operation timed out */ - RD_KAFKA_RESP_ERR__TIMED_OUT = -185, - /** Queue is full */ - RD_KAFKA_RESP_ERR__QUEUE_FULL = -184, - /** ISR count < required.acks */ + /* Internal errors to rdkafka: */ + /** Begin internal error codes */ + RD_KAFKA_RESP_ERR__BEGIN = -200, + /** Received message is incorrect */ + RD_KAFKA_RESP_ERR__BAD_MSG = -199, + /** Bad/unknown compression */ + RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198, + /** Broker is going away */ + RD_KAFKA_RESP_ERR__DESTROY = -197, + /** Generic failure */ + RD_KAFKA_RESP_ERR__FAIL = -196, + /** Broker transport failure */ + RD_KAFKA_RESP_ERR__TRANSPORT = -195, + /** Critical system resource */ + RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194, + /** Failed to resolve broker */ + RD_KAFKA_RESP_ERR__RESOLVE = -193, + /** Produced message timed out*/ + RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192, + /** Reached the end of the topic+partition queue on + * the broker. Not really an error. + * This event is disabled by default, + * see the `enable.partition.eof` configuration property. 
*/ + RD_KAFKA_RESP_ERR__PARTITION_EOF = -191, + /** Permanent: Partition does not exist in cluster. */ + RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190, + /** File or filesystem error */ + RD_KAFKA_RESP_ERR__FS = -189, + /** Permanent: Topic does not exist in cluster. */ + RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188, + /** All broker connections are down. */ + RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187, + /** Invalid argument, or invalid configuration */ + RD_KAFKA_RESP_ERR__INVALID_ARG = -186, + /** Operation timed out */ + RD_KAFKA_RESP_ERR__TIMED_OUT = -185, + /** Queue is full */ + RD_KAFKA_RESP_ERR__QUEUE_FULL = -184, + /** ISR count < required.acks */ RD_KAFKA_RESP_ERR__ISR_INSUFF = -183, - /** Broker node update */ + /** Broker node update */ RD_KAFKA_RESP_ERR__NODE_UPDATE = -182, - /** SSL error */ - RD_KAFKA_RESP_ERR__SSL = -181, - /** Waiting for coordinator to become available. */ + /** SSL error */ + RD_KAFKA_RESP_ERR__SSL = -181, + /** Waiting for coordinator to become available. */ RD_KAFKA_RESP_ERR__WAIT_COORD = -180, - /** Unknown client group */ + /** Unknown client group */ RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = -179, - /** Operation in progress */ + /** Operation in progress */ RD_KAFKA_RESP_ERR__IN_PROGRESS = -178, - /** Previous operation in progress, wait for it to finish. */ + /** Previous operation in progress, wait for it to finish. */ RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = -177, - /** This operation would interfere with an existing subscription */ + /** This operation would interfere with an existing subscription */ RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = -176, - /** Assigned partitions (rebalance_cb) */ + /** Assigned partitions (rebalance_cb) */ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175, - /** Revoked partitions (rebalance_cb) */ + /** Revoked partitions (rebalance_cb) */ RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174, - /** Conflicting use */ + /** Conflicting use */ RD_KAFKA_RESP_ERR__CONFLICT = -173, - /** Wrong state */ + /** Wrong state */ RD_KAFKA_RESP_ERR__STATE = -172, - /** Unknown protocol */ + /** Unknown protocol */ RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = -171, - /** Not implemented */ + /** Not implemented */ RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = -170, - /** Authentication failure*/ - RD_KAFKA_RESP_ERR__AUTHENTICATION = -169, - /** No stored offset */ - RD_KAFKA_RESP_ERR__NO_OFFSET = -168, - /** Outdated */ - RD_KAFKA_RESP_ERR__OUTDATED = -167, - /** Timed out in queue */ - RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166, + /** Authentication failure*/ + RD_KAFKA_RESP_ERR__AUTHENTICATION = -169, + /** No stored offset */ + RD_KAFKA_RESP_ERR__NO_OFFSET = -168, + /** Outdated */ + RD_KAFKA_RESP_ERR__OUTDATED = -167, + /** Timed out in queue */ + RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166, /** Feature not supported by broker */ RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = -165, /** Awaiting cache update */ @@ -393,109 +403,109 @@ typedef enum { /** No offset to automatically reset to */ RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = -140, - /** End internal error codes */ - RD_KAFKA_RESP_ERR__END = -100, - - /* Kafka broker errors: */ - /** Unknown broker error */ - RD_KAFKA_RESP_ERR_UNKNOWN = -1, - /** Success */ - RD_KAFKA_RESP_ERR_NO_ERROR = 0, - /** Offset out of range */ - RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1, - /** Invalid message */ - RD_KAFKA_RESP_ERR_INVALID_MSG = 2, - /** Unknown topic or partition */ - RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3, - /** Invalid message size */ - RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4, - /** Leader not available */ - 
RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5, - /** Not leader for partition */ - RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6, - /** Request timed out */ - RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7, - /** Broker not available */ - RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8, - /** Replica not available */ - RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9, - /** Message size too large */ - RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10, - /** StaleControllerEpochCode */ - RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11, - /** Offset metadata string too large */ - RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12, - /** Broker disconnected before response received */ - RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13, + /** End internal error codes */ + RD_KAFKA_RESP_ERR__END = -100, + + /* Kafka broker errors: */ + /** Unknown broker error */ + RD_KAFKA_RESP_ERR_UNKNOWN = -1, + /** Success */ + RD_KAFKA_RESP_ERR_NO_ERROR = 0, + /** Offset out of range */ + RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1, + /** Invalid message */ + RD_KAFKA_RESP_ERR_INVALID_MSG = 2, + /** Unknown topic or partition */ + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3, + /** Invalid message size */ + RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4, + /** Leader not available */ + RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5, + /** Not leader for partition */ + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6, + /** Request timed out */ + RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7, + /** Broker not available */ + RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8, + /** Replica not available */ + RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9, + /** Message size too large */ + RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10, + /** StaleControllerEpochCode */ + RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11, + /** Offset metadata string too large */ + RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12, + /** Broker disconnected before response received */ + RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13, /** Coordinator load in progress */ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14, - /** Group coordinator load in progress */ -#define RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS \ +/** Group coordinator load in progress */ +#define RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS \ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS /** Coordinator not available */ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15, - /** Group coordinator not available */ -#define RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE \ +/** Group coordinator not available */ +#define RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE \ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE /** Not coordinator */ RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16, - /** Not coordinator for group */ -#define RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP \ +/** Not coordinator for group */ +#define RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP \ RD_KAFKA_RESP_ERR_NOT_COORDINATOR - /** Invalid topic */ + /** Invalid topic */ RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17, - /** Message batch larger than configured server segment size */ + /** Message batch larger than configured server segment size */ RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18, - /** Not enough in-sync replicas */ + /** Not enough in-sync replicas */ RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19, - /** Message(s) written to insufficient number of in-sync replicas */ + /** Message(s) written to insufficient number of in-sync replicas */ RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20, - /** Invalid required acks value */ + /** Invalid required acks value */ 
RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21, - /** Specified group generation id is not valid */ + /** Specified group generation id is not valid */ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22, - /** Inconsistent group protocol */ + /** Inconsistent group protocol */ RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23, - /** Invalid group.id */ - RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24, - /** Unknown member */ + /** Invalid group.id */ + RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24, + /** Unknown member */ RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25, - /** Invalid session timeout */ + /** Invalid session timeout */ RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26, - /** Group rebalance in progress */ - RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27, - /** Commit offset data size is not valid */ + /** Group rebalance in progress */ + RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27, + /** Commit offset data size is not valid */ RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28, - /** Topic authorization failed */ + /** Topic authorization failed */ RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29, - /** Group authorization failed */ - RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30, - /** Cluster authorization failed */ - RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31, - /** Invalid timestamp */ - RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32, - /** Unsupported SASL mechanism */ - RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33, - /** Illegal SASL state */ - RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34, - /** Unuspported version */ - RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35, - /** Topic already exists */ - RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36, - /** Invalid number of partitions */ - RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37, - /** Invalid replication factor */ - RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38, - /** Invalid replica assignment */ - RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39, - /** Invalid config */ - RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40, - /** Not controller for cluster */ - RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41, - /** Invalid request */ - RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42, - /** Message format on broker does not support request */ - RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, + /** Group authorization failed */ + RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30, + /** Cluster authorization failed */ + RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31, + /** Invalid timestamp */ + RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32, + /** Unsupported SASL mechanism */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33, + /** Illegal SASL state */ + RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34, + /** Unuspported version */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35, + /** Topic already exists */ + RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36, + /** Invalid number of partitions */ + RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37, + /** Invalid replication factor */ + RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38, + /** Invalid replica assignment */ + RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39, + /** Invalid config */ + RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40, + /** Not controller for cluster */ + RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41, + /** Invalid request */ + RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42, + /** Message format on broker does not support request */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43, /** Policy violation */ RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44, /** Broker received an out of order sequence number */ @@ -527,7 +537,8 @@ typedef 
enum { RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55, /** Disk error when trying to access log file on the disk */ RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56, - /** The user-specified log directory is not found in the broker config */ + /** The user-specified log directory is not found in the broker config + */ RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57, /** SASL Authentication failed */ RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58, @@ -624,9 +635,9 @@ typedef enum { * the full set of librdkafka error codes. */ struct rd_kafka_err_desc { - rd_kafka_resp_err_t code;/**< Error code */ - const char *name; /**< Error name, same as code enum sans prefix */ - const char *desc; /**< Human readable error description. */ + rd_kafka_resp_err_t code; /**< Error code */ + const char *name; /**< Error name, same as code enum sans prefix */ + const char *desc; /**< Human readable error description. */ }; @@ -634,9 +645,8 @@ struct rd_kafka_err_desc { * @brief Returns the full list of error codes. */ RD_EXPORT -void rd_kafka_get_err_descs (const struct rd_kafka_err_desc **errdescs, - size_t *cntp); - +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, + size_t *cntp); @@ -646,7 +656,7 @@ void rd_kafka_get_err_descs (const struct rd_kafka_err_desc **errdescs, * @param err Error code to translate */ RD_EXPORT -const char *rd_kafka_err2str (rd_kafka_resp_err_t err); +const char *rd_kafka_err2str(rd_kafka_resp_err_t err); @@ -656,7 +666,7 @@ const char *rd_kafka_err2str (rd_kafka_resp_err_t err); * @param err Error code to translate */ RD_EXPORT -const char *rd_kafka_err2name (rd_kafka_resp_err_t err); +const char *rd_kafka_err2name(rd_kafka_resp_err_t err); /** @@ -685,7 +695,7 @@ const char *rd_kafka_err2name (rd_kafka_resp_err_t err); * and should not be used, use rd_kafka_last_error() instead. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_last_error (void); +rd_kafka_resp_err_t rd_kafka_last_error(void); /** @@ -712,8 +722,7 @@ rd_kafka_resp_err_t rd_kafka_last_error (void); * * @sa rd_kafka_last_error() */ -RD_EXPORT RD_DEPRECATED -rd_kafka_resp_err_t rd_kafka_errno2err(int errnox); +RD_EXPORT RD_DEPRECATED rd_kafka_resp_err_t rd_kafka_errno2err(int errnox); /** @@ -728,9 +737,7 @@ rd_kafka_resp_err_t rd_kafka_errno2err(int errnox); * @deprecated Use rd_kafka_last_error() to retrieve the last error code * set by the legacy librdkafka APIs. */ -RD_EXPORT RD_DEPRECATED -int rd_kafka_errno (void); - +RD_EXPORT RD_DEPRECATED int rd_kafka_errno(void); @@ -761,8 +768,8 @@ int rd_kafka_errno (void); * any other error code. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_fatal_error (rd_kafka_t *rk, - char *errstr, size_t errstr_size); +rd_kafka_resp_err_t +rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size); /** @@ -782,9 +789,9 @@ rd_kafka_resp_err_t rd_kafka_fatal_error (rd_kafka_t *rk, * RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous fatal error * has already been triggered. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_test_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, - const char *reason); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + const char *reason); /** @@ -792,7 +799,7 @@ rd_kafka_test_fatal_error (rd_kafka_t *rk, rd_kafka_resp_err_t err, * \p error is NULL. 
*/ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_error_code (const rd_kafka_error_t *error); +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error); /** * @returns the error code name for \p error, e.g, "ERR_UNKNOWN_MEMBER_ID", @@ -803,7 +810,7 @@ rd_kafka_resp_err_t rd_kafka_error_code (const rd_kafka_error_t *error); * @sa rd_kafka_err2name() */ RD_EXPORT -const char *rd_kafka_error_name (const rd_kafka_error_t *error); +const char *rd_kafka_error_name(const rd_kafka_error_t *error); /** * @returns a human readable error string for \p error, @@ -812,7 +819,7 @@ const char *rd_kafka_error_name (const rd_kafka_error_t *error); * @remark The lifetime of the returned pointer is the same as the error object. */ RD_EXPORT -const char *rd_kafka_error_string (const rd_kafka_error_t *error); +const char *rd_kafka_error_string(const rd_kafka_error_t *error); /** @@ -820,7 +827,7 @@ const char *rd_kafka_error_string (const rd_kafka_error_t *error); * instance is no longer usable, else 0 (also if \p error is NULL). */ RD_EXPORT -int rd_kafka_error_is_fatal (const rd_kafka_error_t *error); +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error); /** @@ -828,7 +835,7 @@ int rd_kafka_error_is_fatal (const rd_kafka_error_t *error); * else 0 (also if \p error is NULL). */ RD_EXPORT -int rd_kafka_error_is_retriable (const rd_kafka_error_t *error); +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error); /** @@ -842,7 +849,7 @@ int rd_kafka_error_is_retriable (const rd_kafka_error_t *error); * by the transactional API. */ RD_EXPORT -int rd_kafka_error_txn_requires_abort (const rd_kafka_error_t *error); +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error); /** * @brief Free and destroy an error object. @@ -850,7 +857,7 @@ int rd_kafka_error_txn_requires_abort (const rd_kafka_error_t *error); * @remark As a conveniance it is permitted to pass a NULL \p error. */ RD_EXPORT -void rd_kafka_error_destroy (rd_kafka_error_t *error); +void rd_kafka_error_destroy(rd_kafka_error_t *error); /** @@ -862,9 +869,9 @@ void rd_kafka_error_destroy (rd_kafka_error_t *error); * The returned object must be destroyed with rd_kafka_error_destroy(). */ RD_EXPORT -rd_kafka_error_t *rd_kafka_error_new (rd_kafka_resp_err_t code, - const char *fmt, ...) - RD_FORMAT(printf, 2, 3); +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, + const char *fmt, + ...) RD_FORMAT(printf, 2, 3); /** @@ -883,15 +890,15 @@ rd_kafka_error_t *rd_kafka_error_new (rd_kafka_resp_err_t code, * @sa rd_kafka_topic_partition_list_new() */ typedef struct rd_kafka_topic_partition_s { - char *topic; /**< Topic name */ - int32_t partition; /**< Partition */ - int64_t offset; /**< Offset */ - void *metadata; /**< Metadata */ - size_t metadata_size; /**< Metadata size */ - void *opaque; /**< Opaque value for application use */ - rd_kafka_resp_err_t err; /**< Error code, depending on use. */ - void *_private; /**< INTERNAL USE ONLY, - * INITIALIZE TO ZERO, DO NOT TOUCH */ + char *topic; /**< Topic name */ + int32_t partition; /**< Partition */ + int64_t offset; /**< Offset */ + void *metadata; /**< Metadata */ + size_t metadata_size; /**< Metadata size */ + void *opaque; /**< Opaque value for application use */ + rd_kafka_resp_err_t err; /**< Error code, depending on use. 
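The rd_kafka_error_t accessors reindented above are normally used together whenever one of the newer APIs hands back an error object. A small sketch built only from the functions declared in these hunks; the helper's return-value convention is illustrative, not part of the API:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Log an error object and decide whether the failed operation is worth
 * retrying. Consumes (destroys) the error object. */
static int handle_error(rd_kafka_error_t *error) {
        int retriable;

        if (!error)
                return 0; /* NULL means success */

        fprintf(stderr, "%s: %s%s\n",
                rd_kafka_error_name(error),
                rd_kafka_error_string(error),
                rd_kafka_error_is_fatal(error) ? " (fatal)" : "");

        retriable = rd_kafka_error_is_retriable(error);
        rd_kafka_error_destroy(error);
        return retriable ? 1 : -1;
}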
*/ + void *_private; /**< INTERNAL USE ONLY, + * INITIALIZE TO ZERO, DO NOT TOUCH */ } rd_kafka_topic_partition_t; @@ -900,7 +907,7 @@ typedef struct rd_kafka_topic_partition_s { * @remark This must not be called for elements in a topic partition list. */ RD_EXPORT -void rd_kafka_topic_partition_destroy (rd_kafka_topic_partition_t *rktpar); +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar); /** @@ -908,8 +915,8 @@ void rd_kafka_topic_partition_destroy (rd_kafka_topic_partition_t *rktpar); * */ typedef struct rd_kafka_topic_partition_list_s { - int cnt; /**< Current number of elements */ - int size; /**< Current allocated size */ + int cnt; /**< Current number of elements */ + int size; /**< Current allocated size */ rd_kafka_topic_partition_t *elems; /**< Element array[] */ } rd_kafka_topic_partition_list_t; @@ -929,15 +936,15 @@ typedef struct rd_kafka_topic_partition_list_s { * @sa rd_kafka_topic_partition_list_add() */ RD_EXPORT -rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new (int size); +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size); /** * @brief Free all resources used by the list and the list itself. */ RD_EXPORT -void -rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rkparlist); +void rd_kafka_topic_partition_list_destroy( + rd_kafka_topic_partition_list_t *rkparlist); /** * @brief Add topic+partition to list @@ -950,8 +957,9 @@ rd_kafka_topic_partition_list_destroy (rd_kafka_topic_partition_list_t *rkparlis */ RD_EXPORT rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_add (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); +rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); /** @@ -963,11 +971,11 @@ rd_kafka_topic_partition_list_add (rd_kafka_topic_partition_list_t *rktparlist, * @param stop Last partition of range (inclusive) */ RD_EXPORT -void -rd_kafka_topic_partition_list_add_range (rd_kafka_topic_partition_list_t - *rktparlist, - const char *topic, - int32_t start, int32_t stop); +void rd_kafka_topic_partition_list_add_range( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t start, + int32_t stop); @@ -983,9 +991,10 @@ rd_kafka_topic_partition_list_add_range (rd_kafka_topic_partition_list_t * @remark Any held indices to elems[] are unusable after this call returns 1. */ RD_EXPORT -int -rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); +int rd_kafka_topic_partition_list_del( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); /** @@ -996,10 +1005,9 @@ rd_kafka_topic_partition_list_del (rd_kafka_topic_partition_list_t *rktparlist, * @sa rd_kafka_topic_partition_list_del() */ RD_EXPORT -int -rd_kafka_topic_partition_list_del_by_idx ( - rd_kafka_topic_partition_list_t *rktparlist, - int idx); +int rd_kafka_topic_partition_list_del_by_idx( + rd_kafka_topic_partition_list_t *rktparlist, + int idx); /** @@ -1011,8 +1019,7 @@ rd_kafka_topic_partition_list_del_by_idx ( */ RD_EXPORT rd_kafka_topic_partition_list_t * -rd_kafka_topic_partition_list_copy (const rd_kafka_topic_partition_list_t *src); - +rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src); @@ -1024,9 +1031,11 @@ rd_kafka_topic_partition_list_copy (const rd_kafka_topic_partition_list_t *src); * in the list. 
*/ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset ( - rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition, int64_t offset); +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset( + rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition, + int64_t offset); @@ -1036,10 +1045,10 @@ rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset ( * @returns a pointer to the first matching element, or NULL if not found. */ RD_EXPORT -rd_kafka_topic_partition_t * -rd_kafka_topic_partition_list_find ( - const rd_kafka_topic_partition_list_t *rktparlist, - const char *topic, int32_t partition); +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find( + const rd_kafka_topic_partition_list_t *rktparlist, + const char *topic, + int32_t partition); /** @@ -1051,11 +1060,10 @@ rd_kafka_topic_partition_list_find ( * \p cmp_opaque is provided as the \p cmp_opaque argument to \p cmp. * */ -RD_EXPORT void -rd_kafka_topic_partition_list_sort (rd_kafka_topic_partition_list_t *rktparlist, - int (*cmp) (const void *a, const void *b, - void *cmp_opaque), - void *cmp_opaque); +RD_EXPORT void rd_kafka_topic_partition_list_sort( + rd_kafka_topic_partition_list_t *rktparlist, + int (*cmp)(const void *a, const void *b, void *cmp_opaque), + void *cmp_opaque); /**@}*/ @@ -1080,14 +1088,14 @@ typedef enum rd_kafka_vtype_t { RD_KAFKA_VTYPE_TOPIC, /**< (const char *) Topic name */ RD_KAFKA_VTYPE_RKT, /**< (rd_kafka_topic_t *) Topic handle */ RD_KAFKA_VTYPE_PARTITION, /**< (int32_t) Partition */ - RD_KAFKA_VTYPE_VALUE, /**< (void *, size_t) Message value (payload)*/ - RD_KAFKA_VTYPE_KEY, /**< (void *, size_t) Message key */ - RD_KAFKA_VTYPE_OPAQUE, /**< (void *) Per-message application opaque - * value. This is the same as - * the _private field in - * rd_kafka_message_t, also known - * as the msg_opaque. */ - RD_KAFKA_VTYPE_MSGFLAGS, /**< (int) RD_KAFKA_MSG_F_.. flags */ + RD_KAFKA_VTYPE_VALUE, /**< (void *, size_t) Message value (payload)*/ + RD_KAFKA_VTYPE_KEY, /**< (void *, size_t) Message key */ + RD_KAFKA_VTYPE_OPAQUE, /**< (void *) Per-message application opaque + * value. This is the same as + * the _private field in + * rd_kafka_message_t, also known + * as the msg_opaque. */ + RD_KAFKA_VTYPE_MSGFLAGS, /**< (int) RD_KAFKA_MSG_F_.. flags */ RD_KAFKA_VTYPE_TIMESTAMP, /**< (int64_t) Milliseconds since epoch UTC */ RD_KAFKA_VTYPE_HEADER, /**< (const char *, const void *, ssize_t) * Message Header */ @@ -1102,7 +1110,7 @@ typedef enum rd_kafka_vtype_t { * to which RD_KAFKA_VTYPE_... */ typedef struct rd_kafka_vu_s { - rd_kafka_vtype_t vtype; /**< RD_KAFKA_VTYPE_.. */ + rd_kafka_vtype_t vtype; /**< RD_KAFKA_VTYPE_.. */ /** Value union, see RD_KAFKA_V_.. macros for which field to use. */ union { const char *cstr; @@ -1121,7 +1129,7 @@ typedef struct rd_kafka_vu_s { } header; rd_kafka_headers_t *headers; void *ptr; - char _pad[64]; /**< Padding size for future-proofness */ + char _pad[64]; /**< Padding size for future-proofness */ } u; } rd_kafka_vu_t; @@ -1140,41 +1148,41 @@ typedef struct rd_kafka_vu_s { * * rd_kafka_vu_t field: u.cstr */ -#define RD_KAFKA_V_TOPIC(topic) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_TOPIC, const char *, topic), \ - (const char *)topic +#define RD_KAFKA_V_TOPIC(topic) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_TOPIC, const char *, topic), \ + (const char *)topic /*! 
* Topic object (rd_kafka_topic_t *) * * rd_kafka_vu_t field: u.rkt */ -#define RD_KAFKA_V_RKT(rkt) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_RKT, rd_kafka_topic_t *, rkt), \ - (rd_kafka_topic_t *)rkt +#define RD_KAFKA_V_RKT(rkt) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_RKT, rd_kafka_topic_t *, rkt), \ + (rd_kafka_topic_t *)rkt /*! * Partition (int32_t) * * rd_kafka_vu_t field: u.i32 */ -#define RD_KAFKA_V_PARTITION(partition) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_PARTITION, int32_t, partition), \ - (int32_t)partition +#define RD_KAFKA_V_PARTITION(partition) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_PARTITION, int32_t, partition), \ + (int32_t)partition /*! * Message value/payload pointer and length (void *, size_t) * * rd_kafka_vu_t fields: u.mem.ptr, u.mem.size */ -#define RD_KAFKA_V_VALUE(VALUE,LEN) \ - _LRK_TYPECHECK2(RD_KAFKA_VTYPE_VALUE, void *, VALUE, size_t, LEN), \ - (void *)VALUE, (size_t)LEN +#define RD_KAFKA_V_VALUE(VALUE, LEN) \ + _LRK_TYPECHECK2(RD_KAFKA_VTYPE_VALUE, void *, VALUE, size_t, LEN), \ + (void *)VALUE, (size_t)LEN /*! * Message key pointer and length (const void *, size_t) * * rd_kafka_vu_t field: u.mem.ptr, rd_kafka_vu.t.u.mem.size */ -#define RD_KAFKA_V_KEY(KEY,LEN) \ - _LRK_TYPECHECK2(RD_KAFKA_VTYPE_KEY, const void *, KEY, size_t, LEN), \ - (void *)KEY, (size_t)LEN +#define RD_KAFKA_V_KEY(KEY, LEN) \ + _LRK_TYPECHECK2(RD_KAFKA_VTYPE_KEY, const void *, KEY, size_t, LEN), \ + (void *)KEY, (size_t)LEN /*! * Message opaque pointer (void *) * Same as \c msg_opaque, \c produce(.., msg_opaque), @@ -1182,27 +1190,26 @@ typedef struct rd_kafka_vu_s { * * rd_kafka_vu_t field: u.ptr */ -#define RD_KAFKA_V_OPAQUE(msg_opaque) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_OPAQUE, void *, msg_opaque), \ - (void *)msg_opaque +#define RD_KAFKA_V_OPAQUE(msg_opaque) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_OPAQUE, void *, msg_opaque), \ + (void *)msg_opaque /*! * Message flags (int) * @sa RD_KAFKA_MSG_F_COPY, et.al. * * rd_kafka_vu_t field: u.i */ -#define RD_KAFKA_V_MSGFLAGS(msgflags) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_MSGFLAGS, int, msgflags), \ - (int)msgflags +#define RD_KAFKA_V_MSGFLAGS(msgflags) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_MSGFLAGS, int, msgflags), (int)msgflags /*! * Timestamp in milliseconds since epoch UTC (int64_t). * A value of 0 will use the current wall-clock time. * * rd_kafka_vu_t field: u.i64 */ -#define RD_KAFKA_V_TIMESTAMP(timestamp) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_TIMESTAMP, int64_t, timestamp), \ - (int64_t)timestamp +#define RD_KAFKA_V_TIMESTAMP(timestamp) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_TIMESTAMP, int64_t, timestamp), \ + (int64_t)timestamp /*! * Add Message Header (const char *NAME, const void *VALUE, ssize_t LEN). * @sa rd_kafka_header_add() @@ -1211,10 +1218,10 @@ typedef struct rd_kafka_vu_s { * * rd_kafka_vu_t fields: u.header.name, u.header.val, u.header.size */ -#define RD_KAFKA_V_HEADER(NAME,VALUE,LEN) \ - _LRK_TYPECHECK3(RD_KAFKA_VTYPE_HEADER, const char *, NAME, \ - const void *, VALUE, ssize_t, LEN), \ - (const char *)NAME, (const void *)VALUE, (ssize_t)LEN +#define RD_KAFKA_V_HEADER(NAME, VALUE, LEN) \ + _LRK_TYPECHECK3(RD_KAFKA_VTYPE_HEADER, const char *, NAME, \ + const void *, VALUE, ssize_t, LEN), \ + (const char *)NAME, (const void *)VALUE, (ssize_t)LEN /*! * Message Headers list (rd_kafka_headers_t *). 
@@ -1227,9 +1234,9 @@ typedef struct rd_kafka_vu_s { * * rd_kafka_vu_t fields: u.headers */ -#define RD_KAFKA_V_HEADERS(HDRS) \ - _LRK_TYPECHECK(RD_KAFKA_VTYPE_HEADERS, rd_kafka_headers_t *, HDRS), \ - (rd_kafka_headers_t *)HDRS +#define RD_KAFKA_V_HEADERS(HDRS) \ + _LRK_TYPECHECK(RD_KAFKA_VTYPE_HEADERS, rd_kafka_headers_t *, HDRS), \ + (rd_kafka_headers_t *)HDRS /**@}*/ @@ -1262,19 +1269,19 @@ typedef struct rd_kafka_vu_s { * Any number of headers may be added, updated and * removed regardless of the initial count. */ -RD_EXPORT rd_kafka_headers_t *rd_kafka_headers_new (size_t initial_count); +RD_EXPORT rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count); /** * @brief Destroy the headers list. The object and any returned value pointers * are not usable after this call. */ -RD_EXPORT void rd_kafka_headers_destroy (rd_kafka_headers_t *hdrs); +RD_EXPORT void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs); /** * @brief Make a copy of headers list \p src. */ RD_EXPORT rd_kafka_headers_t * -rd_kafka_headers_copy (const rd_kafka_headers_t *src); +rd_kafka_headers_copy(const rd_kafka_headers_t *src); /** * @brief Add header with name \p name and value \p val (copied) of size @@ -1293,10 +1300,11 @@ rd_kafka_headers_copy (const rd_kafka_headers_t *src); * @returns RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only, * else RD_KAFKA_RESP_ERR_NO_ERROR. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_add (rd_kafka_headers_t *hdrs, - const char *name, ssize_t name_size, - const void *value, ssize_t value_size); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, + const char *name, + ssize_t name_size, + const void *value, + ssize_t value_size); /** * @brief Remove all headers for the given key (if any). @@ -1305,8 +1313,8 @@ rd_kafka_header_add (rd_kafka_headers_t *hdrs, * RD_KAFKA_RESP_ERR__NOENT if no matching headers were found, * else RD_KAFKA_RESP_ERR_NO_ERROR if headers were removed. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_remove (rd_kafka_headers_t *hdrs, const char *name); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, + const char *name); /** @@ -1327,8 +1335,10 @@ rd_kafka_header_remove (rd_kafka_headers_t *hdrs, const char *name); * the header item is valid. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_get_last (const rd_kafka_headers_t *hdrs, - const char *name, const void **valuep, size_t *sizep); +rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, + const char *name, + const void **valuep, + size_t *sizep); /** * @brief Iterator for headers matching \p name. @@ -1344,8 +1354,11 @@ rd_kafka_header_get_last (const rd_kafka_headers_t *hdrs, * @param sizep (out) Set to the value's size (not including null-terminator). 
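A short sketch of the header-list API whose declarations are rewrapped above, using rd_kafka_headers_new(), rd_kafka_header_add(), rd_kafka_header_get_last() and rd_kafka_headers_destroy(); it relies on the documented convention that a size of -1 makes librdkafka take the length of a NUL-terminated string itself:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static void headers_demo(void) {
        rd_kafka_headers_t *hdrs = rd_kafka_headers_new(4);
        const void *value;
        size_t size;

        /* -1 sizes: name and value are NUL-terminated strings. */
        rd_kafka_header_add(hdrs, "trace-id", -1, "abc123", -1);

        /* Returns RD_KAFKA_RESP_ERR_NO_ERROR (0) if the header exists;
         * the returned pointer is owned by hdrs. */
        if (!rd_kafka_header_get_last(hdrs, "trace-id", &value, &size))
                printf("trace-id = %.*s\n", (int)size, (const char *)value);

        rd_kafka_headers_destroy(hdrs);
}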
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_get (const rd_kafka_headers_t *hdrs, size_t idx, - const char *name, const void **valuep, size_t *sizep); +rd_kafka_header_get(const rd_kafka_headers_t *hdrs, + size_t idx, + const char *name, + const void **valuep, + size_t *sizep); /** @@ -1356,9 +1369,11 @@ rd_kafka_header_get (const rd_kafka_headers_t *hdrs, size_t idx, * @sa rd_kafka_header_get() */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_header_get_all (const rd_kafka_headers_t *hdrs, size_t idx, - const char **namep, - const void **valuep, size_t *sizep); +rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, + size_t idx, + const char **namep, + const void **valuep, + size_t *sizep); @@ -1391,34 +1406,34 @@ rd_kafka_header_get_all (const rd_kafka_headers_t *hdrs, size_t idx, * rd_kafka_message_destroy() unless otherwise noted. */ typedef struct rd_kafka_message_s { - rd_kafka_resp_err_t err; /**< Non-zero for error signaling. */ - rd_kafka_topic_t *rkt; /**< Topic */ - int32_t partition; /**< Partition */ - void *payload; /**< Producer: original message payload. - * Consumer: Depends on the value of \c err : - * - \c err==0: Message payload. - * - \c err!=0: Error string */ - size_t len; /**< Depends on the value of \c err : - * - \c err==0: Message payload length - * - \c err!=0: Error string length */ - void *key; /**< Depends on the value of \c err : - * - \c err==0: Optional message key */ - size_t key_len; /**< Depends on the value of \c err : - * - \c err==0: Optional message key length*/ - int64_t offset; /**< Consumer: - * - Message offset (or offset for error - * if \c err!=0 if applicable). - * Producer, dr_msg_cb: - * Message offset assigned by broker. - * May be RD_KAFKA_OFFSET_INVALID - * for retried messages when - * idempotence is enabled. */ - void *_private; /**< Consumer: - * - rdkafka private pointer: DO NOT MODIFY - * Producer: - * - dr_msg_cb: - * msg_opaque from produce() call or - * RD_KAFKA_V_OPAQUE from producev(). */ + rd_kafka_resp_err_t err; /**< Non-zero for error signaling. */ + rd_kafka_topic_t *rkt; /**< Topic */ + int32_t partition; /**< Partition */ + void *payload; /**< Producer: original message payload. + * Consumer: Depends on the value of \c err : + * - \c err==0: Message payload. + * - \c err!=0: Error string */ + size_t len; /**< Depends on the value of \c err : + * - \c err==0: Message payload length + * - \c err!=0: Error string length */ + void *key; /**< Depends on the value of \c err : + * - \c err==0: Optional message key */ + size_t key_len; /**< Depends on the value of \c err : + * - \c err==0: Optional message key length*/ + int64_t offset; /**< Consumer: + * - Message offset (or offset for error + * if \c err!=0 if applicable). + * Producer, dr_msg_cb: + * Message offset assigned by broker. + * May be RD_KAFKA_OFFSET_INVALID + * for retried messages when + * idempotence is enabled. */ + void *_private; /**< Consumer: + * - rdkafka private pointer: DO NOT MODIFY + * Producer: + * - dr_msg_cb: + * msg_opaque from produce() call or + * RD_KAFKA_V_OPAQUE from producev(). */ } rd_kafka_message_t; @@ -1430,7 +1445,6 @@ void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage); - /** * @brief Returns the error string for an errored rd_kafka_message_t or NULL if * there was no error. @@ -1438,7 +1452,7 @@ void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage); * @remark This function MUST NOT be used with the producer. 
*/ RD_EXPORT -const char *rd_kafka_message_errstr (const rd_kafka_message_t *rkmessage); +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage); /** @@ -1453,8 +1467,8 @@ const char *rd_kafka_message_errstr (const rd_kafka_message_t *rkmessage); * @remark Message timestamps require broker version 0.10.0 or later. */ RD_EXPORT -int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage, - rd_kafka_timestamp_type_t *tstype); +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, + rd_kafka_timestamp_type_t *tstype); @@ -1465,7 +1479,7 @@ int64_t rd_kafka_message_timestamp (const rd_kafka_message_t *rkmessage, * @returns the latency in microseconds, or -1 if not available. */ RD_EXPORT -int64_t rd_kafka_message_latency (const rd_kafka_message_t *rkmessage); +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage); /** @@ -1475,7 +1489,7 @@ int64_t rd_kafka_message_latency (const rd_kafka_message_t *rkmessage); * @returns a broker id if known, else -1. */ RD_EXPORT -int32_t rd_kafka_message_broker_id (const rd_kafka_message_t *rkmessage); +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage); /** @@ -1495,8 +1509,8 @@ int32_t rd_kafka_message_broker_id (const rd_kafka_message_t *rkmessage); * the first call to this function. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_message_headers (const rd_kafka_message_t *rkmessage, - rd_kafka_headers_t **hdrsp); +rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, + rd_kafka_headers_t **hdrsp); /** * @brief Get the message header list and detach the list from the message @@ -1510,8 +1524,8 @@ rd_kafka_message_headers (const rd_kafka_message_t *rkmessage, * @sa rd_kafka_message_headers */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_message_detach_headers (rd_kafka_message_t *rkmessage, - rd_kafka_headers_t **hdrsp); +rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, + rd_kafka_headers_t **hdrsp); /** @@ -1527,8 +1541,8 @@ rd_kafka_message_detach_headers (rd_kafka_message_t *rkmessage, * @remark The existing headers object, if any, will be destroyed. */ RD_EXPORT -void rd_kafka_message_set_headers (rd_kafka_message_t *rkmessage, - rd_kafka_headers_t *hdrs); +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, + rd_kafka_headers_t *hdrs); /** @@ -1536,7 +1550,7 @@ void rd_kafka_message_set_headers (rd_kafka_message_t *rkmessage, * * @param hdrs Headers to count */ -RD_EXPORT size_t rd_kafka_header_cnt (const rd_kafka_headers_t *hdrs); +RD_EXPORT size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs); /** @@ -1558,7 +1572,7 @@ typedef enum { /** Message was written to the log and acknowledged by the broker. * No reason for application to retry. * Note: this value should only be trusted with \c acks=all. */ - RD_KAFKA_MSG_STATUS_PERSISTED = 2 + RD_KAFKA_MSG_STATUS_PERSISTED = 2 } rd_kafka_msg_status_t; @@ -1569,7 +1583,7 @@ typedef enum { * interceptors. */ RD_EXPORT rd_kafka_msg_status_t -rd_kafka_message_status (const rd_kafka_message_t *rkmessage); +rd_kafka_message_status(const rd_kafka_message_t *rkmessage); /**@}*/ @@ -1587,11 +1601,11 @@ rd_kafka_message_status (const rd_kafka_message_t *rkmessage); * @brief Configuration result type */ typedef enum { - RD_KAFKA_CONF_UNKNOWN = -2, /**< Unknown configuration name. */ - RD_KAFKA_CONF_INVALID = -1, /**< Invalid configuration value or + RD_KAFKA_CONF_UNKNOWN = -2, /**< Unknown configuration name. 
*/ + RD_KAFKA_CONF_INVALID = -1, /**< Invalid configuration value or * property or value not supported in * this build. */ - RD_KAFKA_CONF_OK = 0 /**< Configuration okay */ + RD_KAFKA_CONF_OK = 0 /**< Configuration okay */ } rd_kafka_conf_res_t; @@ -1654,9 +1668,9 @@ rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf); * prefixes to filter out (ignore) when copying. */ RD_EXPORT -rd_kafka_conf_t *rd_kafka_conf_dup_filter (const rd_kafka_conf_t *conf, - size_t filter_cnt, - const char **filter); +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, + size_t filter_cnt, + const char **filter); @@ -1669,7 +1683,7 @@ rd_kafka_conf_t *rd_kafka_conf_dup_filter (const rd_kafka_conf_t *conf, * as the rd_kafka_t object. */ RD_EXPORT -const rd_kafka_conf_t *rd_kafka_conf (rd_kafka_t *rk); +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk); /** @@ -1693,9 +1707,10 @@ const rd_kafka_conf_t *rd_kafka_conf (rd_kafka_t *rk); */ RD_EXPORT rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, - const char *name, - const char *value, - char *errstr, size_t errstr_size); + const char *name, + const char *value, + char *errstr, + size_t errstr_size); /** @@ -1742,11 +1757,9 @@ void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events); * * @sa rd_kafka_queue_get_background */ -RD_EXPORT void -rd_kafka_conf_set_background_event_cb (rd_kafka_conf_t *conf, - void (*event_cb) (rd_kafka_t *rk, - rd_kafka_event_t *rkev, - void *opaque)); +RD_EXPORT void rd_kafka_conf_set_background_event_cb( + rd_kafka_conf_t *conf, + void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque)); /** @@ -1754,10 +1767,12 @@ rd_kafka_conf_set_background_event_cb (rd_kafka_conf_t *conf, */ RD_EXPORT void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, - void (*dr_cb) (rd_kafka_t *rk, - void *payload, size_t len, - rd_kafka_resp_err_t err, - void *opaque, void *msg_opaque)); + void (*dr_cb)(rd_kafka_t *rk, + void *payload, + size_t len, + rd_kafka_resp_err_t err, + void *opaque, + void *msg_opaque)); /** * @brief \b Producer: Set delivery report callback in provided \p conf object. @@ -1790,11 +1805,11 @@ void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, * acknowledged. */ RD_EXPORT -void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, - void (*dr_msg_cb) (rd_kafka_t *rk, - const rd_kafka_message_t * - rkmessage, - void *opaque)); +void rd_kafka_conf_set_dr_msg_cb( + rd_kafka_conf_t *conf, + void (*dr_msg_cb)(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + void *opaque)); /** @@ -1805,10 +1820,9 @@ void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, * rd_kafka_conf_set_opaque(). */ RD_EXPORT -void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf, - void (*consume_cb) (rd_kafka_message_t * - rkmessage, - void *opaque)); +void rd_kafka_conf_set_consume_cb( + rd_kafka_conf_t *conf, + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque)); /** * @brief \b Consumer: Set rebalance callback for use with @@ -1915,12 +1929,12 @@ void rd_kafka_conf_set_consume_cb (rd_kafka_conf_t *conf, * the examples/ directory. 
*/ RD_EXPORT -void rd_kafka_conf_set_rebalance_cb ( - rd_kafka_conf_t *conf, - void (*rebalance_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *partitions, - void *opaque)); +void rd_kafka_conf_set_rebalance_cb( + rd_kafka_conf_t *conf, + void (*rebalance_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *partitions, + void *opaque)); @@ -1942,12 +1956,12 @@ void rd_kafka_conf_set_rebalance_cb ( * rd_kafka_conf_set_opaque(). */ RD_EXPORT -void rd_kafka_conf_set_offset_commit_cb ( - rd_kafka_conf_t *conf, - void (*offset_commit_cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *opaque)); +void rd_kafka_conf_set_offset_commit_cb( + rd_kafka_conf_t *conf, + void (*offset_commit_cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *opaque)); /** @@ -1974,9 +1988,10 @@ void rd_kafka_conf_set_offset_commit_cb ( */ RD_EXPORT void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, - void (*error_cb) (rd_kafka_t *rk, int err, - const char *reason, - void *opaque)); + void (*error_cb)(rd_kafka_t *rk, + int err, + const char *reason, + void *opaque)); /** * @brief Set throttle callback. @@ -1996,13 +2011,12 @@ void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, * @remark Requires broker version 0.9.0 or later. */ RD_EXPORT -void rd_kafka_conf_set_throttle_cb (rd_kafka_conf_t *conf, - void (*throttle_cb) ( - rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int throttle_time_ms, - void *opaque)); +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, + void (*throttle_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int throttle_time_ms, + void *opaque)); /** @@ -2023,8 +2037,10 @@ void rd_kafka_conf_set_throttle_cb (rd_kafka_conf_t *conf, */ RD_EXPORT void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, - void (*log_cb) (const rd_kafka_t *rk, int level, - const char *fac, const char *buf)); + void (*log_cb)(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf)); /** @@ -2050,11 +2066,9 @@ void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, * See STATISTICS.md for a full definition of the JSON object. */ RD_EXPORT -void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, - int (*stats_cb) (rd_kafka_t *rk, - char *json, - size_t json_len, - void *opaque)); +void rd_kafka_conf_set_stats_cb( + rd_kafka_conf_t *conf, + int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque)); /** * @brief Set SASL/OAUTHBEARER token refresh callback in provided conf object. @@ -2082,21 +2096,66 @@ void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, * * Note that before any SASL/OAUTHBEARER broker connection can succeed the * application must call rd_kafka_oauthbearer_set_token() once -- either - * directly or, more typically, by invoking either rd_kafka_poll() or - * rd_kafka_queue_poll() -- in order to cause retrieval of an initial token to - * occur. + * directly or, more typically, by invoking either rd_kafka_poll(), + * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), etc, in order to cause + * retrieval of an initial token to occur. 
+ * + * Alternatively, the application can enable the SASL queue by calling + * rd_kafka_conf_enable_sasl_queue() on the configuration object prior to + * creating the client instance, get the SASL queue with + * rd_kafka_queue_get_sasl(), and either serve the queue manually by calling + * rd_kafka_queue_poll(), or redirecting the queue to the background thread to + * have the queue served automatically. For the latter case the SASL queue + * must be forwarded to the background queue with rd_kafka_queue_forward(). + * A convenience function is available to automatically forward the SASL queue + * to librdkafka's background thread, see + * rd_kafka_sasl_background_callbacks_enable(). * * An unsecured JWT refresh handler is provided by librdkafka for development * and testing purposes, it is enabled by setting * the \c enable.sasl.oauthbearer.unsecure.jwt property to true and is * mutually exclusive to using a refresh callback. + * + * @sa rd_kafka_sasl_background_callbacks_enable() + * @sa rd_kafka_queue_get_sasl() + */ +RD_EXPORT +void rd_kafka_conf_set_oauthbearer_token_refresh_cb( + rd_kafka_conf_t *conf, + void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, + const char *oauthbearer_config, + void *opaque)); + +/** + * @brief Enable/disable creation of a queue specific to SASL events + * and callbacks. + * + * For SASL mechanisms that trigger callbacks (currently OAUTHBEARER) this + * configuration API allows an application to get a dedicated + * queue for the SASL events/callbacks. After enabling the queue with this API + * the application can retrieve the queue by calling + * rd_kafka_queue_get_sasl() on the client instance. + * This queue may then be served directly by the application + * (with rd_kafka_queue_poll(), et.al) or forwarded to another queue, such as + * the background queue. + * + * A convenience function is available to automatically forward the SASL queue + * to librdkafka's background thread, see + * rd_kafka_sasl_background_callbacks_enable(). + * + * By default (\p enable = 0) the main queue (as served by rd_kafka_poll(), + * et.al.) is used for SASL callbacks. + * + * @remark The SASL queue is currently only used by the SASL OAUTHBEARER + * mechanism's token_refresh_cb(). + * + * @sa rd_kafka_queue_get_sasl() + * @sa rd_kafka_sasl_background_callbacks_enable() */ + RD_EXPORT -void rd_kafka_conf_set_oauthbearer_token_refresh_cb ( - rd_kafka_conf_t *conf, - void (*oauthbearer_token_refresh_cb) (rd_kafka_t *rk, - const char *oauthbearer_config, - void *opaque)); +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable); + /** * @brief Set socket callback. @@ -2116,10 +2175,9 @@ void rd_kafka_conf_set_oauthbearer_token_refresh_cb ( * @remark The callback will be called from an internal librdkafka thread. */ RD_EXPORT -void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, - int (*socket_cb) (int domain, int type, - int protocol, - void *opaque)); +void rd_kafka_conf_set_socket_cb( + rd_kafka_conf_t *conf, + int (*socket_cb)(int domain, int type, int protocol, void *opaque)); @@ -2139,12 +2197,12 @@ void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, * @remark The callback will be called from an internal librdkafka thread. 
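The new SASL-queue documentation and the rd_kafka_conf_enable_sasl_queue() declaration added above describe the following flow for OAUTHBEARER applications that do not poll regularly. A hedged sketch, assuming the 1.9.0 signature rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk), which is referenced by the @sa notes but not declared in this excerpt:

#include <librdkafka/rdkafka.h>

/* Token refresh callback; a real implementation would fetch a token and
 * call rd_kafka_oauthbearer_set_token() (or _set_token_failure()). */
static void refresh_cb(rd_kafka_t *rk, const char *oauthbearer_config,
                       void *opaque) {
        (void)rk; (void)oauthbearer_config; (void)opaque;
}

static rd_kafka_t *new_oidc_client(rd_kafka_conf_t *conf,
                                   char *errstr, size_t errstr_size) {
        rd_kafka_t *rk;
        rd_kafka_error_t *error;

        rd_kafka_conf_set_oauthbearer_token_refresh_cb(conf, refresh_cb);
        /* Route SASL events to their own queue instead of the main queue. */
        rd_kafka_conf_enable_sasl_queue(conf, 1);

        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, errstr_size);
        if (!rk)
                return NULL;

        /* Convenience: let librdkafka's background thread serve the SASL
         * queue so the application never has to poll it explicitly. */
        error = rd_kafka_sasl_background_callbacks_enable(rk);
        if (error)
                rd_kafka_error_destroy(error); /* handle/log as needed */

        return rk;
}

The background-thread option exists for applications that call rd_kafka_poll()/rd_kafka_queue_poll() rarely or not at all; the alternative described above is to fetch the dedicated queue with rd_kafka_queue_get_sasl() and serve or forward it manually.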
*/ RD_EXPORT void -rd_kafka_conf_set_connect_cb (rd_kafka_conf_t *conf, - int (*connect_cb) (int sockfd, - const struct sockaddr *addr, - int addrlen, - const char *id, - void *opaque)); +rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, + int (*connect_cb)(int sockfd, + const struct sockaddr *addr, + int addrlen, + const char *id, + void *opaque)); /** * @brief Set close socket callback. @@ -2156,10 +2214,9 @@ rd_kafka_conf_set_connect_cb (rd_kafka_conf_t *conf, * * @remark The callback will be called from an internal librdkafka thread. */ -RD_EXPORT void -rd_kafka_conf_set_closesocket_cb (rd_kafka_conf_t *conf, - int (*closesocket_cb) (int sockfd, - void *opaque)); +RD_EXPORT void rd_kafka_conf_set_closesocket_cb( + rd_kafka_conf_t *conf, + int (*closesocket_cb)(int sockfd, void *opaque)); @@ -2182,10 +2239,9 @@ rd_kafka_conf_set_closesocket_cb (rd_kafka_conf_t *conf, * @remark The callback will be called from an internal librdkafka thread. */ RD_EXPORT -void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf, - int (*open_cb) (const char *pathname, - int flags, mode_t mode, - void *opaque)); +void rd_kafka_conf_set_open_cb( + rd_kafka_conf_t *conf, + int (*open_cb)(const char *pathname, int flags, mode_t mode, void *opaque)); #endif @@ -2231,16 +2287,18 @@ void rd_kafka_conf_set_open_cb (rd_kafka_conf_t *conf, * for a list of \p x509_error codes. */ RD_EXPORT -rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb ( - rd_kafka_conf_t *conf, - int (*ssl_cert_verify_cb) (rd_kafka_t *rk, - const char *broker_name, - int32_t broker_id, - int *x509_error, - int depth, - const char *buf, size_t size, - char *errstr, size_t errstr_size, - void *opaque)); +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb( + rd_kafka_conf_t *conf, + int (*ssl_cert_verify_cb)(rd_kafka_t *rk, + const char *broker_name, + int32_t broker_id, + int *x509_error, + int depth, + const char *buf, + size_t size, + char *errstr, + size_t errstr_size, + void *opaque)); /** @@ -2265,9 +2323,9 @@ typedef enum rd_kafka_cert_type_t { * @sa rd_kafka_conf_set_ssl_cert */ typedef enum rd_kafka_cert_enc_t { - RD_KAFKA_CERT_ENC_PKCS12, /**< PKCS#12 */ - RD_KAFKA_CERT_ENC_DER, /**< DER / binary X.509 ASN1 */ - RD_KAFKA_CERT_ENC_PEM, /**< PEM */ + RD_KAFKA_CERT_ENC_PKCS12, /**< PKCS#12 */ + RD_KAFKA_CERT_ENC_DER, /**< DER / binary X.509 ASN1 */ + RD_KAFKA_CERT_ENC_PEM, /**< PEM */ RD_KAFKA_CERT_ENC__CNT, } rd_kafka_cert_enc_t; @@ -2302,32 +2360,37 @@ typedef enum rd_kafka_cert_enc_t { * * @remark Private and public keys in PEM format may also be set with the * `ssl.key.pem` and `ssl.certificate.pem` configuration properties. + * + * @remark CA certificate in PEM format may also be set with the + * `ssl.ca.pem` configuration property. */ RD_EXPORT rd_kafka_conf_res_t -rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf, - rd_kafka_cert_type_t cert_type, - rd_kafka_cert_enc_t cert_enc, - const void *buffer, size_t size, - char *errstr, size_t errstr_size); +rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, + rd_kafka_cert_type_t cert_type, + rd_kafka_cert_enc_t cert_enc, + const void *buffer, + size_t size, + char *errstr, + size_t errstr_size); /** * @brief Set callback_data for OpenSSL engine. * * @param conf Configuration object. - * @param callback_data passed to engine callbacks, + * @param callback_data passed to engine callbacks, * e.g. \c ENGINE_load_ssl_client_cert. 
* - * @remark The \c ssl.engine.location configuration must be set for this + * @remark The \c ssl.engine.location configuration must be set for this * to have affect. * - * @remark The memory pointed to by \p value must remain valid for the - * lifetime of the configuration object and any Kafka clients that + * @remark The memory pointed to by \p value must remain valid for the + * lifetime of the configuration object and any Kafka clients that * use it. */ RD_EXPORT -void rd_kafka_conf_set_engine_callback_data (rd_kafka_conf_t *conf, - void *callback_data); +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, + void *callback_data); /** @@ -2361,8 +2424,8 @@ void *rd_kafka_opaque(const rd_kafka_t *rk); * global rd_kafka_conf_t object instead. */ RD_EXPORT -void rd_kafka_conf_set_default_topic_conf (rd_kafka_conf_t *conf, - rd_kafka_topic_conf_t *tconf); +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, + rd_kafka_topic_conf_t *tconf); /** * @brief Gets the default topic configuration as previously set with @@ -2377,7 +2440,7 @@ void rd_kafka_conf_set_default_topic_conf (rd_kafka_conf_t *conf, * rd_kafka_conf_set_default_topic_conf(). */ RD_EXPORT rd_kafka_topic_conf_t * -rd_kafka_conf_get_default_topic_conf (rd_kafka_conf_t *conf); +rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf); /** @@ -2400,9 +2463,10 @@ rd_kafka_conf_get_default_topic_conf (rd_kafka_conf_t *conf); * \p RD_KAFKA_CONF_UNKNOWN. */ RD_EXPORT -rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf, - const char *name, - char *dest, size_t *dest_size); +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, + const char *name, + char *dest, + size_t *dest_size); /** @@ -2411,9 +2475,10 @@ rd_kafka_conf_res_t rd_kafka_conf_get (const rd_kafka_conf_t *conf, * @sa rd_kafka_conf_get() */ RD_EXPORT -rd_kafka_conf_res_t rd_kafka_topic_conf_get (const rd_kafka_topic_conf_t *conf, - const char *name, - char *dest, size_t *dest_size); +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, + const char *name, + char *dest, + size_t *dest_size); /** @@ -2438,7 +2503,7 @@ const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp); */ RD_EXPORT const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, - size_t *cntp); + size_t *cntp); /** * @brief Frees a configuration dump returned from `rd_kafka_conf_dump()` or @@ -2482,15 +2547,15 @@ rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void); * @brief Creates a copy/duplicate of topic configuration object \p conf. */ RD_EXPORT -rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t - *conf); +rd_kafka_topic_conf_t * +rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf); /** * @brief Creates a copy/duplicate of \p rk 's default topic configuration * object. */ RD_EXPORT -rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup (rd_kafka_t *rk); +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk); /** @@ -2510,9 +2575,10 @@ void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf); */ RD_EXPORT rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, - const char *name, - const char *value, - char *errstr, size_t errstr_size); + const char *name, + const char *value, + char *errstr, + size_t errstr_size); /** * @brief Sets the application's opaque pointer that will be passed to all topic @@ -2545,15 +2611,14 @@ void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, * could not be performed. 
*/ RD_EXPORT -void -rd_kafka_topic_conf_set_partitioner_cb (rd_kafka_topic_conf_t *topic_conf, - int32_t (*partitioner) ( - const rd_kafka_topic_t *rkt, - const void *keydata, - size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque)); +void rd_kafka_topic_conf_set_partitioner_cb( + rd_kafka_topic_conf_t *topic_conf, + int32_t (*partitioner)(const rd_kafka_topic_t *rkt, + const void *keydata, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque)); /** @@ -2581,11 +2646,10 @@ rd_kafka_topic_conf_set_partitioner_cb (rd_kafka_topic_conf_t *topic_conf, * @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL, * DO NOT USE IN PRODUCTION. */ -RD_EXPORT void -rd_kafka_topic_conf_set_msg_order_cmp (rd_kafka_topic_conf_t *topic_conf, - int (*msg_order_cmp) ( - const rd_kafka_message_t *a, - const rd_kafka_message_t *b)); +RD_EXPORT void rd_kafka_topic_conf_set_msg_order_cmp( + rd_kafka_topic_conf_t *topic_conf, + int (*msg_order_cmp)(const rd_kafka_message_t *a, + const rd_kafka_message_t *b)); /** @@ -2597,13 +2661,13 @@ rd_kafka_topic_conf_set_msg_order_cmp (rd_kafka_topic_conf_t *topic_conf, */ RD_EXPORT int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, - int32_t partition); + int32_t partition); /******************************************************************* - * * + * * * Partitioners provided by rdkafka * - * * + * * *******************************************************************/ /** @@ -2621,9 +2685,11 @@ int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, */ RD_EXPORT int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, void *msg_opaque); + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** * @brief Consistent partitioner. @@ -2639,10 +2705,12 @@ int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, * the CRC value of the key */ RD_EXPORT -int32_t rd_kafka_msg_partitioner_consistent (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, void *msg_opaque); +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** * @brief Consistent-Random partitioner. @@ -2660,10 +2728,12 @@ int32_t rd_kafka_msg_partitioner_consistent (const rd_kafka_topic_t *rkt, * the CRC value of the key (if provided) */ RD_EXPORT -int32_t rd_kafka_msg_partitioner_consistent_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, void *msg_opaque); +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** @@ -2680,11 +2750,12 @@ int32_t rd_kafka_msg_partitioner_consistent_random (const rd_kafka_topic_t *rkt, * @returns a partition between 0 and \p partition_cnt - 1. */ RD_EXPORT -int32_t rd_kafka_msg_partitioner_murmur2 (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** * @brief Consistent-Random Murmur2 partitioner (Java compatible). 
@@ -2701,11 +2772,12 @@ int32_t rd_kafka_msg_partitioner_murmur2 (const rd_kafka_topic_t *rkt, * @returns a partition between 0 and \p partition_cnt - 1. */ RD_EXPORT -int32_t rd_kafka_msg_partitioner_murmur2_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** @@ -2722,11 +2794,12 @@ int32_t rd_kafka_msg_partitioner_murmur2_random (const rd_kafka_topic_t *rkt, * @returns a partition between 0 and \p partition_cnt - 1. */ RD_EXPORT -int32_t rd_kafka_msg_partitioner_fnv1a (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /** @@ -2744,11 +2817,12 @@ int32_t rd_kafka_msg_partitioner_fnv1a (const rd_kafka_topic_t *rkt, * @returns a partition between 0 and \p partition_cnt - 1. */ RD_EXPORT -int32_t rd_kafka_msg_partitioner_fnv1a_random (const rd_kafka_topic_t *rkt, - const void *key, size_t keylen, - int32_t partition_cnt, - void *rkt_opaque, - void *msg_opaque); +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, + const void *key, + size_t keylen, + int32_t partition_cnt, + void *rkt_opaque, + void *msg_opaque); /**@}*/ @@ -2764,7 +2838,6 @@ int32_t rd_kafka_msg_partitioner_fnv1a_random (const rd_kafka_topic_t *rkt, - /** * @brief Creates a new Kafka handle and starts its operation according to the * specified \p type (\p RD_KAFKA_CONSUMER or \p RD_KAFKA_PRODUCER). @@ -2792,8 +2865,10 @@ int32_t rd_kafka_msg_partitioner_fnv1a_random (const rd_kafka_topic_t *rkt, * @sa To destroy the Kafka handle, use rd_kafka_destroy(). */ RD_EXPORT -rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, - char *errstr, size_t errstr_size); +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, + rd_kafka_conf_t *conf, + char *errstr, + size_t errstr_size); /** @@ -2811,7 +2886,7 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, * @sa rd_kafka_destroy_flags() */ RD_EXPORT -void rd_kafka_destroy(rd_kafka_t *rk); +void rd_kafka_destroy(rd_kafka_t *rk); /** @@ -2819,7 +2894,7 @@ void rd_kafka_destroy(rd_kafka_t *rk); * */ RD_EXPORT -void rd_kafka_destroy_flags (rd_kafka_t *rk, int flags); +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags); /** * @brief Flags for rd_kafka_destroy_flags() @@ -2864,7 +2939,7 @@ rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk); * rd_kafka_mem_free() */ RD_EXPORT -char *rd_kafka_memberid (const rd_kafka_t *rk); +char *rd_kafka_memberid(const rd_kafka_t *rk); @@ -2887,7 +2962,7 @@ char *rd_kafka_memberid (const rd_kafka_t *rk); * if no ClusterId could be retrieved in the allotted timespan. */ RD_EXPORT -char *rd_kafka_clusterid (rd_kafka_t *rk, int timeout_ms); +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms); /** @@ -2906,7 +2981,7 @@ char *rd_kafka_clusterid (rd_kafka_t *rk, int timeout_ms); * retrieved in the allotted timespan. 
*/ RD_EXPORT -int32_t rd_kafka_controllerid (rd_kafka_t *rk, int timeout_ms); +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms); /** @@ -2931,8 +3006,9 @@ int32_t rd_kafka_controllerid (rd_kafka_t *rk, int timeout_ms); * @sa rd_kafka_topic_destroy() */ RD_EXPORT -rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, - rd_kafka_topic_conf_t *conf); +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, + const char *topic, + rd_kafka_topic_conf_t *conf); @@ -2960,7 +3036,7 @@ const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt); * with rd_kafka_topic_conf_set_opaque(). */ RD_EXPORT -void *rd_kafka_topic_opaque (const rd_kafka_topic_t *rkt); +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt); /** @@ -2969,7 +3045,7 @@ void *rd_kafka_topic_opaque (const rd_kafka_topic_t *rkt); * The unassigned partition is used by the producer API for messages * that should be partitioned using the configured or default partitioner. */ -#define RD_KAFKA_PARTITION_UA ((int32_t)-1) +#define RD_KAFKA_PARTITION_UA ((int32_t)-1) /** @@ -2994,7 +3070,8 @@ void *rd_kafka_topic_opaque (const rd_kafka_topic_t *rkt); * - error callbacks (rd_kafka_conf_set_error_cb()) [all] * - stats callbacks (rd_kafka_conf_set_stats_cb()) [all] * - throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all] - * - OAUTHBEARER token refresh callbacks (rd_kafka_conf_set_oauthbearer_token_refresh_cb()) [all] + * - OAUTHBEARER token refresh callbacks + * (rd_kafka_conf_set_oauthbearer_token_refresh_cb()) [all] * * @returns the number of events served. */ @@ -3013,8 +3090,7 @@ int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms); * @remark This function MUST ONLY be called from within a librdkafka callback. */ RD_EXPORT -void rd_kafka_yield (rd_kafka_t *rk); - +void rd_kafka_yield(rd_kafka_t *rk); @@ -3026,8 +3102,8 @@ void rd_kafka_yield (rd_kafka_t *rk); * @returns RD_KAFKA_RESP_ERR_NO_ERROR */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_pause_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions); +rd_kafka_pause_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions); @@ -3039,9 +3115,8 @@ rd_kafka_pause_partitions (rd_kafka_t *rk, * @returns RD_KAFKA_RESP_ERR_NO_ERROR */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_resume_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions); - +rd_kafka_resume_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions); @@ -3054,9 +3129,12 @@ rd_kafka_resume_partitions (rd_kafka_t *rk, * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_query_watermark_offsets (rd_kafka_t *rk, - const char *topic, int32_t partition, - int64_t *low, int64_t *high, int timeout_ms); +rd_kafka_query_watermark_offsets(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t *low, + int64_t *high, + int timeout_ms); /** @@ -3075,10 +3153,11 @@ rd_kafka_query_watermark_offsets (rd_kafka_t *rk, * * @remark Shall only be used with an active consumer instance. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_get_watermark_offsets (rd_kafka_t *rk, - const char *topic, int32_t partition, - int64_t *low, int64_t *high); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, + const char *topic, + int32_t partition, + int64_t *low, + int64_t *high); @@ -3095,7 +3174,8 @@ rd_kafka_get_watermark_offsets (rd_kafka_t *rk, * The function will block for at most \p timeout_ms milliseconds. 
* * @remark Duplicate Topic+Partitions are not supported. - * @remark Per-partition errors may be returned in \c rd_kafka_topic_partition_t.err + * @remark Per-partition errors may be returned in \c + * rd_kafka_topic_partition_t.err * * @returns RD_KAFKA_RESP_ERR_NO_ERROR if offsets were be queried (do note * that per-partition errors might be set), @@ -3107,9 +3187,9 @@ rd_kafka_get_watermark_offsets (rd_kafka_t *rk, * for the given partitions. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_offsets_for_times (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *offsets, - int timeout_ms); +rd_kafka_offsets_for_times(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *offsets, + int timeout_ms); @@ -3127,7 +3207,7 @@ rd_kafka_offsets_for_times (rd_kafka_t *rk, * rd_kafka_mem_free() */ RD_EXPORT -void *rd_kafka_mem_calloc (rd_kafka_t *rk, size_t num, size_t size); +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size); @@ -3145,7 +3225,7 @@ void *rd_kafka_mem_calloc (rd_kafka_t *rk, size_t num, size_t size); * rd_kafka_mem_free() */ RD_EXPORT -void *rd_kafka_mem_malloc (rd_kafka_t *rk, size_t size); +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size); @@ -3166,15 +3246,13 @@ void *rd_kafka_mem_malloc (rd_kafka_t *rk, size_t size); * that explicitly mention using this function for freeing. */ RD_EXPORT -void rd_kafka_mem_free (rd_kafka_t *rk, void *ptr); +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr); /**@}*/ - - /** * @name Queue API * @{ @@ -3209,7 +3287,40 @@ void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu); * Use rd_kafka_queue_destroy() to loose the reference. */ RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_main (rd_kafka_t *rk); +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk); + + + +/** + * @returns a reference to the SASL callback queue, if a SASL mechanism + * with callbacks is configured (currently only OAUTHBEARER), else + * returns NULL. + * + * Use rd_kafka_queue_destroy() to loose the reference. + * + * @sa rd_kafka_sasl_background_callbacks_enable() + */ +RD_EXPORT +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk); + + +/** + * @brief Enable SASL OAUTHBEARER refresh callbacks on the librdkafka + * background thread. + * + * This serves as an alternative for applications that do not call + * rd_kafka_poll() (et.al.) at regular intervals (or not at all), as a means + * of automatically trigger the refresh callbacks, which are needed to + * initiate connections to the brokers in the case a custom OAUTHBEARER + * refresh callback is configured. + * + * @returns NULL on success or an error object on error. + * + * @sa rd_kafka_queue_get_sasl() + * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb() + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk); /** @@ -3222,7 +3333,7 @@ rd_kafka_queue_t *rd_kafka_queue_get_main (rd_kafka_t *rk); * prior to calling rd_kafka_consumer_close(). */ RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_consumer (rd_kafka_t *rk); +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk); /** * @returns a reference to the partition's queue, or NULL if @@ -3235,34 +3346,34 @@ rd_kafka_queue_t *rd_kafka_queue_get_consumer (rd_kafka_t *rk); * @remark This function only works on consumers. 
*/ RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_partition (rd_kafka_t *rk, - const char *topic, - int32_t partition); +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, + const char *topic, + int32_t partition); /** * @returns a reference to the background thread queue, or NULL if the * background queue is not enabled. * - * To enable the background thread queue set a generic event handler callback - * with rd_kafka_conf_set_background_event_cb() on the client instance - * configuration object (rd_kafka_conf_t). + * The background thread queue provides the application with an automatically + * polled queue that triggers the event callback in a background thread, + * this background thread is completely managed by librdkafka. + * + * The background thread queue is automatically created if a generic event + * handler callback is configured with rd_kafka_conf_set_background_event_cb() + * or if rd_kafka_queue_get_background() is called. * * The background queue is polled and served by librdkafka and MUST NOT be * polled, forwarded, or otherwise managed by the application, it may only * be used as the destination queue passed to queue-enabled APIs, such as * the Admin API. * - * The background thread queue provides the application with an automatically - * polled queue that triggers the event callback in a background thread, - * this background thread is completely managed by librdkafka. - * * Use rd_kafka_queue_destroy() to loose the reference. * * @warning The background queue MUST NOT be read from (polled, consumed, etc), * or forwarded from. */ RD_EXPORT -rd_kafka_queue_t *rd_kafka_queue_get_background (rd_kafka_t *rk); +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk); /** @@ -3276,7 +3387,7 @@ rd_kafka_queue_t *rd_kafka_queue_get_background (rd_kafka_t *rk); * queue. */ RD_EXPORT -void rd_kafka_queue_forward (rd_kafka_queue_t *src, rd_kafka_queue_t *dst); +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst); /** * @brief Forward librdkafka logs (and debug) to the specified queue @@ -3293,18 +3404,19 @@ void rd_kafka_queue_forward (rd_kafka_queue_t *src, rd_kafka_queue_t *dst); * * @remark librdkafka maintains its own reference to the provided queue. * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error, + * eg RD_KAFKA_RESP_ERR__NOT_CONFIGURED when log.queue is not set to true. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_set_log_queue (rd_kafka_t *rk, - rd_kafka_queue_t *rkqu); +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, + rd_kafka_queue_t *rkqu); /** * @returns the current number of elements in queue. */ RD_EXPORT -size_t rd_kafka_queue_length (rd_kafka_queue_t *rkqu); +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu); /** @@ -3325,8 +3437,10 @@ size_t rd_kafka_queue_length (rd_kafka_queue_t *rkqu); * @remark The file-descriptor/socket must be set to non-blocking. */ RD_EXPORT -void rd_kafka_queue_io_event_enable (rd_kafka_queue_t *rkqu, int fd, - const void *payload, size_t size); +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, + int fd, + const void *payload, + size_t size); /** * @brief Enable callback event triggering for queue. @@ -3345,10 +3459,10 @@ void rd_kafka_queue_io_event_enable (rd_kafka_queue_t *rkqu, int fd, * handle). 
*/ RD_EXPORT -void rd_kafka_queue_cb_event_enable (rd_kafka_queue_t *rkqu, - void (*event_cb) (rd_kafka_t *rk, - void *qev_opaque), - void *qev_opaque); +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, + void (*event_cb)(rd_kafka_t *rk, + void *qev_opaque), + void *qev_opaque); /** @@ -3359,7 +3473,7 @@ void rd_kafka_queue_cb_event_enable (rd_kafka_queue_t *rkqu, * Must not be used from signal handlers since that may cause deadlocks. */ RD_EXPORT -void rd_kafka_queue_yield (rd_kafka_queue_t *rkqu); +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu); /**@}*/ @@ -3372,12 +3486,15 @@ void rd_kafka_queue_yield (rd_kafka_queue_t *rkqu); */ -#define RD_KAFKA_OFFSET_BEGINNING -2 /**< Start consuming from beginning of - * kafka partition queue: oldest msg */ -#define RD_KAFKA_OFFSET_END -1 /**< Start consuming from end of kafka - * partition queue: next msg */ -#define RD_KAFKA_OFFSET_STORED -1000 /**< Start consuming from offset retrieved - * from offset store */ +#define RD_KAFKA_OFFSET_BEGINNING \ + -2 /**< Start consuming from beginning of \ + * kafka partition queue: oldest msg */ +#define RD_KAFKA_OFFSET_END \ + -1 /**< Start consuming from end of kafka \ + * partition queue: next msg */ +#define RD_KAFKA_OFFSET_STORED \ + -1000 /**< Start consuming from offset retrieved \ + * from offset store */ #define RD_KAFKA_OFFSET_INVALID -1001 /**< Invalid offset */ @@ -3390,7 +3507,7 @@ void rd_kafka_queue_yield (rd_kafka_queue_t *rkqu); * * That is, if current end offset is 12345 and \p CNT is 200, it will start * consuming from offset \c 12345-200 = \c 12145. */ -#define RD_KAFKA_OFFSET_TAIL(CNT) (RD_KAFKA_OFFSET_TAIL_BASE - (CNT)) +#define RD_KAFKA_OFFSET_TAIL(CNT) (RD_KAFKA_OFFSET_TAIL_BASE - (CNT)) /** * @brief Start consuming messages for topic \p rkt and \p partition @@ -3426,8 +3543,9 @@ void rd_kafka_queue_yield (rd_kafka_queue_t *rkqu); * Use `rd_kafka_errno2err()` to convert sytem \c errno to `rd_kafka_resp_err_t` */ RD_EXPORT -int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, - int64_t offset); +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset); /** * @brief Same as rd_kafka_consume_start() but re-routes incoming messages to @@ -3444,8 +3562,10 @@ int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, * be combined for the same topic and partition. */ RD_EXPORT -int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, - int64_t offset, rd_kafka_queue_t *rkqu); +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset, + rd_kafka_queue_t *rkqu); /** * @brief Stop consuming messages for topic \p rkt and \p partition, purging @@ -3469,9 +3589,14 @@ int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition); * @brief Seek consumer for topic+partition to \p offset which is either an * absolute or logical offset. * - * If \p timeout_ms is not 0 the call will wait this long for the - * seek to be performed. If the timeout is reached the internal state - * will be unknown and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`. + * If \p timeout_ms is specified (not 0) the seek call will wait this long + * for the consumer to update its fetcher state for the given partition with + * the new offset. This guarantees that no previously fetched messages for the + * old offset (or fetch position) will be passed to the application. 
+ * + * If the timeout is reached the internal state will be unknown to the caller + * and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`. + * * If \p timeout_ms is 0 it will initiate the seek but return * immediately without any error reporting (e.g., async). * @@ -3489,10 +3614,10 @@ int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition); * @deprecated Use rd_kafka_seek_partitions(). */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *rkt, - int32_t partition, - int64_t offset, - int timeout_ms); +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, + int32_t partition, + int64_t offset, + int timeout_ms); @@ -3502,11 +3627,13 @@ rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *rkt, * * The offset may be either absolute (>= 0) or a logical offset. * - * If \p timeout_ms is not 0 the call will wait this long for the - * seeks to be performed. If the timeout is reached the internal state - * will be unknown for the remaining partitions to seek and this function - * will return an error with the error code set to - * `RD_KAFKA_RESP_ERR__TIMED_OUT`. + * If \p timeout_ms is specified (not 0) the seek call will wait this long + * for the consumer to update its fetcher state for the given partition with + * the new offset. This guarantees that no previously fetched messages for the + * old offset (or fetch position) will be passed to the application. + * + * If the timeout is reached the internal state will be unknown to the caller + * and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`. * * If \p timeout_ms is 0 it will initiate the seek but return * immediately without any error reporting (e.g., async). @@ -3526,9 +3653,9 @@ rd_kafka_resp_err_t rd_kafka_seek (rd_kafka_topic_t *rkt, * @returns NULL on success or an error object on failure. */ RD_EXPORT rd_kafka_error_t * -rd_kafka_seek_partitions (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions, - int timeout_ms); +rd_kafka_seek_partitions(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions, + int timeout_ms); /** @@ -3556,8 +3683,8 @@ rd_kafka_seek_partitions (rd_kafka_t *rk, * passing message to application. */ RD_EXPORT -rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, - int timeout_ms); +rd_kafka_message_t * +rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms); @@ -3587,10 +3714,11 @@ rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, * passing message to application. */ RD_EXPORT -ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, - int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size); +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, + int32_t partition, + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size); @@ -3628,11 +3756,11 @@ ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, * poll/queue based alternatives. 
*/ RD_EXPORT -int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, + int32_t partition, int timeout_ms, - void (*consume_cb) (rd_kafka_message_t - *rkmessage, - void *commit_opaque), + void (*consume_cb)(rd_kafka_message_t *rkmessage, + void *commit_opaque), void *commit_opaque); @@ -3663,9 +3791,9 @@ rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, */ RD_EXPORT ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, - int timeout_ms, - rd_kafka_message_t **rkmessages, - size_t rkmessages_size); + int timeout_ms, + rd_kafka_message_t **rkmessages, + size_t rkmessages_size); /** * @brief Consume multiple messages from queue with callback @@ -3677,19 +3805,17 @@ ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, * poll/queue based alternatives. */ RD_EXPORT -int rd_kafka_consume_callback_queue (rd_kafka_queue_t *rkqu, - int timeout_ms, - void (*consume_cb) (rd_kafka_message_t - *rkmessage, - void *commit_opaque), - void *commit_opaque); +int rd_kafka_consume_callback_queue( + rd_kafka_queue_t *rkqu, + int timeout_ms, + void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), + void *commit_opaque); /**@}*/ - /** * @name Simple Consumer API (legacy): Topic+partition offset store. * @{ @@ -3706,14 +3832,23 @@ int rd_kafka_consume_callback_queue (rd_kafka_queue_t *rkqu, * The \c offset + 1 will be committed (written) to broker (or file) according * to \c `auto.commit.interval.ms` or manual offset-less commit() * + * @warning This method may only be called for partitions that are currently + * assigned. + * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. + * Since v1.9.0. + * + * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as + * this may later interfere with resuming a paused partition, instead + * store offsets prior to calling seek. + * * @remark \c `enable.auto.offset.store` must be set to "false" when using * this API. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_offset_store (rd_kafka_topic_t *rkt, - int32_t partition, int64_t offset); +rd_kafka_resp_err_t +rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset); /** @@ -3723,27 +3858,35 @@ rd_kafka_resp_err_t rd_kafka_offset_store (rd_kafka_topic_t *rkt, * to \c `auto.commit.interval.ms` or manual offset-less commit(). * * Per-partition success/error status propagated through each partition's - * \c .err field. + * \c .err for all return values (even NO_ERROR) except INVALID_ARG. + * + * @warning This method may only be called for partitions that are currently + * assigned. + * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. + * Since v1.9.0. + * + * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as + * this may later interfere with resuming a paused partition, instead + * store offsets prior to calling seek. * * @remark The \c .offset field is stored as is, it will NOT be + 1. * * @remark \c `enable.auto.offset.store` must be set to "false" when using * this API. * - * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or - * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if none of the - * offsets could be stored, or + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on (partial) success, or * RD_KAFKA_RESP_ERR__INVALID_ARG if \c enable.auto.offset.store - * is true. 
+ * is true, or + * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or RD_KAFKA_RESP_ERR__STATE + * if none of the offsets could be stored. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_offsets_store (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *offsets); +rd_kafka_offsets_store(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *offsets); /**@}*/ - /** * @name KafkaConsumer (C) * @{ @@ -3777,7 +3920,8 @@ rd_kafka_offsets_store (rd_kafka_t *rk, * and then start fetching messages. This cycle may take up to * \c session.timeout.ms * 2 or more to complete. * - * @remark A consumer error will be raised for each unavailable topic in the + * @remark After this call returns a consumer error will be returned by + * rd_kafka_consumer_poll (et.al) for each unavailable topic in the * \p topics. The error will be RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART * for non-existent topics, and * RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED for unauthorized topics. @@ -3793,15 +3937,15 @@ rd_kafka_offsets_store (rd_kafka_t *rk, * RD_KAFKA_RESP_ERR__FATAL if the consumer has raised a fatal error. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_subscribe (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *topics); +rd_kafka_subscribe(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *topics); /** * @brief Unsubscribe from the current subscription set. */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_unsubscribe (rd_kafka_t *rk); +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk); /** @@ -3814,8 +3958,7 @@ rd_kafka_resp_err_t rd_kafka_unsubscribe (rd_kafka_t *rk); * rd_kafka_topic_partition_list_destroy on the returned list. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_subscription (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t **topics); +rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics); @@ -3849,15 +3992,15 @@ rd_kafka_subscription (rd_kafka_t *rk, * @sa rd_kafka_message_t */ RD_EXPORT -rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk, int timeout_ms); +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms); /** - * @brief Close down the KafkaConsumer. + * @brief Close the consumer. * - * @remark This call will block until the consumer has revoked its assignment, - * calling the \c rebalance_cb if it is configured, committed offsets - * to broker, and left the consumer group. - * The maximum blocking time is roughly limited to session.timeout.ms. + * This call will block until the consumer has revoked its assignment, + * calling the \c rebalance_cb if it is configured, committed offsets + * to broker, and left the consumer group (if applicable). + * The maximum blocking time is roughly limited to session.timeout.ms. * * @returns An error code indicating if the consumer close was succesful * or not. @@ -3869,7 +4012,41 @@ rd_kafka_message_t *rd_kafka_consumer_poll (rd_kafka_t *rk, int timeout_ms); * */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk); +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk); + + +/** + * @brief Asynchronously close the consumer. + * + * Performs the same actions as rd_kafka_consumer_close() but in a + * background thread. + * + * Rebalance events/callbacks (etc) will be forwarded to the + * application-provided \p rkqu. The application must poll/serve this queue + * until rd_kafka_consumer_closed() returns true. + * + * @remark Depending on consumer group join state there may or may not be + * rebalance events emitted on \p rkqu. 
+ * + * @returns an error object if the consumer close failed, else NULL. + * + * @sa rd_kafka_consumer_closed() + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk, + rd_kafka_queue_t *rkqu); + + +/** + * @returns 1 if the consumer is closed, else 0. + * + * Should be used in conjunction with rd_kafka_consumer_close_queue() to know + * when the consumer has been closed. + * + * @sa rd_kafka_consumer_close_queue() + */ +RD_EXPORT +int rd_kafka_consumer_closed(rd_kafka_t *rk); /** @@ -3890,9 +4067,8 @@ rd_kafka_resp_err_t rd_kafka_consumer_close (rd_kafka_t *rk); * rd_kafka_error_destroy(). */ RD_EXPORT rd_kafka_error_t * -rd_kafka_incremental_assign (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t - *partitions); +rd_kafka_incremental_assign(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions); /** @@ -3912,10 +4088,9 @@ rd_kafka_incremental_assign (rd_kafka_t *rk, * @remark The returned error object (if not NULL) must be destroyed with * rd_kafka_error_destroy(). */ -RD_EXPORT rd_kafka_error_t * -rd_kafka_incremental_unassign (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t - *partitions); +RD_EXPORT rd_kafka_error_t *rd_kafka_incremental_unassign( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions); /** @@ -3932,7 +4107,7 @@ rd_kafka_incremental_unassign (rd_kafka_t *rk, * @returns NULL on error, or one of "NONE", "EAGER", "COOPERATIVE" on success. */ RD_EXPORT -const char *rd_kafka_rebalance_protocol (rd_kafka_t *rk); +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk); /** @@ -3956,8 +4131,8 @@ const char *rd_kafka_rebalance_protocol (rd_kafka_t *rk); * a fatal error. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_assign (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *partitions); +rd_kafka_assign(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *partitions); /** * @brief Returns the current partition assignment as set by rd_kafka_assign() @@ -3976,8 +4151,8 @@ rd_kafka_assign (rd_kafka_t *rk, * since an application is free to assign any partitions. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_assignment (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t **partitions); +rd_kafka_assignment(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t **partitions); /** @@ -3996,8 +4171,7 @@ rd_kafka_assignment (rd_kafka_t *rk, * @returns Returns 1 if the current partition assignment is considered * lost, 0 otherwise. */ -RD_EXPORT int -rd_kafka_assignment_lost (rd_kafka_t *rk); +RD_EXPORT int rd_kafka_assignment_lost(rd_kafka_t *rk); /** @@ -4021,8 +4195,9 @@ rd_kafka_assignment_lost (rd_kafka_t *rk); * a fatal error. 
*/ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_commit (rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, - int async); +rd_kafka_commit(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + int async); /** @@ -4032,8 +4207,9 @@ rd_kafka_commit (rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, * @sa rd_kafka_commit */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, - int async); +rd_kafka_commit_message(rd_kafka_t *rk, + const rd_kafka_message_t *rkmessage, + int async); /** @@ -4059,14 +4235,14 @@ rd_kafka_commit_message (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, * @sa rd_kafka_conf_set_offset_commit_cb() */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_commit_queue (rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_queue_t *rkqu, - void (*cb) (rd_kafka_t *rk, - rd_kafka_resp_err_t err, - rd_kafka_topic_partition_list_t *offsets, - void *commit_opaque), - void *commit_opaque); +rd_kafka_commit_queue(rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_queue_t *rkqu, + void (*cb)(rd_kafka_t *rk, + rd_kafka_resp_err_t err, + rd_kafka_topic_partition_list_t *offsets, + void *commit_opaque), + void *commit_opaque); /** @@ -4087,9 +4263,9 @@ rd_kafka_commit_queue (rd_kafka_t *rk, * Else returns an error code. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_committed (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions, - int timeout_ms); +rd_kafka_committed(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *partitions, + int timeout_ms); @@ -4097,8 +4273,8 @@ rd_kafka_committed (rd_kafka_t *rk, * @brief Retrieve current positions (offsets) for topics+partitions. * * The \p offset field of each requested partition will be set to the offset - * of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there was - * no previous message. + * of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there + * was no previous message. * * @remark In this context the last consumed message is the offset consumed * by the current librdkafka instance and, in case of rebalancing, not @@ -4110,9 +4286,7 @@ rd_kafka_committed (rd_kafka_t *rk, * Else returns an error code. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_position (rd_kafka_t *rk, - rd_kafka_topic_partition_list_t *partitions); - +rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions); @@ -4128,7 +4302,7 @@ rd_kafka_position (rd_kafka_t *rk, * @sa rd_kafka_send_offsets_to_transaction() */ RD_EXPORT rd_kafka_consumer_group_metadata_t * -rd_kafka_consumer_group_metadata (rd_kafka_t *rk); +rd_kafka_consumer_group_metadata(rd_kafka_t *rk); /** @@ -4141,7 +4315,7 @@ rd_kafka_consumer_group_metadata (rd_kafka_t *rk); * rd_kafka_consumer_group_metadata_destroy(). */ RD_EXPORT rd_kafka_consumer_group_metadata_t * -rd_kafka_consumer_group_metadata_new (const char *group_id); +rd_kafka_consumer_group_metadata_new(const char *group_id); /** @@ -4157,11 +4331,10 @@ rd_kafka_consumer_group_metadata_new (const char *group_id); * rd_kafka_consumer_group_metadata_destroy(). 
*/ RD_EXPORT rd_kafka_consumer_group_metadata_t * -rd_kafka_consumer_group_metadata_new_with_genid (const char *group_id, - int32_t generation_id, - const char *member_id, - const char - *group_instance_id); +rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, + int32_t generation_id, + const char *member_id, + const char *group_instance_id); /** @@ -4169,7 +4342,7 @@ rd_kafka_consumer_group_metadata_new_with_genid (const char *group_id, * rd_kafka_consumer_group_metadata(). */ RD_EXPORT void -rd_kafka_consumer_group_metadata_destroy (rd_kafka_consumer_group_metadata_t *); +rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *); /** @@ -4192,10 +4365,10 @@ rd_kafka_consumer_group_metadata_destroy (rd_kafka_consumer_group_metadata_t *); * * @sa rd_kafka_consumer_group_metadata_read() */ -RD_EXPORT rd_kafka_error_t * -rd_kafka_consumer_group_metadata_write ( - const rd_kafka_consumer_group_metadata_t *cgmd, - void **bufferp, size_t *sizep); +RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_write( + const rd_kafka_consumer_group_metadata_t *cgmd, + void **bufferp, + size_t *sizep); /** * @brief Reads serialized consumer group metadata and returns a @@ -4217,10 +4390,10 @@ rd_kafka_consumer_group_metadata_write ( * * @sa rd_kafka_consumer_group_metadata_write() */ -RD_EXPORT rd_kafka_error_t * -rd_kafka_consumer_group_metadata_read ( - rd_kafka_consumer_group_metadata_t **cgmdp, - const void *buffer, size_t size); +RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_read( + rd_kafka_consumer_group_metadata_t **cgmdp, + const void *buffer, + size_t size); /**@}*/ @@ -4237,21 +4410,27 @@ rd_kafka_consumer_group_metadata_read ( /** * @brief Producer message flags */ -#define RD_KAFKA_MSG_F_FREE 0x1 /**< Delegate freeing of payload to rdkafka. */ -#define RD_KAFKA_MSG_F_COPY 0x2 /**< rdkafka will make a copy of the payload. */ -#define RD_KAFKA_MSG_F_BLOCK 0x4 /**< Block produce*() on message queue full. - * WARNING: If a delivery report callback - * is used the application MUST - * call rd_kafka_poll() (or equiv.) - * to make sure delivered messages - * are drained from the internal - * delivery report queue. - * Failure to do so will result - * in indefinately blocking on - * the produce() call when the - * message queue is full. */ -#define RD_KAFKA_MSG_F_PARTITION 0x8 /**< produce_batch() will honor - * per-message partition. */ +#define RD_KAFKA_MSG_F_FREE \ + 0x1 /**< Delegate freeing of payload to rdkafka. \ + */ +#define RD_KAFKA_MSG_F_COPY \ + 0x2 /**< rdkafka will make a copy of the payload. \ + */ +#define RD_KAFKA_MSG_F_BLOCK \ + 0x4 /**< Block produce*() on message queue full. \ + * WARNING: If a delivery report callback \ + * is used the application MUST \ + * call rd_kafka_poll() (or equiv.) \ + * to make sure delivered messages \ + * are drained from the internal \ + * delivery report queue. \ + * Failure to do so will result \ + * in indefinately blocking on \ + * the produce() call when the \ + * message queue is full. */ +#define RD_KAFKA_MSG_F_PARTITION \ + 0x8 /**< produce_batch() will honor \ + * per-message partition. */ @@ -4292,13 +4471,11 @@ rd_kafka_consumer_group_metadata_read ( * RD_KAFKA_MSG_F_BLOCK - block \p produce*() call if * \p queue.buffering.max.messages or * \p queue.buffering.max.kbytes are exceeded. - * Messages are considered in-queue from the point they - * are accepted by produce() until their corresponding - * delivery report callback/event returns. 
- * It is thus a requirement to call - * rd_kafka_poll() (or equiv.) from a separate - * thread when F_BLOCK is used. - * See WARNING on \c RD_KAFKA_MSG_F_BLOCK above. + * Messages are considered in-queue from the point + * they are accepted by produce() until their corresponding delivery report + * callback/event returns. It is thus a requirement to call rd_kafka_poll() (or + * equiv.) from a separate thread when F_BLOCK is used. See WARNING on \c + * RD_KAFKA_MSG_F_BLOCK above. * * RD_KAFKA_MSG_F_FREE - rdkafka will free(3) \p payload when it is done * with it. @@ -4358,11 +4535,14 @@ rd_kafka_consumer_group_metadata_read ( * @sa Use rd_kafka_errno2err() to convert `errno` to rdkafka error code. */ RD_EXPORT -int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, - int msgflags, - void *payload, size_t len, - const void *key, size_t keylen, - void *msg_opaque); +int rd_kafka_produce(rd_kafka_topic_t *rkt, + int32_t partition, + int msgflags, + void *payload, + size_t len, + const void *key, + size_t keylen, + void *msg_opaque); /** @@ -4379,7 +4559,7 @@ int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, * @sa rd_kafka_produce, rd_kafka_produceva, RD_KAFKA_V_END */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...); +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...); /** @@ -4394,9 +4574,8 @@ rd_kafka_resp_err_t rd_kafka_producev (rd_kafka_t *rk, ...); * @sa rd_kafka_produce, rd_kafka_producev, RD_KAFKA_V_END */ RD_EXPORT -rd_kafka_error_t *rd_kafka_produceva (rd_kafka_t *rk, - const rd_kafka_vu_t *vus, - size_t cnt); +rd_kafka_error_t * +rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt); /** @@ -4429,10 +4608,11 @@ rd_kafka_error_t *rd_kafka_produceva (rd_kafka_t *rk, * the provided \p rkmessages. */ RD_EXPORT -int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, - int msgflags, - rd_kafka_message_t *rkmessages, int message_cnt); - +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, + int32_t partition, + int msgflags, + rd_kafka_message_t *rkmessages, + int message_cnt); @@ -4444,6 +4624,9 @@ int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, * * @remark This function will call rd_kafka_poll() and thus trigger callbacks. * + * @remark The \c linger.ms time will be ignored for the duration of the call, + * queued messages will be sent to the broker as soon as possible. + * * @remark If RD_KAFKA_EVENT_DR has been enabled * (through rd_kafka_conf_set_events()) this function will not call * rd_kafka_poll() but instead wait for the librdkafka-handled @@ -4458,7 +4641,7 @@ int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, * @sa rd_kafka_outq_len() */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms); +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms); @@ -4493,7 +4676,7 @@ rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms); * client instance. 
*/ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_purge (rd_kafka_t *rk, int purge_flags); +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags); /** @@ -4525,43 +4708,43 @@ rd_kafka_resp_err_t rd_kafka_purge (rd_kafka_t *rk, int purge_flags); /** -* @name Metadata API -* @{ -* -* -*/ + * @name Metadata API + * @{ + * + * + */ /** * @brief Broker information */ typedef struct rd_kafka_metadata_broker { - int32_t id; /**< Broker Id */ - char *host; /**< Broker hostname */ - int port; /**< Broker listening port */ + int32_t id; /**< Broker Id */ + char *host; /**< Broker hostname */ + int port; /**< Broker listening port */ } rd_kafka_metadata_broker_t; /** * @brief Partition information */ typedef struct rd_kafka_metadata_partition { - int32_t id; /**< Partition Id */ - rd_kafka_resp_err_t err; /**< Partition error reported by broker */ - int32_t leader; /**< Leader broker */ - int replica_cnt; /**< Number of brokers in \p replicas */ - int32_t *replicas; /**< Replica brokers */ - int isr_cnt; /**< Number of ISR brokers in \p isrs */ - int32_t *isrs; /**< In-Sync-Replica brokers */ + int32_t id; /**< Partition Id */ + rd_kafka_resp_err_t err; /**< Partition error reported by broker */ + int32_t leader; /**< Leader broker */ + int replica_cnt; /**< Number of brokers in \p replicas */ + int32_t *replicas; /**< Replica brokers */ + int isr_cnt; /**< Number of ISR brokers in \p isrs */ + int32_t *isrs; /**< In-Sync-Replica brokers */ } rd_kafka_metadata_partition_t; /** * @brief Topic information */ typedef struct rd_kafka_metadata_topic { - char *topic; /**< Topic name */ - int partition_cnt; /**< Number of partitions in \p partitions*/ + char *topic; /**< Topic name */ + int partition_cnt; /**< Number of partitions in \p partitions*/ struct rd_kafka_metadata_partition *partitions; /**< Partitions */ - rd_kafka_resp_err_t err; /**< Topic error reported by broker */ + rd_kafka_resp_err_t err; /**< Topic error reported by broker */ } rd_kafka_metadata_topic_t; @@ -4569,14 +4752,14 @@ typedef struct rd_kafka_metadata_topic { * @brief Metadata container */ typedef struct rd_kafka_metadata { - int broker_cnt; /**< Number of brokers in \p brokers */ - struct rd_kafka_metadata_broker *brokers; /**< Brokers */ + int broker_cnt; /**< Number of brokers in \p brokers */ + struct rd_kafka_metadata_broker *brokers; /**< Brokers */ - int topic_cnt; /**< Number of topics in \p topics */ - struct rd_kafka_metadata_topic *topics; /**< Topics */ + int topic_cnt; /**< Number of topics in \p topics */ + struct rd_kafka_metadata_topic *topics; /**< Topics */ - int32_t orig_broker_id; /**< Broker originating this metadata */ - char *orig_broker_name; /**< Name of originating broker */ + int32_t orig_broker_id; /**< Broker originating this metadata */ + char *orig_broker_name; /**< Name of originating broker */ } rd_kafka_metadata_t; @@ -4602,10 +4785,11 @@ typedef struct rd_kafka_metadata { */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_metadata (rd_kafka_t *rk, int all_topics, - rd_kafka_topic_t *only_rkt, - const struct rd_kafka_metadata **metadatap, - int timeout_ms); +rd_kafka_metadata(rd_kafka_t *rk, + int all_topics, + rd_kafka_topic_t *only_rkt, + const struct rd_kafka_metadata **metadatap, + int timeout_ms); /** * @brief Release metadata memory. 
@@ -4619,11 +4803,11 @@ void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata); /** -* @name Client group information -* @{ -* -* -*/ + * @name Client group information + * @{ + * + * + */ /** @@ -4639,10 +4823,10 @@ struct rd_kafka_group_member_info { char *client_host; /**< Client's hostname */ void *member_metadata; /**< Member metadata (binary), * format depends on \p protocol_type. */ - int member_metadata_size; /**< Member metadata size in bytes */ + int member_metadata_size; /**< Member metadata size in bytes */ void *member_assignment; /**< Member assignment (binary), * format depends on \p protocol_type. */ - int member_assignment_size; /**< Member assignment size in bytes */ + int member_assignment_size; /**< Member assignment size in bytes */ }; /** @@ -4656,7 +4840,7 @@ struct rd_kafka_group_info { char *protocol_type; /**< Group protocol type */ char *protocol; /**< Group protocol */ struct rd_kafka_group_member_info *members; /**< Group members */ - int member_cnt; /**< Group member count */ + int member_cnt; /**< Group member count */ }; /** @@ -4665,8 +4849,8 @@ struct rd_kafka_group_info { * @sa rd_kafka_group_list_destroy() to release list memory. */ struct rd_kafka_group_list { - struct rd_kafka_group_info *groups; /**< Groups */ - int group_cnt; /**< Group count */ + struct rd_kafka_group_info *groups; /**< Groups */ + int group_cnt; /**< Group count */ }; @@ -4699,15 +4883,16 @@ struct rd_kafka_group_list { */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_list_groups (rd_kafka_t *rk, const char *group, - const struct rd_kafka_group_list **grplistp, - int timeout_ms); +rd_kafka_list_groups(rd_kafka_t *rk, + const char *group, + const struct rd_kafka_group_list **grplistp, + int timeout_ms); /** * @brief Release list memory */ RD_EXPORT -void rd_kafka_group_list_destroy (const struct rd_kafka_group_list *grplist); +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist); /**@}*/ @@ -4758,7 +4943,6 @@ int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist); - /** * @brief Set logger function. * @@ -4771,10 +4955,12 @@ int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist); * * @remark \p rk may be passed as NULL in the callback. */ -RD_EXPORT RD_DEPRECATED -void rd_kafka_set_logger(rd_kafka_t *rk, - void (*func) (const rd_kafka_t *rk, int level, - const char *fac, const char *buf)); +RD_EXPORT RD_DEPRECATED void +rd_kafka_set_logger(rd_kafka_t *rk, + void (*func)(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf)); /** @@ -4794,8 +4980,10 @@ void rd_kafka_set_log_level(rd_kafka_t *rk, int level); * @brief Builtin (default) log sink: print to stderr */ RD_EXPORT -void rd_kafka_log_print(const rd_kafka_t *rk, int level, - const char *fac, const char *buf); +void rd_kafka_log_print(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf); /** @@ -4804,8 +4992,10 @@ void rd_kafka_log_print(const rd_kafka_t *rk, int level, * with syslog support. */ RD_EXPORT -void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, - const char *fac, const char *buf); +void rd_kafka_log_syslog(const rd_kafka_t *rk, + int level, + const char *fac, + const char *buf); /** @@ -4831,7 +5021,7 @@ void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, * @sa rd_kafka_flush() */ RD_EXPORT -int rd_kafka_outq_len(rd_kafka_t *rk); +int rd_kafka_outq_len(rd_kafka_t *rk); @@ -4887,14 +5077,13 @@ int rd_kafka_wait_destroyed(int timeout_ms); * @returns the number of failures, or 0 if all tests passed. 
*/ RD_EXPORT -int rd_kafka_unittest (void); +int rd_kafka_unittest(void); /**@}*/ - /** * @name Experimental APIs * @{ @@ -4908,7 +5097,7 @@ int rd_kafka_unittest (void); * main queue with rd_kafka_poll_set_consumer(). */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk); +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk); /**@}*/ @@ -4927,27 +5116,31 @@ rd_kafka_resp_err_t rd_kafka_poll_set_consumer (rd_kafka_t *rk); * @brief Event types */ typedef int rd_kafka_event_type_t; -#define RD_KAFKA_EVENT_NONE 0x0 /**< Unset value */ -#define RD_KAFKA_EVENT_DR 0x1 /**< Producer Delivery report batch */ -#define RD_KAFKA_EVENT_FETCH 0x2 /**< Fetched message (consumer) */ -#define RD_KAFKA_EVENT_LOG 0x4 /**< Log message */ -#define RD_KAFKA_EVENT_ERROR 0x8 /**< Error */ -#define RD_KAFKA_EVENT_REBALANCE 0x10 /**< Group rebalance (consumer) */ -#define RD_KAFKA_EVENT_OFFSET_COMMIT 0x20 /**< Offset commit result */ -#define RD_KAFKA_EVENT_STATS 0x40 /**< Stats */ -#define RD_KAFKA_EVENT_CREATETOPICS_RESULT 100 /**< CreateTopics_result_t */ -#define RD_KAFKA_EVENT_DELETETOPICS_RESULT 101 /**< DeleteTopics_result_t */ -#define RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT 102 /**< CreatePartitions_result_t */ +#define RD_KAFKA_EVENT_NONE 0x0 /**< Unset value */ +#define RD_KAFKA_EVENT_DR 0x1 /**< Producer Delivery report batch */ +#define RD_KAFKA_EVENT_FETCH 0x2 /**< Fetched message (consumer) */ +#define RD_KAFKA_EVENT_LOG 0x4 /**< Log message */ +#define RD_KAFKA_EVENT_ERROR 0x8 /**< Error */ +#define RD_KAFKA_EVENT_REBALANCE 0x10 /**< Group rebalance (consumer) */ +#define RD_KAFKA_EVENT_OFFSET_COMMIT 0x20 /**< Offset commit result */ +#define RD_KAFKA_EVENT_STATS 0x40 /**< Stats */ +#define RD_KAFKA_EVENT_CREATETOPICS_RESULT 100 /**< CreateTopics_result_t */ +#define RD_KAFKA_EVENT_DELETETOPICS_RESULT 101 /**< DeleteTopics_result_t */ +#define RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT \ + 102 /**< CreatePartitions_result_t */ #define RD_KAFKA_EVENT_ALTERCONFIGS_RESULT 103 /**< AlterConfigs_result_t */ -#define RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT 104 /**< DescribeConfigs_result_t */ +#define RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT \ + 104 /**< DescribeConfigs_result_t */ #define RD_KAFKA_EVENT_DELETERECORDS_RESULT 105 /**< DeleteRecords_result_t */ -#define RD_KAFKA_EVENT_DELETEGROUPS_RESULT 106 /**< DeleteGroups_result_t */ +#define RD_KAFKA_EVENT_DELETEGROUPS_RESULT 106 /**< DeleteGroups_result_t */ /** DeleteConsumerGroupOffsets_result_t */ #define RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT 107 -#define RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH 0x100 /**< SASL/OAUTHBEARER - token needs to be - refreshed */ - +/** SASL/OAUTHBEARER token needs to be refreshed */ +#define RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH 0x100 +#define RD_KAFKA_EVENT_BACKGROUND 0x200 /**< Enable background thread. */ +#define RD_KAFKA_EVENT_CREATEACLS_RESULT 0x400 /**< CreateAcls_result_t */ +#define RD_KAFKA_EVENT_DESCRIBEACLS_RESULT 0x800 /**< DescribeAcls_result_t */ +#define RD_KAFKA_EVENT_DELETEACLS_RESULT 0x1000 /**< DeleteAcls_result_t */ /** * @returns the event type for the given event. @@ -4956,7 +5149,7 @@ typedef int rd_kafka_event_type_t; * RD_KAFKA_EVENT_NONE is returned. */ RD_EXPORT -rd_kafka_event_type_t rd_kafka_event_type (const rd_kafka_event_t *rkev); +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev); /** * @returns the event type's name for the given event. 
@@ -4965,7 +5158,7 @@ rd_kafka_event_type_t rd_kafka_event_type (const rd_kafka_event_t *rkev); * the name for RD_KAFKA_EVENT_NONE is returned. */ RD_EXPORT -const char *rd_kafka_event_name (const rd_kafka_event_t *rkev); +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev); /** @@ -4978,7 +5171,7 @@ const char *rd_kafka_event_name (const rd_kafka_event_t *rkev); * no action is performed. */ RD_EXPORT -void rd_kafka_event_destroy (rd_kafka_event_t *rkev); +void rd_kafka_event_destroy(rd_kafka_event_t *rkev); /** @@ -4997,7 +5190,7 @@ void rd_kafka_event_destroy (rd_kafka_event_t *rkev); * from this function prior to passing message to application. */ RD_EXPORT -const rd_kafka_message_t *rd_kafka_event_message_next (rd_kafka_event_t *rkev); +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev); /** @@ -5014,9 +5207,9 @@ const rd_kafka_message_t *rd_kafka_event_message_next (rd_kafka_event_t *rkev); * from this function prior to passing message to application. */ RD_EXPORT -size_t rd_kafka_event_message_array (rd_kafka_event_t *rkev, - const rd_kafka_message_t **rkmessages, - size_t size); +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, + const rd_kafka_message_t **rkmessages, + size_t size); /** @@ -5027,7 +5220,7 @@ size_t rd_kafka_event_message_array (rd_kafka_event_t *rkev, * - RD_KAFKA_EVENT_DR (>=1 message(s)) */ RD_EXPORT -size_t rd_kafka_event_message_count (rd_kafka_event_t *rkev); +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev); /** @@ -5042,7 +5235,7 @@ size_t rd_kafka_event_message_count (rd_kafka_event_t *rkev); * - RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: value of sasl.oauthbearer.config */ RD_EXPORT -const char *rd_kafka_event_config_string (rd_kafka_event_t *rkev); +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev); /** @@ -5054,7 +5247,7 @@ const char *rd_kafka_event_config_string (rd_kafka_event_t *rkev); * - all */ RD_EXPORT -rd_kafka_resp_err_t rd_kafka_event_error (rd_kafka_event_t *rkev); +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev); /** @@ -5066,7 +5259,7 @@ rd_kafka_resp_err_t rd_kafka_event_error (rd_kafka_event_t *rkev); * - all */ RD_EXPORT -const char *rd_kafka_event_error_string (rd_kafka_event_t *rkev); +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev); /** @@ -5078,7 +5271,7 @@ const char *rd_kafka_event_error_string (rd_kafka_event_t *rkev); * @sa rd_kafka_fatal_error() */ RD_EXPORT -int rd_kafka_event_error_is_fatal (rd_kafka_event_t *rkev); +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev); /** @@ -5090,6 +5283,9 @@ int rd_kafka_event_error_is_fatal (rd_kafka_event_t *rkev); * - RD_KAFKA_EVENT_CREATETOPICS_RESULT * - RD_KAFKA_EVENT_DELETETOPICS_RESULT * - RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT + * - RD_KAFKA_EVENT_CREATEACLS_RESULT + * - RD_KAFKA_EVENT_DESCRIBEACLS_RESULT + * - RD_KAFKA_EVENT_DELETEACLS_RESULT * - RD_KAFKA_EVENT_ALTERCONFIGS_RESULT * - RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT * - RD_KAFKA_EVENT_DELETEGROUPS_RESULT @@ -5097,7 +5293,7 @@ int rd_kafka_event_error_is_fatal (rd_kafka_event_t *rkev); * - RD_KAFKA_EVENT_DELETERECORDS_RESULT */ RD_EXPORT -void *rd_kafka_event_opaque (rd_kafka_event_t *rkev); +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev); /** @@ -5109,8 +5305,10 @@ void *rd_kafka_event_opaque (rd_kafka_event_t *rkev); * @returns 0 on success or -1 if unsupported event type. 
*/ RD_EXPORT -int rd_kafka_event_log (rd_kafka_event_t *rkev, - const char **fac, const char **str, int *level); +int rd_kafka_event_log(rd_kafka_event_t *rkev, + const char **fac, + const char **str, + int *level); /** @@ -5125,8 +5323,9 @@ int rd_kafka_event_log (rd_kafka_event_t *rkev, * @returns 0 on success or -1 if unsupported event type. */ RD_EXPORT -int rd_kafka_event_debug_contexts (rd_kafka_event_t *rkev, - char *dst, size_t dstsize); +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, + char *dst, + size_t dstsize); /** @@ -5137,43 +5336,52 @@ int rd_kafka_event_debug_contexts (rd_kafka_event_t *rkev, * * @returns stats json string. * - * @remark the returned string will be freed automatically along with the event object + * @remark the returned string will be freed automatically along with the event + * object * */ RD_EXPORT -const char *rd_kafka_event_stats (rd_kafka_event_t *rkev); +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev); /** * @returns the topic partition list from the event. * - * @remark The list MUST NOT be freed with rd_kafka_topic_partition_list_destroy() + * @remark The list MUST NOT be freed with + * rd_kafka_topic_partition_list_destroy() * * Event types: * - RD_KAFKA_EVENT_REBALANCE * - RD_KAFKA_EVENT_OFFSET_COMMIT */ RD_EXPORT rd_kafka_topic_partition_list_t * -rd_kafka_event_topic_partition_list (rd_kafka_event_t *rkev); +rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev); /** - * @returns a newly allocated topic_partition container, if applicable for the event type, - * else NULL. + * @returns a newly allocated topic_partition container, if applicable for the + * event type, else NULL. * - * @remark The returned pointer MUST be freed with rd_kafka_topic_partition_destroy(). + * @remark The returned pointer MUST be freed with + * rd_kafka_topic_partition_destroy(). * * Event types: * RD_KAFKA_EVENT_ERROR (for partition level errors) */ RD_EXPORT rd_kafka_topic_partition_t * -rd_kafka_event_topic_partition (rd_kafka_event_t *rkev); +rd_kafka_event_topic_partition(rd_kafka_event_t *rkev); /*! CreateTopics result type */ typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t; /*! DeleteTopics result type */ typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t; +/*! CreateAcls result type */ +typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t; +/*! DescribeAcls result type */ +typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t; +/*! DeleteAcls result type */ +typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t; /*! CreatePartitions result type */ typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t; /*! AlterConfigs result type */ @@ -5197,7 +5405,7 @@ typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t; * RD_KAFKA_EVENT_CREATETOPICS_RESULT */ RD_EXPORT const rd_kafka_CreateTopics_result_t * -rd_kafka_event_CreateTopics_result (rd_kafka_event_t *rkev); +rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev); /** * @brief Get DeleteTopics result. @@ -5209,7 +5417,7 @@ rd_kafka_event_CreateTopics_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_DELETETOPICS_RESULT */ RD_EXPORT const rd_kafka_DeleteTopics_result_t * -rd_kafka_event_DeleteTopics_result (rd_kafka_event_t *rkev); +rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev); /** * @brief Get CreatePartitions result. 
@@ -5221,7 +5429,7 @@ rd_kafka_event_DeleteTopics_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT */ RD_EXPORT const rd_kafka_CreatePartitions_result_t * -rd_kafka_event_CreatePartitions_result (rd_kafka_event_t *rkev); +rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev); /** * @brief Get AlterConfigs result. @@ -5233,7 +5441,7 @@ rd_kafka_event_CreatePartitions_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_ALTERCONFIGS_RESULT */ RD_EXPORT const rd_kafka_AlterConfigs_result_t * -rd_kafka_event_AlterConfigs_result (rd_kafka_event_t *rkev); +rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev); /** * @brief Get DescribeConfigs result. @@ -5245,7 +5453,7 @@ rd_kafka_event_AlterConfigs_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT */ RD_EXPORT const rd_kafka_DescribeConfigs_result_t * -rd_kafka_event_DescribeConfigs_result (rd_kafka_event_t *rkev); +rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev); /** * @returns the result of a DeleteRecords request, or NULL if event is of @@ -5255,7 +5463,7 @@ rd_kafka_event_DescribeConfigs_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_DELETERECORDS_RESULT */ RD_EXPORT const rd_kafka_DeleteRecords_result_t * -rd_kafka_event_DeleteRecords_result (rd_kafka_event_t *rkev); +rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev); /** * @brief Get DeleteGroups result. @@ -5267,7 +5475,7 @@ rd_kafka_event_DeleteRecords_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_DELETEGROUPS_RESULT */ RD_EXPORT const rd_kafka_DeleteGroups_result_t * -rd_kafka_event_DeleteGroups_result (rd_kafka_event_t *rkev); +rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev); /** * @brief Get DeleteConsumerGroupOffsets result. @@ -5279,7 +5487,37 @@ rd_kafka_event_DeleteGroups_result (rd_kafka_event_t *rkev); * RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT */ RD_EXPORT const rd_kafka_DeleteConsumerGroupOffsets_result_t * -rd_kafka_event_DeleteConsumerGroupOffsets_result (rd_kafka_event_t *rkev); +rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev); + +/** + * @returns the result of a CreateAcls request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_CREATEACLS_RESULT + */ +RD_EXPORT const rd_kafka_CreateAcls_result_t * +rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev); + +/** + * @returns the result of a DescribeAcls request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBEACLS_RESULT + */ +RD_EXPORT const rd_kafka_DescribeAcls_result_t * +rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev); + +/** + * @returns the result of a DeleteAcls request, or NULL if event is of + * different type. + * + * Event types: + * RD_KAFKA_EVENT_DELETEACLS_RESULT + */ +RD_EXPORT const rd_kafka_DeleteAcls_result_t * +rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev); /** * @brief Poll a queue for an event for max \p timeout_ms. @@ -5291,23 +5529,24 @@ rd_kafka_event_DeleteConsumerGroupOffsets_result (rd_kafka_event_t *rkev); * @sa rd_kafka_conf_set_background_event_cb() */ RD_EXPORT -rd_kafka_event_t *rd_kafka_queue_poll (rd_kafka_queue_t *rkqu, int timeout_ms); +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms); /** -* @brief Poll a queue for events served through callbacks for max \p timeout_ms. -* -* @returns the number of events served. -* -* @remark This API must only be used for queues with callbacks registered -* for all expected event types. 
E.g., not a message queue. -* -* @remark Also see rd_kafka_conf_set_background_event_cb() for triggering -* event callbacks from a librdkafka-managed background thread. -* -* @sa rd_kafka_conf_set_background_event_cb() -*/ + * @brief Poll a queue for events served through callbacks for max \p + * timeout_ms. + * + * @returns the number of events served. + * + * @remark This API must only be used for queues with callbacks registered + * for all expected event types. E.g., not a message queue. + * + * @remark Also see rd_kafka_conf_set_background_event_cb() for triggering + * event callbacks from a librdkafka-managed background thread. + * + * @sa rd_kafka_conf_set_background_event_cb() + */ RD_EXPORT -int rd_kafka_queue_poll_callback (rd_kafka_queue_t *rkqu, int timeout_ms); +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms); /**@}*/ @@ -5352,10 +5591,11 @@ int rd_kafka_queue_poll_callback (rd_kafka_queue_t *rkqu, int timeout_ms); * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error. */ -typedef rd_kafka_resp_err_t -(rd_kafka_plugin_f_conf_init_t) (rd_kafka_conf_t *conf, - void **plug_opaquep, - char *errstr, size_t errstr_size); +typedef rd_kafka_resp_err_t(rd_kafka_plugin_f_conf_init_t)( + rd_kafka_conf_t *conf, + void **plug_opaquep, + char *errstr, + size_t errstr_size); /**@}*/ @@ -5421,6 +5661,7 @@ typedef rd_kafka_resp_err_t * @brief on_conf_set() is called from rd_kafka_*_conf_set() in the order * the interceptors were added. * + * @param conf Configuration object. * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). * @param name The configuration property to set. * @param val The configuration value to set, or NULL for reverting to default @@ -5436,11 +5677,13 @@ typedef rd_kafka_resp_err_t * interceptor in the chain, finally ending up at the built-in * configuration handler. */ -typedef rd_kafka_conf_res_t -(rd_kafka_interceptor_f_on_conf_set_t) (rd_kafka_conf_t *conf, - const char *name, const char *val, - char *errstr, size_t errstr_size, - void *ic_opaque); +typedef rd_kafka_conf_res_t(rd_kafka_interceptor_f_on_conf_set_t)( + rd_kafka_conf_t *conf, + const char *name, + const char *val, + char *errstr, + size_t errstr_size, + void *ic_opaque); /** @@ -5452,6 +5695,11 @@ typedef rd_kafka_conf_res_t * \p old_conf being copied to \p new_conf. * * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * @param new_conf New configuration object. + * @param old_conf Old configuration object to copy properties from. + * @param filter_cnt Number of property names to filter in \p filter. + * @param filter Property names to filter out (ignore) when setting up + * \p new_conf. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code * on failure (which is logged but otherwise ignored). @@ -5459,12 +5707,12 @@ typedef rd_kafka_conf_res_t * @remark No on_conf_* interceptors are copied to the new configuration * object on rd_kafka_conf_dup(). */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_conf_dup_t) (rd_kafka_conf_t *new_conf, - const rd_kafka_conf_t *old_conf, - size_t filter_cnt, - const char **filter, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_dup_t)( + rd_kafka_conf_t *new_conf, + const rd_kafka_conf_t *old_conf, + size_t filter_cnt, + const char **filter, + void *ic_opaque); /** @@ -5473,8 +5721,8 @@ typedef rd_kafka_resp_err_t * * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). 
*/ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_conf_destroy_t) (void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_destroy_t)( + void *ic_opaque); /** @@ -5494,10 +5742,12 @@ typedef rd_kafka_resp_err_t * other rk-specific APIs than rd_kafka_interceptor_add..(). * */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_new_t) (rd_kafka_t *rk, const rd_kafka_conf_t *conf, - void *ic_opaque, - char *errstr, size_t errstr_size); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_new_t)( + rd_kafka_t *rk, + const rd_kafka_conf_t *conf, + void *ic_opaque, + char *errstr, + size_t errstr_size); /** @@ -5507,9 +5757,8 @@ typedef rd_kafka_resp_err_t * @param rk The client instance. * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_destroy_t) (rd_kafka_t *rk, void *ic_opaque); - +typedef rd_kafka_resp_err_t( + rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque); @@ -5533,10 +5782,10 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_send_t) (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_send_t)( + rd_kafka_t *rk, + rd_kafka_message_t *rkmessage, + void *ic_opaque); /** * @brief on_acknowledgement() is called to inform interceptors that a message @@ -5560,10 +5809,10 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_acknowledgement_t) (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_acknowledgement_t)( + rd_kafka_t *rk, + rd_kafka_message_t *rkmessage, + void *ic_opaque); /** @@ -5582,10 +5831,10 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_consume_t) (rd_kafka_t *rk, - rd_kafka_message_t *rkmessage, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_consume_t)( + rd_kafka_t *rk, + rd_kafka_message_t *rkmessage, + void *ic_opaque); /** * @brief on_commit() is called on completed or failed offset commit. @@ -5595,6 +5844,7 @@ typedef rd_kafka_resp_err_t * @param offsets List of topic+partition+offset+error that were committed. * The error message of each partition should be checked for * error. + * @param err The commit error, if any. * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). * * @remark This interceptor is only used by consumer instances. @@ -5607,11 +5857,11 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_commit_t) ( - rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - rd_kafka_resp_err_t err, void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_commit_t)( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + rd_kafka_resp_err_t err, + void *ic_opaque); /** @@ -5624,7 +5874,7 @@ typedef rd_kafka_resp_err_t * @param brokerid Broker request is being sent to. * @param ApiKey Kafka protocol request type. * @param ApiVersion Kafka protocol request type version. 
- * @param Corrid Kafka protocol request correlation id. + * @param CorrId Kafka protocol request correlation id. * @param size Size of request. * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). * @@ -5635,17 +5885,16 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_request_sent_t) ( - rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_request_sent_t)( + rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + void *ic_opaque); /** @@ -5660,7 +5909,7 @@ typedef rd_kafka_resp_err_t * @param brokerid Broker response was received from. * @param ApiKey Kafka protocol request type or -1 on error. * @param ApiVersion Kafka protocol request type version or -1 on error. - * @param Corrid Kafka protocol request correlation id, possibly -1 on error. + * @param CorrId Kafka protocol request correlation id, possibly -1 on error. * @param size Size of response, possibly 0 on error. * @param rtt Request round-trip-time in microseconds, possibly -1 on error. * @param err Receive error. @@ -5673,19 +5922,18 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_response_received_t) ( - rd_kafka_t *rk, - int sockfd, - const char *brokername, - int32_t brokerid, - int16_t ApiKey, - int16_t ApiVersion, - int32_t CorrId, - size_t size, - int64_t rtt, - rd_kafka_resp_err_t err, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_response_received_t)( + rd_kafka_t *rk, + int sockfd, + const char *brokername, + int32_t brokerid, + int16_t ApiKey, + int16_t ApiVersion, + int32_t CorrId, + size_t size, + int64_t rtt, + rd_kafka_resp_err_t err, + void *ic_opaque); /** @@ -5704,12 +5952,11 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_thread_start_t) ( - rd_kafka_t *rk, - rd_kafka_thread_type_t thread_type, - const char *thread_name, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_start_t)( + rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type, + const char *thread_name, + void *ic_opaque); /** @@ -5731,12 +5978,11 @@ typedef rd_kafka_resp_err_t * * @returns an error code on failure, the error is logged but otherwise ignored. */ -typedef rd_kafka_resp_err_t -(rd_kafka_interceptor_f_on_thread_exit_t) ( - rd_kafka_t *rk, - rd_kafka_thread_type_t thread_type, - const char *thread_name, - void *ic_opaque); +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_exit_t)( + rd_kafka_t *rk, + rd_kafka_thread_type_t thread_type, + const char *thread_name, + void *ic_opaque); @@ -5752,11 +5998,11 @@ typedef rd_kafka_resp_err_t * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. 
*/ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_set ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, + void *ic_opaque); /** @@ -5771,11 +6017,11 @@ rd_kafka_conf_interceptor_add_on_conf_set ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_dup ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, + void *ic_opaque); /** * @brief Append an on_conf_destroy() interceptor. @@ -5790,11 +6036,11 @@ rd_kafka_conf_interceptor_add_on_conf_dup ( * @remark Multiple on_conf_destroy() interceptors are allowed to be added * to the same configuration object. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_conf_destroy ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy( + rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, + void *ic_opaque); /** @@ -5804,7 +6050,7 @@ rd_kafka_conf_interceptor_add_on_conf_destroy ( * @param ic_name Interceptor name, used in logging. * @param on_new Function pointer. * @param ic_opaque Opaque value that will be passed to the function. - * + * * @remark Since the on_new() interceptor is added to the configuration object * it may be copied by rd_kafka_conf_dup(). * An interceptor implementation must thus be able to handle @@ -5819,10 +6065,10 @@ rd_kafka_conf_interceptor_add_on_conf_destroy ( * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_conf_interceptor_add_on_new ( - rd_kafka_conf_t *conf, const char *ic_name, - rd_kafka_interceptor_f_on_new_t *on_new, - void *ic_opaque); +rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, + const char *ic_name, + rd_kafka_interceptor_f_on_new_t *on_new, + void *ic_opaque); @@ -5838,11 +6084,11 @@ rd_kafka_conf_interceptor_add_on_new ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_destroy ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_destroy_t *on_destroy, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_destroy_t *on_destroy, + void *ic_opaque); /** @@ -5858,10 +6104,10 @@ rd_kafka_interceptor_add_on_destroy ( * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_send ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_send_t *on_send, - void *ic_opaque); +rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_send_t *on_send, + void *ic_opaque); /** * @brief Append an on_acknowledgement() interceptor. 
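As a concrete illustration of the interceptor registration functions, a hypothetical on_send() interceptor that simply counts produced messages could look like the sketch below; the interceptor name and counter are made up for the example:

#include <librdkafka/rdkafka.h>   /* include path is an assumption */

/* Matches rd_kafka_interceptor_f_on_send_t: called before a message is
 * enqueued for transmission; returned errors are logged but ignored. */
static rd_kafka_resp_err_t on_send_count(rd_kafka_t *rk,
                                         rd_kafka_message_t *rkmessage,
                                         void *ic_opaque) {
        int *counter = ic_opaque;
        (void)rk;
        (void)rkmessage;
        (*counter)++;
        return RD_KAFKA_RESP_ERR_NO_ERROR;
}

/* Registration on an existing producer instance; `counter` must outlive
 * the client instance. */
static rd_kafka_resp_err_t add_send_counter(rd_kafka_t *rk, int *counter) {
        return rd_kafka_interceptor_add_on_send(rk, "example_send_counter",
                                                on_send_count, counter);
}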
@@ -5875,11 +6121,11 @@ rd_kafka_interceptor_add_on_send ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_acknowledgement ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, + void *ic_opaque); /** @@ -5894,11 +6140,11 @@ rd_kafka_interceptor_add_on_acknowledgement ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_consume ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_consume_t *on_consume, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_consume_t *on_consume, + void *ic_opaque); /** @@ -5913,11 +6159,11 @@ rd_kafka_interceptor_add_on_consume ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_commit ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_commit_t *on_commit, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_commit_t *on_commit, + void *ic_opaque); /** @@ -5932,11 +6178,11 @@ rd_kafka_interceptor_add_on_commit ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_request_sent ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, + void *ic_opaque); /** @@ -5951,11 +6197,11 @@ rd_kafka_interceptor_add_on_request_sent ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_response_received ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_response_received_t *on_response_received, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_response_received_t *on_response_received, + void *ic_opaque); /** @@ -5970,11 +6216,11 @@ rd_kafka_interceptor_add_on_response_received ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_thread_start ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, + void *ic_opaque); /** @@ -5989,11 +6235,11 @@ rd_kafka_interceptor_add_on_thread_start ( * if an existing intercepted with the same \p ic_name and function * has already been added to \p conf. 
*/ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_interceptor_add_on_thread_exit ( - rd_kafka_t *rk, const char *ic_name, - rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, - void *ic_opaque); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, + void *ic_opaque); @@ -6018,7 +6264,7 @@ rd_kafka_interceptor_add_on_thread_exit ( * @returns the error code for the given topic result. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_topic_result_error (const rd_kafka_topic_result_t *topicres); +rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres); /** * @returns the human readable error string for the given topic result, @@ -6027,7 +6273,7 @@ rd_kafka_topic_result_error (const rd_kafka_topic_result_t *topicres); * @remark lifetime of the returned string is the same as the \p topicres. */ RD_EXPORT const char * -rd_kafka_topic_result_error_string (const rd_kafka_topic_result_t *topicres); +rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres); /** * @returns the name of the topic for the given topic result. @@ -6035,7 +6281,7 @@ rd_kafka_topic_result_error_string (const rd_kafka_topic_result_t *topicres); * */ RD_EXPORT const char * -rd_kafka_topic_result_name (const rd_kafka_topic_result_t *topicres); +rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres); /** * @brief Group result provides per-group operation result information. @@ -6047,7 +6293,7 @@ rd_kafka_topic_result_name (const rd_kafka_topic_result_t *topicres); * @remark lifetime of the returned error is the same as the \p groupres. */ RD_EXPORT const rd_kafka_error_t * -rd_kafka_group_result_error (const rd_kafka_group_result_t *groupres); +rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres); /** * @returns the name of the group for the given group result. @@ -6055,7 +6301,7 @@ rd_kafka_group_result_error (const rd_kafka_group_result_t *groupres); * */ RD_EXPORT const char * -rd_kafka_group_result_name (const rd_kafka_group_result_t *groupres); +rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres); /** * @returns the partitions/offsets for the given group result, if applicable @@ -6063,7 +6309,7 @@ rd_kafka_group_result_name (const rd_kafka_group_result_t *groupres); * @remark lifetime of the returned list is the same as the \p groupres. */ RD_EXPORT const rd_kafka_topic_partition_list_t * -rd_kafka_group_result_partitions (const rd_kafka_group_result_t *groupres); +rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres); /**@}*/ @@ -6102,7 +6348,7 @@ rd_kafka_group_result_partitions (const rd_kafka_group_result_t *groupres); * Locally triggered errors: * - \c RD_KAFKA_RESP_ERR__TIMED_OUT - (Controller) broker connection did not * become available in the time allowed by AdminOption_set_request_timeout. - */ + */ /** @@ -6123,7 +6369,10 @@ typedef enum rd_kafka_admin_op_t { RD_KAFKA_ADMIN_OP_DELETEGROUPS, /**< DeleteGroups */ /** DeleteConsumerGroupOffsets */ RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS, - RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */ + RD_KAFKA_ADMIN_OP_CREATEACLS, /**< CreateAcls */ + RD_KAFKA_ADMIN_OP_DESCRIBEACLS, /**< DescribeAcls */ + RD_KAFKA_ADMIN_OP_DELETEACLS, /**< DeleteAcls */ + RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */ } rd_kafka_admin_op_t; /** @@ -6160,13 +6409,13 @@ typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t; * an unknown API op type. 
*/ RD_EXPORT rd_kafka_AdminOptions_t * -rd_kafka_AdminOptions_new (rd_kafka_t *rk, rd_kafka_admin_op_t for_api); +rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api); /** * @brief Destroy a AdminOptions object. */ -RD_EXPORT void rd_kafka_AdminOptions_destroy (rd_kafka_AdminOptions_t *options); +RD_EXPORT void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options); /** @@ -6188,9 +6437,10 @@ RD_EXPORT void rd_kafka_AdminOptions_destroy (rd_kafka_AdminOptions_t *options); * @remark This option is valid for all Admin API requests. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_request_timeout (rd_kafka_AdminOptions_t *options, - int timeout_ms, - char *errstr, size_t errstr_size); +rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, + int timeout_ms, + char *errstr, + size_t errstr_size); /** @@ -6220,9 +6470,10 @@ rd_kafka_AdminOptions_set_request_timeout (rd_kafka_AdminOptions_t *options, * CreatePartitions, and DeleteRecords. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_operation_timeout (rd_kafka_AdminOptions_t *options, - int timeout_ms, - char *errstr, size_t errstr_size); +rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, + int timeout_ms, + char *errstr, + size_t errstr_size); /** @@ -6244,9 +6495,10 @@ rd_kafka_AdminOptions_set_operation_timeout (rd_kafka_AdminOptions_t *options, * CreatePartitions, AlterConfigs. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_validate_only (rd_kafka_AdminOptions_t *options, +rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, - char *errstr, size_t errstr_size); + char *errstr, + size_t errstr_size); /** @@ -6275,9 +6527,10 @@ rd_kafka_AdminOptions_set_validate_only (rd_kafka_AdminOptions_t *options, * does not know where to send. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_AdminOptions_set_broker (rd_kafka_AdminOptions_t *options, - int32_t broker_id, - char *errstr, size_t errstr_size); +rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, + int32_t broker_id, + char *errstr, + size_t errstr_size); @@ -6286,11 +6539,8 @@ rd_kafka_AdminOptions_set_broker (rd_kafka_AdminOptions_t *options, * result event using rd_kafka_event_opaque() */ RD_EXPORT void -rd_kafka_AdminOptions_set_opaque (rd_kafka_AdminOptions_t *options, - void *ev_opaque); - - - +rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, + void *ev_opaque); @@ -6324,17 +6574,17 @@ typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t; * are invalid. * Use rd_kafka_NewTopic_destroy() to free object when done. */ -RD_EXPORT rd_kafka_NewTopic_t * -rd_kafka_NewTopic_new (const char *topic, int num_partitions, - int replication_factor, - char *errstr, size_t errstr_size); +RD_EXPORT rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, + int num_partitions, + int replication_factor, + char *errstr, + size_t errstr_size); /** * @brief Destroy and free a NewTopic object previously created with * rd_kafka_NewTopic_new() */ -RD_EXPORT void -rd_kafka_NewTopic_destroy (rd_kafka_NewTopic_t *new_topic); +RD_EXPORT void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic); /** @@ -6342,9 +6592,8 @@ rd_kafka_NewTopic_destroy (rd_kafka_NewTopic_t *new_topic); * array (of \p new_topic_cnt elements). * The array itself is not freed. 
*/ -RD_EXPORT void -rd_kafka_NewTopic_destroy_array (rd_kafka_NewTopic_t **new_topics, - size_t new_topic_cnt); +RD_EXPORT void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, + size_t new_topic_cnt); /** @@ -6369,11 +6618,12 @@ rd_kafka_NewTopic_destroy_array (rd_kafka_NewTopic_t **new_topics, * @sa rd_kafka_AdminOptions_set_validate_only() */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, - int32_t partition, - int32_t *broker_ids, - size_t broker_id_cnt, - char *errstr, size_t errstr_size); +rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, + int32_t partition, + int32_t *broker_ids, + size_t broker_id_cnt, + char *errstr, + size_t errstr_size); /** * @brief Set (broker-side) topic configuration name/value pair. @@ -6388,8 +6638,9 @@ rd_kafka_NewTopic_set_replica_assignment (rd_kafka_NewTopic_t *new_topic, * @sa http://kafka.apache.org/documentation.html#topicconfigs */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_NewTopic_set_config (rd_kafka_NewTopic_t *new_topic, - const char *name, const char *value); +rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, + const char *name, + const char *value); /** @@ -6410,12 +6661,11 @@ rd_kafka_NewTopic_set_config (rd_kafka_NewTopic_t *new_topic, * @remark The result event type emitted on the supplied queue is of type * \c RD_KAFKA_EVENT_CREATETOPICS_RESULT */ -RD_EXPORT void -rd_kafka_CreateTopics (rd_kafka_t *rk, - rd_kafka_NewTopic_t **new_topics, - size_t new_topic_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +RD_EXPORT void rd_kafka_CreateTopics(rd_kafka_t *rk, + rd_kafka_NewTopic_t **new_topics, + size_t new_topic_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); /* @@ -6430,12 +6680,9 @@ rd_kafka_CreateTopics (rd_kafka_t *rk, * @param result Result to get topics from. * @param cntp Updated to the number of elements in the array. */ -RD_EXPORT const rd_kafka_topic_result_t ** -rd_kafka_CreateTopics_result_topics ( - const rd_kafka_CreateTopics_result_t *result, - size_t *cntp); - - +RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics( + const rd_kafka_CreateTopics_result_t *result, + size_t *cntp); @@ -6456,15 +6703,13 @@ typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t; * @returns a new allocated DeleteTopic object. * Use rd_kafka_DeleteTopic_destroy() to free object when done. */ -RD_EXPORT rd_kafka_DeleteTopic_t * -rd_kafka_DeleteTopic_new (const char *topic); +RD_EXPORT rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic); /** * @brief Destroy and free a DeleteTopic object previously created with * rd_kafka_DeleteTopic_new() */ -RD_EXPORT void -rd_kafka_DeleteTopic_destroy (rd_kafka_DeleteTopic_t *del_topic); +RD_EXPORT void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic); /** * @brief Helper function to destroy all DeleteTopic objects in @@ -6472,8 +6717,8 @@ rd_kafka_DeleteTopic_destroy (rd_kafka_DeleteTopic_t *del_topic); * The array itself is not freed. 
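Putting the NewTopic, AdminOptions and result accessors together, a hedged sketch of a blocking CreateTopics round trip follows; the topic name, partition counts and timeouts are placeholders, and a robust caller would also check the event type before reading the result (same includes as the earlier sketch):

/* Sketch: create one topic and wait for the result on a private queue. */
static void create_example_topic(rd_kafka_t *rk) {
        char errstr[512];
        rd_kafka_NewTopic_t *nt = rd_kafka_NewTopic_new(
            "example-topic", 3 /* partitions */, 1 /* replication */,
            errstr, sizeof(errstr));
        if (!nt) {
                fprintf(stderr, "NewTopic_new: %s\n", errstr);
                return;
        }
        rd_kafka_NewTopic_set_config(nt, "cleanup.policy", "compact");

        rd_kafka_AdminOptions_t *opts =
            rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);
        rd_kafka_AdminOptions_set_request_timeout(opts, 10000, errstr,
                                                  sizeof(errstr));

        rd_kafka_queue_t *q = rd_kafka_queue_new(rk);
        rd_kafka_CreateTopics(rk, &nt, 1, opts, q);

        /* Block for the result; a robust caller checks rd_kafka_event_type()
         * before converting the event. */
        rd_kafka_event_t *rkev = rd_kafka_queue_poll(q, -1);
        const rd_kafka_CreateTopics_result_t *res =
            rd_kafka_event_CreateTopics_result(rkev);
        size_t cnt;
        const rd_kafka_topic_result_t **topics =
            rd_kafka_CreateTopics_result_topics(res, &cnt);
        for (size_t i = 0; i < cnt; i++) {
                const char *err =
                    rd_kafka_topic_result_error_string(topics[i]);
                fprintf(stderr, "%s: %s\n",
                        rd_kafka_topic_result_name(topics[i]),
                        err ? err : "ok");
        }

        rd_kafka_event_destroy(rkev);
        rd_kafka_queue_destroy(q);
        rd_kafka_AdminOptions_destroy(opts);
        rd_kafka_NewTopic_destroy(nt);
}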
*/ RD_EXPORT void -rd_kafka_DeleteTopic_destroy_array (rd_kafka_DeleteTopic_t **del_topics, - size_t del_topic_cnt); +rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, + size_t del_topic_cnt); /** * @brief Delete topics from cluster as specified by the \p topics @@ -6489,11 +6734,11 @@ rd_kafka_DeleteTopic_destroy_array (rd_kafka_DeleteTopic_t **del_topics, * \c RD_KAFKA_EVENT_DELETETOPICS_RESULT */ RD_EXPORT -void rd_kafka_DeleteTopics (rd_kafka_t *rk, - rd_kafka_DeleteTopic_t **del_topics, - size_t del_topic_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +void rd_kafka_DeleteTopics(rd_kafka_t *rk, + rd_kafka_DeleteTopic_t **del_topics, + size_t del_topic_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); @@ -6509,13 +6754,9 @@ void rd_kafka_DeleteTopics (rd_kafka_t *rk, * @param result Result to get topic results from. * @param cntp is updated to the number of elements in the array. */ -RD_EXPORT const rd_kafka_topic_result_t ** -rd_kafka_DeleteTopics_result_topics ( - const rd_kafka_DeleteTopics_result_t *result, - size_t *cntp); - - - +RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics( + const rd_kafka_DeleteTopics_result_t *result, + size_t *cntp); @@ -6544,15 +6785,17 @@ typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t; * Use rd_kafka_NewPartitions_destroy() to free object when done. */ RD_EXPORT rd_kafka_NewPartitions_t * -rd_kafka_NewPartitions_new (const char *topic, size_t new_total_cnt, - char *errstr, size_t errstr_size); +rd_kafka_NewPartitions_new(const char *topic, + size_t new_total_cnt, + char *errstr, + size_t errstr_size); /** * @brief Destroy and free a NewPartitions object previously created with * rd_kafka_NewPartitions_new() */ RD_EXPORT void -rd_kafka_NewPartitions_destroy (rd_kafka_NewPartitions_t *new_parts); +rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts); /** * @brief Helper function to destroy all NewPartitions objects in the @@ -6560,8 +6803,8 @@ rd_kafka_NewPartitions_destroy (rd_kafka_NewPartitions_t *new_parts); * The array itself is not freed. 
*/ RD_EXPORT void -rd_kafka_NewPartitions_destroy_array (rd_kafka_NewPartitions_t **new_parts, - size_t new_parts_cnt); +rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, + size_t new_parts_cnt); /** * @brief Set the replica (broker id) assignment for \p new_partition_idx to the @@ -6585,13 +6828,13 @@ rd_kafka_NewPartitions_destroy_array (rd_kafka_NewPartitions_t **new_parts, * * @sa rd_kafka_AdminOptions_set_validate_only() */ -RD_EXPORT rd_kafka_resp_err_t -rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *new_parts, - int32_t new_partition_idx, - int32_t *broker_ids, - size_t broker_id_cnt, - char *errstr, - size_t errstr_size); +RD_EXPORT rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment( + rd_kafka_NewPartitions_t *new_parts, + int32_t new_partition_idx, + int32_t *broker_ids, + size_t broker_id_cnt, + char *errstr, + size_t errstr_size); /** @@ -6612,12 +6855,11 @@ rd_kafka_NewPartitions_set_replica_assignment (rd_kafka_NewPartitions_t *new_par * @remark The result event type emitted on the supplied queue is of type * \c RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT */ -RD_EXPORT void -rd_kafka_CreatePartitions (rd_kafka_t *rk, - rd_kafka_NewPartitions_t **new_parts, - size_t new_parts_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +RD_EXPORT void rd_kafka_CreatePartitions(rd_kafka_t *rk, + rd_kafka_NewPartitions_t **new_parts, + size_t new_parts_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); @@ -6634,11 +6876,9 @@ rd_kafka_CreatePartitions (rd_kafka_t *rk, * @param cntp is updated to the number of elements in the array. */ RD_EXPORT const rd_kafka_topic_result_t ** -rd_kafka_CreatePartitions_result_topics ( - const rd_kafka_CreatePartitions_result_t *result, - size_t *cntp); - - +rd_kafka_CreatePartitions_result_topics( + const rd_kafka_CreatePartitions_result_t *result, + size_t *cntp); @@ -6683,7 +6923,7 @@ typedef enum rd_kafka_ConfigSource_t { * @returns a string representation of the \p confsource. */ RD_EXPORT const char * -rd_kafka_ConfigSource_name (rd_kafka_ConfigSource_t confsource); +rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource); /*! Apache Kafka configuration entry. */ @@ -6693,27 +6933,27 @@ typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t; * @returns the configuration property name */ RD_EXPORT const char * -rd_kafka_ConfigEntry_name (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry); /** * @returns the configuration value, may be NULL for sensitive or unset * properties. */ RD_EXPORT const char * -rd_kafka_ConfigEntry_value (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry); /** * @returns the config source. */ RD_EXPORT rd_kafka_ConfigSource_t -rd_kafka_ConfigEntry_source (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry); /** * @returns 1 if the config property is read-only on the broker, else 0. * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1. 
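Growing an existing topic follows the same request/result pattern; a brief sketch under the same assumptions as the CreateTopics example above (topic name and counts are placeholders):

/* Sketch: raise "example-topic" to 6 partitions in total. */
static void grow_example_topic(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
        char errstr[512];
        rd_kafka_NewPartitions_t *np = rd_kafka_NewPartitions_new(
            "example-topic", 6 /* new total count */, errstr, sizeof(errstr));
        rd_kafka_AdminOptions_t *opts =
            rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEPARTITIONS);
        /* CreatePartitions honours the broker-side operation timeout. */
        rd_kafka_AdminOptions_set_operation_timeout(opts, 30000, errstr,
                                                    sizeof(errstr));
        rd_kafka_CreatePartitions(rk, &np, 1, opts, rkqu);
        /* Poll rkqu for RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, read it with
         * rd_kafka_CreatePartitions_result_topics(), then destroy np and
         * opts as in the CreateTopics example. */
}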
*/ RD_EXPORT int -rd_kafka_ConfigEntry_is_read_only (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry); /** * @returns 1 if the config property is set to its default value on the broker, @@ -6721,7 +6961,7 @@ rd_kafka_ConfigEntry_is_read_only (const rd_kafka_ConfigEntry_t *entry); * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1. */ RD_EXPORT int -rd_kafka_ConfigEntry_is_default (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry); /** * @returns 1 if the config property contains sensitive information (such as @@ -6731,13 +6971,13 @@ rd_kafka_ConfigEntry_is_default (const rd_kafka_ConfigEntry_t *entry); * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1. */ RD_EXPORT int -rd_kafka_ConfigEntry_is_sensitive (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry); /** * @returns 1 if this entry is a synonym, else 0. */ RD_EXPORT int -rd_kafka_ConfigEntry_is_synonym (const rd_kafka_ConfigEntry_t *entry); +rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry); /** @@ -6751,27 +6991,53 @@ rd_kafka_ConfigEntry_is_synonym (const rd_kafka_ConfigEntry_t *entry); * otherwise returns NULL. */ RD_EXPORT const rd_kafka_ConfigEntry_t ** -rd_kafka_ConfigEntry_synonyms (const rd_kafka_ConfigEntry_t *entry, - size_t *cntp); - +rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, + size_t *cntp); -/*! Apache Kafka resource types */ +/** + * @enum rd_kafka_ResourceType_t + * @brief Apache Kafka resource types + */ typedef enum rd_kafka_ResourceType_t { RD_KAFKA_RESOURCE_UNKNOWN = 0, /**< Unknown */ - RD_KAFKA_RESOURCE_ANY = 1, /**< Any (used for lookups) */ - RD_KAFKA_RESOURCE_TOPIC = 2, /**< Topic */ - RD_KAFKA_RESOURCE_GROUP = 3, /**< Group */ - RD_KAFKA_RESOURCE_BROKER = 4, /**< Broker */ + RD_KAFKA_RESOURCE_ANY = 1, /**< Any (used for lookups) */ + RD_KAFKA_RESOURCE_TOPIC = 2, /**< Topic */ + RD_KAFKA_RESOURCE_GROUP = 3, /**< Group */ + RD_KAFKA_RESOURCE_BROKER = 4, /**< Broker */ RD_KAFKA_RESOURCE__CNT, /**< Number of resource types defined */ } rd_kafka_ResourceType_t; +/** + * @enum rd_kafka_ResourcePatternType_t + * @brief Apache Kafka pattern types + */ +typedef enum rd_kafka_ResourcePatternType_t { + /** Unknown */ + RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0, + /** Any (used for lookups) */ + RD_KAFKA_RESOURCE_PATTERN_ANY = 1, + /** Match: will perform pattern matching */ + RD_KAFKA_RESOURCE_PATTERN_MATCH = 2, + /** Literal: A literal resource name */ + RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3, + /** Prefixed: A prefixed resource name */ + RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4, + RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT, +} rd_kafka_ResourcePatternType_t; + +/** + * @returns a string representation of the \p resource_pattern_type + */ +RD_EXPORT const char *rd_kafka_ResourcePatternType_name( + rd_kafka_ResourcePatternType_t resource_pattern_type); + /** * @returns a string representation of the \p restype */ RD_EXPORT const char * -rd_kafka_ResourceType_name (rd_kafka_ResourceType_t restype); +rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype); /*! Apache Kafka configuration resource. 
*/ typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t; @@ -6786,15 +7052,15 @@ typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t; * @returns a newly allocated object */ RD_EXPORT rd_kafka_ConfigResource_t * -rd_kafka_ConfigResource_new (rd_kafka_ResourceType_t restype, - const char *resname); +rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, + const char *resname); /** * @brief Destroy and free a ConfigResource object previously created with * rd_kafka_ConfigResource_new() */ RD_EXPORT void -rd_kafka_ConfigResource_destroy (rd_kafka_ConfigResource_t *config); +rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config); /** @@ -6803,8 +7069,8 @@ rd_kafka_ConfigResource_destroy (rd_kafka_ConfigResource_t *config); * The array itself is not freed. */ RD_EXPORT void -rd_kafka_ConfigResource_destroy_array (rd_kafka_ConfigResource_t **config, - size_t config_cnt); +rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, + size_t config_cnt); /** @@ -6821,8 +7087,9 @@ rd_kafka_ConfigResource_destroy_array (rd_kafka_ConfigResource_t **config, * or RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input. */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_ConfigResource_set_config (rd_kafka_ConfigResource_t *config, - const char *name, const char *value); +rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, + const char *name, + const char *value); /** @@ -6834,8 +7101,8 @@ rd_kafka_ConfigResource_set_config (rd_kafka_ConfigResource_t *config, * @param cntp is updated to the number of elements in the array. */ RD_EXPORT const rd_kafka_ConfigEntry_t ** -rd_kafka_ConfigResource_configs (const rd_kafka_ConfigResource_t *config, - size_t *cntp); +rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, + size_t *cntp); @@ -6843,26 +7110,26 @@ rd_kafka_ConfigResource_configs (const rd_kafka_ConfigResource_t *config, * @returns the ResourceType for \p config */ RD_EXPORT rd_kafka_ResourceType_t -rd_kafka_ConfigResource_type (const rd_kafka_ConfigResource_t *config); +rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config); /** * @returns the name for \p config */ RD_EXPORT const char * -rd_kafka_ConfigResource_name (const rd_kafka_ConfigResource_t *config); +rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config); /** * @returns the error for this resource from an AlterConfigs request */ RD_EXPORT rd_kafka_resp_err_t -rd_kafka_ConfigResource_error (const rd_kafka_ConfigResource_t *config); +rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config); /** * @returns the error string for this resource from an AlterConfigs * request, or NULL if no error. */ RD_EXPORT const char * -rd_kafka_ConfigResource_error_string (const rd_kafka_ConfigResource_t *config); +rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config); /* @@ -6892,11 +7159,11 @@ rd_kafka_ConfigResource_error_string (const rd_kafka_ConfigResource_t *config); * */ RD_EXPORT -void rd_kafka_AlterConfigs (rd_kafka_t *rk, - rd_kafka_ConfigResource_t **configs, - size_t config_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +void rd_kafka_AlterConfigs(rd_kafka_t *rk, + rd_kafka_ConfigResource_t **configs, + size_t config_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); /* @@ -6918,12 +7185,9 @@ void rd_kafka_AlterConfigs (rd_kafka_t *rk, * @returns an array of ConfigResource elements, or NULL if not available. 
*/ RD_EXPORT const rd_kafka_ConfigResource_t ** -rd_kafka_AlterConfigs_result_resources ( - const rd_kafka_AlterConfigs_result_t *result, - size_t *cntp); - - - +rd_kafka_AlterConfigs_result_resources( + const rd_kafka_AlterConfigs_result_t *result, + size_t *cntp); @@ -6958,12 +7222,11 @@ rd_kafka_AlterConfigs_result_resources ( * in the resource. */ RD_EXPORT -void rd_kafka_DescribeConfigs (rd_kafka_t *rk, - rd_kafka_ConfigResource_t **configs, - size_t config_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); - +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, + rd_kafka_ConfigResource_t **configs, + size_t config_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); @@ -6980,9 +7243,9 @@ void rd_kafka_DescribeConfigs (rd_kafka_t *rk, * @param cntp is updated to the number of elements in the array. */ RD_EXPORT const rd_kafka_ConfigResource_t ** -rd_kafka_DescribeConfigs_result_resources ( - const rd_kafka_DescribeConfigs_result_t *result, - size_t *cntp); +rd_kafka_DescribeConfigs_result_resources( + const rd_kafka_DescribeConfigs_result_t *result, + size_t *cntp); /* @@ -7010,16 +7273,15 @@ typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t; * @returns a new allocated DeleteRecords object. * Use rd_kafka_DeleteRecords_destroy() to free object when done. */ -RD_EXPORT rd_kafka_DeleteRecords_t * -rd_kafka_DeleteRecords_new (const rd_kafka_topic_partition_list_t * - before_offsets); +RD_EXPORT rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new( + const rd_kafka_topic_partition_list_t *before_offsets); /** * @brief Destroy and free a DeleteRecords object previously created with * rd_kafka_DeleteRecords_new() */ RD_EXPORT void -rd_kafka_DeleteRecords_destroy (rd_kafka_DeleteRecords_t *del_records); +rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records); /** * @brief Helper function to destroy all DeleteRecords objects in @@ -7027,8 +7289,8 @@ rd_kafka_DeleteRecords_destroy (rd_kafka_DeleteRecords_t *del_records); * The array itself is not freed. */ RD_EXPORT void -rd_kafka_DeleteRecords_destroy_array (rd_kafka_DeleteRecords_t **del_records, - size_t del_record_cnt); +rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, + size_t del_record_cnt); /** * @brief Delete records (messages) in topic partitions older than the @@ -7051,12 +7313,11 @@ rd_kafka_DeleteRecords_destroy_array (rd_kafka_DeleteRecords_t **del_records, * @remark The result event type emitted on the supplied queue is of type * \c RD_KAFKA_EVENT_DELETERECORDS_RESULT */ -RD_EXPORT void -rd_kafka_DeleteRecords (rd_kafka_t *rk, - rd_kafka_DeleteRecords_t **del_records, - size_t del_record_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +RD_EXPORT void rd_kafka_DeleteRecords(rd_kafka_t *rk, + rd_kafka_DeleteRecords_t **del_records, + size_t del_record_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); /* @@ -7073,7 +7334,7 @@ rd_kafka_DeleteRecords (rd_kafka_t *rk, * The returned object's life-time is the same as the \p result object. */ RD_EXPORT const rd_kafka_topic_partition_list_t * -rd_kafka_DeleteRecords_result_offsets ( +rd_kafka_DeleteRecords_result_offsets( const rd_kafka_DeleteRecords_result_t *result); /* @@ -7094,15 +7355,13 @@ typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t; * @returns a new allocated DeleteGroup object. * Use rd_kafka_DeleteGroup_destroy() to free object when done. 
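A DescribeConfigs round trip is structured the same way, with ConfigResource as the request object and the ConfigEntry accessors on the result side. A hedged sketch, reusing the includes and placeholder names of the earlier examples:

/* Sketch: print the effective broker-side configuration of one topic. */
static void dump_topic_config(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
        rd_kafka_ConfigResource_t *cr = rd_kafka_ConfigResource_new(
            RD_KAFKA_RESOURCE_TOPIC, "example-topic" /* placeholder */);
        rd_kafka_AdminOptions_t *opts =
            rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS);

        rd_kafka_DescribeConfigs(rk, &cr, 1, opts, rkqu);

        rd_kafka_event_t *rkev = rd_kafka_queue_poll(rkqu, -1);
        const rd_kafka_DescribeConfigs_result_t *res =
            rd_kafka_event_DescribeConfigs_result(rkev);
        size_t rcnt;
        const rd_kafka_ConfigResource_t **resources =
            rd_kafka_DescribeConfigs_result_resources(res, &rcnt);

        for (size_t i = 0; i < rcnt; i++) {
                size_t ecnt;
                const rd_kafka_ConfigEntry_t **entries =
                    rd_kafka_ConfigResource_configs(resources[i], &ecnt);
                for (size_t j = 0; j < ecnt; j++) {
                        const char *val =
                            rd_kafka_ConfigEntry_value(entries[j]);
                        printf("%s = %s\n",
                               rd_kafka_ConfigEntry_name(entries[j]),
                               val ? val : "(sensitive or unset)");
                }
        }

        rd_kafka_event_destroy(rkev);
        rd_kafka_AdminOptions_destroy(opts);
        rd_kafka_ConfigResource_destroy(cr);
}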
*/ -RD_EXPORT rd_kafka_DeleteGroup_t * -rd_kafka_DeleteGroup_new (const char *group); +RD_EXPORT rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group); /** * @brief Destroy and free a DeleteGroup object previously created with * rd_kafka_DeleteGroup_new() */ -RD_EXPORT void -rd_kafka_DeleteGroup_destroy (rd_kafka_DeleteGroup_t *del_group); +RD_EXPORT void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group); /** * @brief Helper function to destroy all DeleteGroup objects in @@ -7110,8 +7369,8 @@ rd_kafka_DeleteGroup_destroy (rd_kafka_DeleteGroup_t *del_group); * The array itself is not freed. */ RD_EXPORT void -rd_kafka_DeleteGroup_destroy_array (rd_kafka_DeleteGroup_t **del_groups, - size_t del_group_cnt); +rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, + size_t del_group_cnt); /** * @brief Delete groups from cluster as specified by the \p del_groups @@ -7127,11 +7386,11 @@ rd_kafka_DeleteGroup_destroy_array (rd_kafka_DeleteGroup_t **del_groups, * \c RD_KAFKA_EVENT_DELETEGROUPS_RESULT */ RD_EXPORT -void rd_kafka_DeleteGroups (rd_kafka_t *rk, - rd_kafka_DeleteGroup_t **del_groups, - size_t del_group_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +void rd_kafka_DeleteGroups(rd_kafka_t *rk, + rd_kafka_DeleteGroup_t **del_groups, + size_t del_group_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); @@ -7147,10 +7406,9 @@ void rd_kafka_DeleteGroups (rd_kafka_t *rk, * @param result Result to get group results from. * @param cntp is updated to the number of elements in the array. */ -RD_EXPORT const rd_kafka_group_result_t ** -rd_kafka_DeleteGroups_result_groups ( - const rd_kafka_DeleteGroups_result_t *result, - size_t *cntp); +RD_EXPORT const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups( + const rd_kafka_DeleteGroups_result_t *result, + size_t *cntp); /* @@ -7161,7 +7419,7 @@ rd_kafka_DeleteGroups_result_groups ( /*! Represents consumer group committed offsets to be deleted. */ typedef struct rd_kafka_DeleteConsumerGroupOffsets_s -rd_kafka_DeleteConsumerGroupOffsets_t; + rd_kafka_DeleteConsumerGroupOffsets_t; /** * @brief Create a new DeleteConsumerGroupOffsets object. @@ -7176,27 +7434,25 @@ rd_kafka_DeleteConsumerGroupOffsets_t; * object when done. */ RD_EXPORT rd_kafka_DeleteConsumerGroupOffsets_t * -rd_kafka_DeleteConsumerGroupOffsets_new (const char *group, - const rd_kafka_topic_partition_list_t - *partitions); +rd_kafka_DeleteConsumerGroupOffsets_new( + const char *group, + const rd_kafka_topic_partition_list_t *partitions); /** * @brief Destroy and free a DeleteConsumerGroupOffsets object previously * created with rd_kafka_DeleteConsumerGroupOffsets_new() */ -RD_EXPORT void -rd_kafka_DeleteConsumerGroupOffsets_destroy ( - rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets); +RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy( + rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets); /** * @brief Helper function to destroy all DeleteConsumerGroupOffsets objects in * the \p del_grpoffsets array (of \p del_grpoffsets_cnt elements). * The array itself is not freed. 
*/ -RD_EXPORT void -rd_kafka_DeleteConsumerGroupOffsets_destroy_array ( - rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, - size_t del_grpoffset_cnt); +RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy_array( + rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, + size_t del_grpoffset_cnt); /** * @brief Delete committed offsets for a set of partitions in a conusmer @@ -7217,12 +7473,12 @@ rd_kafka_DeleteConsumerGroupOffsets_destroy_array ( * @remark The current implementation only supports one group per invocation. */ RD_EXPORT -void rd_kafka_DeleteConsumerGroupOffsets ( - rd_kafka_t *rk, - rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, - size_t del_grpoffsets_cnt, - const rd_kafka_AdminOptions_t *options, - rd_kafka_queue_t *rkqu); +void rd_kafka_DeleteConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, + size_t del_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); @@ -7239,13 +7495,362 @@ void rd_kafka_DeleteConsumerGroupOffsets ( * @param cntp is updated to the number of elements in the array. */ RD_EXPORT const rd_kafka_group_result_t ** -rd_kafka_DeleteConsumerGroupOffsets_result_groups ( - const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, - size_t *cntp); +rd_kafka_DeleteConsumerGroupOffsets_result_groups( + const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, + size_t *cntp); + +/** + * @brief ACL Binding is used to create access control lists. + * + * + */ +typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t; +/** + * @brief ACL Binding filter is used to filter access control lists. + * + */ +typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t; + +/** + * @returns the error object for the given acl result, or NULL on success. + */ +RD_EXPORT const rd_kafka_error_t * +rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres); + + +/** + * @name AclOperation + * @{ + */ + +/** + * @enum rd_kafka_AclOperation_t + * @brief Apache Kafka ACL operation types. + */ +typedef enum rd_kafka_AclOperation_t { + RD_KAFKA_ACL_OPERATION_UNKNOWN = 0, /**< Unknown */ + RD_KAFKA_ACL_OPERATION_ANY = + 1, /**< In a filter, matches any AclOperation */ + RD_KAFKA_ACL_OPERATION_ALL = 2, /**< ALL operation */ + RD_KAFKA_ACL_OPERATION_READ = 3, /**< READ operation */ + RD_KAFKA_ACL_OPERATION_WRITE = 4, /**< WRITE operation */ + RD_KAFKA_ACL_OPERATION_CREATE = 5, /**< CREATE operation */ + RD_KAFKA_ACL_OPERATION_DELETE = 6, /**< DELETE operation */ + RD_KAFKA_ACL_OPERATION_ALTER = 7, /**< ALTER operation */ + RD_KAFKA_ACL_OPERATION_DESCRIBE = 8, /**< DESCRIBE operation */ + RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = + 9, /**< CLUSTER_ACTION operation */ + RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = + 10, /**< DESCRIBE_CONFIGS operation */ + RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = + 11, /**< ALTER_CONFIGS operation */ + RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = + 12, /**< IDEMPOTENT_WRITE operation */ + RD_KAFKA_ACL_OPERATION__CNT +} rd_kafka_AclOperation_t; + +/** + * @returns a string representation of the \p acl_operation + */ +RD_EXPORT const char * +rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation); /**@}*/ +/** + * @name AclPermissionType + * @{ + */ + +/** + * @enum rd_kafka_AclPermissionType_t + * @brief Apache Kafka ACL permission types. 
+ */ +typedef enum rd_kafka_AclPermissionType_t { + RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0, /**< Unknown */ + RD_KAFKA_ACL_PERMISSION_TYPE_ANY = + 1, /**< In a filter, matches any AclPermissionType */ + RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2, /**< Disallows access */ + RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3, /**< Grants access. */ + RD_KAFKA_ACL_PERMISSION_TYPE__CNT +} rd_kafka_AclPermissionType_t; + +/** + * @returns a string representation of the \p acl_permission_type + */ +RD_EXPORT const char *rd_kafka_AclPermissionType_name( + rd_kafka_AclPermissionType_t acl_permission_type); + +/**@}*/ + +/** + * @brief Create a new AclBinding object. This object is later passed to + * rd_kafka_CreateAcls(). + * + * @param restype The ResourceType. + * @param name The resource name. + * @param resource_pattern_type The pattern type. + * @param principal A principal, following the kafka specification. + * @param host An hostname or ip. + * @param operation A Kafka operation. + * @param permission_type A Kafka permission type. + * @param errstr An error string for returning errors or NULL to not use it. + * @param errstr_size The \p errstr size or 0 to not use it. + * + * @returns a new allocated AclBinding object, or NULL if the input parameters + * are invalid. + * Use rd_kafka_AclBinding_destroy() to free object when done. + */ +RD_EXPORT rd_kafka_AclBinding_t * +rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype, + const char *name, + rd_kafka_ResourcePatternType_t resource_pattern_type, + const char *principal, + const char *host, + rd_kafka_AclOperation_t operation, + rd_kafka_AclPermissionType_t permission_type, + char *errstr, + size_t errstr_size); + +/** + * @brief Create a new AclBindingFilter object. This object is later passed to + * rd_kafka_DescribeAcls() or + * rd_kafka_DeletesAcls() in order to filter + * the acls to retrieve or to delete. + * Use the same rd_kafka_AclBinding functions to query or destroy it. + * + * @param restype The ResourceType or \c RD_KAFKA_RESOURCE_ANY if + * not filtering by this field. + * @param name The resource name or NULL if not filtering by this field. + * @param resource_pattern_type The pattern type or \c + * RD_KAFKA_RESOURCE_PATTERN_ANY if not filtering by this field. + * @param principal A principal or NULL if not filtering by this field. + * @param host An hostname or ip or NULL if not filtering by this field. + * @param operation A Kafka operation or \c RD_KAFKA_ACL_OPERATION_ANY if not + * filtering by this field. + * @param permission_type A Kafka permission type or \c + * RD_KAFKA_ACL_PERMISSION_TYPE_ANY if not filtering by this field. + * @param errstr An error string for returning errors or NULL to not use it. + * @param errstr_size The \p errstr size or 0 to not use it. + * + * @returns a new allocated AclBindingFilter object, or NULL if the input + * parameters are invalid. Use rd_kafka_AclBinding_destroy() to free object when + * done. + */ +RD_EXPORT rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new( + rd_kafka_ResourceType_t restype, + const char *name, + rd_kafka_ResourcePatternType_t resource_pattern_type, + const char *principal, + const char *host, + rd_kafka_AclOperation_t operation, + rd_kafka_AclPermissionType_t permission_type, + char *errstr, + size_t errstr_size); + +/** + * @returns the resource type for the given acl binding. + */ +RD_EXPORT rd_kafka_ResourceType_t +rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl); + +/** + * @returns the resource name for the given acl binding. 
+ * + * @remark lifetime of the returned string is the same as the \p acl. + */ +RD_EXPORT const char * +rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl); + +/** + * @returns the principal for the given acl binding. + * + * @remark lifetime of the returned string is the same as the \p acl. + */ +RD_EXPORT const char * +rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl); + +/** + * @returns the host for the given acl binding. + * + * @remark lifetime of the returned string is the same as the \p acl. + */ +RD_EXPORT const char * +rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl); + +/** + * @returns the acl operation for the given acl binding. + */ +RD_EXPORT rd_kafka_AclOperation_t +rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl); + +/** + * @returns the permission type for the given acl binding. + */ +RD_EXPORT rd_kafka_AclPermissionType_t +rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl); + +/** + * @returns the resource pattern type for the given acl binding. + */ +RD_EXPORT rd_kafka_ResourcePatternType_t +rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl); + +/** + * @returns the error object for the given acl binding, or NULL on success. + */ +RD_EXPORT const rd_kafka_error_t * +rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl); + + +/** + * @brief Destroy and free an AclBinding object previously created with + * rd_kafka_AclBinding_new() + */ +RD_EXPORT void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding); + + +/** + * @brief Helper function to destroy all AclBinding objects in + * the \p acl_bindings array (of \p acl_bindings_cnt elements). + * The array itself is not freed. + */ +RD_EXPORT void +rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings, + size_t acl_bindings_cnt); + +/** + * @brief Get an array of acl results from a CreateAcls result. + * + * The returned \p acl result life-time is the same as the \p result object. + * @param result CreateAcls result to get acl results from. + * @param cntp is updated to the number of elements in the array. + */ +RD_EXPORT const rd_kafka_acl_result_t ** +rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result, + size_t *cntp); + +/** + * @brief Create acls as specified by the \p new_acls + * array of size \p new_topic_cnt elements. + * + * @param rk Client instance. + * @param new_acls Array of new acls to create. + * @param new_acls_cnt Number of elements in \p new_acls array. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * Supported admin options: + * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_CREATEACLS_RESULT + */ +RD_EXPORT void rd_kafka_CreateAcls(rd_kafka_t *rk, + rd_kafka_AclBinding_t **new_acls, + size_t new_acls_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/** + * @section DescribeAcls - describe access control lists. + * + * + */ + +/** + * @brief Get an array of resource results from a DescribeAcls result. + * + * The returned \p resources life-time is the same as the \p result object. + * @param result DescribeAcls result to get acls from. + * @param cntp is updated to the number of elements in the array. 
+ */
+RD_EXPORT const rd_kafka_AclBinding_t **
+rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result,
+ size_t *cntp);
+
+/**
+ * @brief Describe acls matching the filter provided in \p acl_filter
+ *
+ * @param rk Client instance.
+ * @param acl_filter Filter for the returned acls.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ * - rd_kafka_AdminOptions_set_operation_timeout() - default 0
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
+ */
+RD_EXPORT void rd_kafka_DescribeAcls(rd_kafka_t *rk,
+ rd_kafka_AclBindingFilter_t *acl_filter,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+/**
+ * @section DeleteAcls - delete access control lists.
+ *
+ *
+ */
+
+typedef struct rd_kafka_DeleteAcls_result_response_s
+ rd_kafka_DeleteAcls_result_response_t;
+
+/**
+ * @brief Get an array of DeleteAcls result responses from a DeleteAcls result.
+ *
+ * The returned \p responses life-time is the same as the \p result object.
+ * @param result DeleteAcls result to get responses from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_DeleteAcls_result_response_t **
+rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result,
+ size_t *cntp);
+
+/**
+ * @returns the error object for the given DeleteAcls result response,
+ * or NULL on success.
+ */
+RD_EXPORT const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(
+ const rd_kafka_DeleteAcls_result_response_t *result_response);
+
+
+/**
+ * @returns the matching acls array for the given DeleteAcls result response.
+ *
+ * @remark lifetime of the returned acl bindings is the same as the \p
+ * result_response.
+ */
+RD_EXPORT const rd_kafka_AclBinding_t **
+rd_kafka_DeleteAcls_result_response_matching_acls(
+ const rd_kafka_DeleteAcls_result_response_t *result_response,
+ size_t *matching_acls_cntp);
+
+/**
+ * @brief Delete acls matching the filters provided in \p del_acls
+ * array of size \p del_acls_cnt.
+ *
+ * @param rk Client instance.
+ * @param del_acls Filters for the acls to delete.
+ * @param del_acls_cnt Number of elements in \p del_acls array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ * - rd_kafka_AdminOptions_set_operation_timeout() - default 0
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_DELETEACLS_RESULT
+ */
+RD_EXPORT void rd_kafka_DeleteAcls(rd_kafka_t *rk,
+ rd_kafka_AclBindingFilter_t **del_acls,
+ size_t del_acls_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+/**@}*/
 /**
  * @name Security APIs
@@ -7299,12 +7904,14 @@ rd_kafka_DeleteConsumerGroupOffsets_result_groups (
  */
 RD_EXPORT
 rd_kafka_resp_err_t
-rd_kafka_oauthbearer_set_token (rd_kafka_t *rk,
- const char *token_value,
- int64_t md_lifetime_ms,
- const char *md_principal_name,
- const char **extensions, size_t extension_size,
- char *errstr, size_t errstr_size);
+rd_kafka_oauthbearer_set_token(rd_kafka_t *rk,
+ const char *token_value,
+ int64_t md_lifetime_ms,
+ const char *md_principal_name,
+ const char **extensions,
+ size_t extension_size,
+ char *errstr,
+ size_t errstr_size);
 /**
  * @brief SASL/OAUTHBEARER token refresh failure indicator.
@@ -7327,8 +7934,8 @@ rd_kafka_oauthbearer_set_token (rd_kafka_t *rk, * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb */ RD_EXPORT -rd_kafka_resp_err_t -rd_kafka_oauthbearer_set_token_failure (rd_kafka_t *rk, const char *errstr); +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, + const char *errstr); /**@}*/ @@ -7511,8 +8118,7 @@ rd_kafka_oauthbearer_set_token_failure (rd_kafka_t *rk, const char *errstr); * rd_kafka_error_destroy(). */ RD_EXPORT -rd_kafka_error_t * -rd_kafka_init_transactions (rd_kafka_t *rk, int timeout_ms); +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms); @@ -7561,7 +8167,7 @@ rd_kafka_init_transactions (rd_kafka_t *rk, int timeout_ms); * rd_kafka_error_destroy(). */ RD_EXPORT -rd_kafka_error_t *rd_kafka_begin_transaction (rd_kafka_t *rk); +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk); /** @@ -7629,12 +8235,11 @@ rd_kafka_error_t *rd_kafka_begin_transaction (rd_kafka_t *rk); * rd_kafka_error_destroy(). */ RD_EXPORT -rd_kafka_error_t * -rd_kafka_send_offsets_to_transaction ( - rd_kafka_t *rk, - const rd_kafka_topic_partition_list_t *offsets, - const rd_kafka_consumer_group_metadata_t *cgmetadata, - int timeout_ms); +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction( + rd_kafka_t *rk, + const rd_kafka_topic_partition_list_t *offsets, + const rd_kafka_consumer_group_metadata_t *cgmetadata, + int timeout_ms); /** @@ -7701,8 +8306,7 @@ rd_kafka_send_offsets_to_transaction ( * rd_kafka_error_destroy(). */ RD_EXPORT -rd_kafka_error_t * -rd_kafka_commit_transaction (rd_kafka_t *rk, int timeout_ms); +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms); /** @@ -7761,8 +8365,7 @@ rd_kafka_commit_transaction (rd_kafka_t *rk, int timeout_ms); * rd_kafka_error_destroy(). */ RD_EXPORT -rd_kafka_error_t * -rd_kafka_abort_transaction (rd_kafka_t *rk, int timeout_ms); +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms); /**@}*/ diff --git a/kafka/librdkafka_vendor/rdkafka_mock.h b/kafka/librdkafka_vendor/rdkafka_mock.h new file mode 100644 index 000000000..099280078 --- /dev/null +++ b/kafka/librdkafka_vendor/rdkafka_mock.h @@ -0,0 +1,331 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2019 Magnus Edenhill + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_MOCK_H_
+#define _RDKAFKA_MOCK_H_
+
+#ifndef _RDKAFKA_H_
+#error "rdkafka_mock.h must be included after rdkafka.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#if 0
+} /* Restore indent */
+#endif
+#endif
+
+
+/**
+ * @name Mock cluster
+ *
+ * Provides a mock Kafka cluster with a configurable number of brokers
+ * that support a reasonable subset of Kafka protocol operations,
+ * error injection, etc.
+ *
+ * There are two ways to use the mock clusters. The simplest approach
+ * is to configure `test.mock.num.brokers` (to e.g. 3) on the rd_kafka_t
+ * in an existing application, which will replace the configured
+ * `bootstrap.servers` with the mock cluster brokers.
+ * This approach is a convenient way to test existing applications.
+ *
+ * The second approach is to explicitly create a mock cluster on an
+ * rd_kafka_t instance by using rd_kafka_mock_cluster_new().
+ *
+ * Mock clusters provide localhost listeners that can be used as the bootstrap
+ * servers by multiple rd_kafka_t instances.
+ *
+ * Currently supported functionality:
+ * - Producer
+ * - Idempotent Producer
+ * - Transactional Producer
+ * - Low-level consumer
+ * - High-level balanced consumer groups with offset commits
+ * - Topic Metadata and auto creation
+ *
+ * @remark This is an experimental public API that is NOT covered by the
+ * librdkafka API or ABI stability guarantees.
+ *
+ *
+ * @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL.
+ *
+ * @{
+ */
+
+typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
+
+
+/**
+ * @brief Create new mock cluster with \p broker_cnt brokers.
+ *
+ * The broker ids will start at 1 up to and including \p broker_cnt.
+ *
+ * The \p rk instance is required for internal bookkeeping but continues
+ * to operate as usual.
+ */
+RD_EXPORT
+rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new (rd_kafka_t *rk,
+ int broker_cnt);
+
+
+/**
+ * @brief Destroy mock cluster.
+ */
+RD_EXPORT
+void rd_kafka_mock_cluster_destroy (rd_kafka_mock_cluster_t *mcluster);
+
+
+
+/**
+ * @returns the rd_kafka_t instance for a cluster as passed to
+ * rd_kafka_mock_cluster_new().
+ */
+RD_EXPORT rd_kafka_t *
+rd_kafka_mock_cluster_handle (const rd_kafka_mock_cluster_t *mcluster);
+
+
+/**
+ * @returns the rd_kafka_mock_cluster_t instance as created by
+ * setting the `test.mock.num.brokers` configuration property,
+ * or NULL if no such instance.
+ */
+RD_EXPORT rd_kafka_mock_cluster_t *
+rd_kafka_handle_mock_cluster (const rd_kafka_t *rk);
+
+
+
+/**
+ * @returns the mock cluster's bootstrap.servers list
+ */
+RD_EXPORT const char *
+rd_kafka_mock_cluster_bootstraps (const rd_kafka_mock_cluster_t *mcluster);
+
+
+/**
+ * @brief Clear the cluster's error state for the given \p ApiKey.
+ */
+RD_EXPORT
+void rd_kafka_mock_clear_request_errors (rd_kafka_mock_cluster_t *mcluster,
+ int16_t ApiKey);
+
+
+/**
+ * @brief Push \p cnt errors in the \p ... va-arg list onto the cluster's
+ * error stack for the given \p ApiKey.
+ *
+ * \p ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
+ *
+ * The following \p cnt protocol requests matching \p ApiKey will fail with the
+ * provided error code and be removed from the stack, starting with
+ * the first error code, then the second, etc.
+ *
+ * Passing \c RD_KAFKA_RESP_ERR__TRANSPORT will make the mock broker
+ * disconnect the client, which can be useful to trigger a disconnect on certain
+ * requests.
+ */
+RD_EXPORT
+void rd_kafka_mock_push_request_errors (rd_kafka_mock_cluster_t *mcluster,
+ int16_t ApiKey, size_t cnt, ...);
+
+
+/**
+ * @brief Same as rd_kafka_mock_push_request_errors() but takes
+ * an array of errors.
+ */
+RD_EXPORT void
+rd_kafka_mock_push_request_errors_array (rd_kafka_mock_cluster_t *mcluster,
+ int16_t ApiKey,
+ size_t cnt,
+ const rd_kafka_resp_err_t *errors);
+
+
+/**
+ * @brief Push \p cnt errors and RTT tuples in the \p ... va-arg list onto
+ * the broker's error stack for the given \p ApiKey.
+ *
+ * \p ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
+ *
+ * Each entry is a tuple of:
+ * rd_kafka_resp_err_t err - error to return (or 0)
+ * int rtt_ms - response RTT/delay in milliseconds (or 0)
+ *
+ * The following \p cnt protocol requests matching \p ApiKey will fail with the
+ * provided error code and be removed from the stack, starting with
+ * the first error code, then the second, etc.
+ *
+ * @remark The broker errors take precedence over the cluster errors.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_push_request_error_rtts (rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id,
+ int16_t ApiKey, size_t cnt, ...);
+
+
+/**
+ * @brief Set the topic error to return in protocol requests.
+ *
+ * Currently only used for TopicMetadataRequest and AddPartitionsToTxnRequest.
+ */
+RD_EXPORT
+void rd_kafka_mock_topic_set_error (rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ rd_kafka_resp_err_t err);
+
+
+/**
+ * @brief Creates a topic.
+ *
+ * This is an alternative to automatic topic creation as performed by
+ * the client itself.
+ *
+ * @remark The Topic Admin API (CreateTopics) is not supported by the
+ * mock broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_topic_create (rd_kafka_mock_cluster_t *mcluster,
+ const char *topic, int partition_cnt,
+ int replication_factor);
+
+
+/**
+ * @brief Sets the partition leader.
+ *
+ * The topic will be created if it does not exist.
+ *
+ * \p broker_id needs to be an existing broker, or -1 to make the
+ * partition leader-less.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_partition_set_leader (rd_kafka_mock_cluster_t *mcluster,
+ const char *topic, int32_t partition,
+ int32_t broker_id);
+
+/**
+ * @brief Sets the partition's preferred replica / follower.
+ *
+ * The topic will be created if it does not exist.
+ *
+ * \p broker_id does not need to point to an existing broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_partition_set_follower (rd_kafka_mock_cluster_t *mcluster,
+ const char *topic, int32_t partition,
+ int32_t broker_id);
+
+/**
+ * @brief Sets the partition's preferred replica / follower low and high
+ * watermarks.
+ *
+ * The topic will be created if it does not exist.
+ *
+ * Setting an offset to -1 will revert to the leader's corresponding
+ * watermark.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_partition_set_follower_wmarks (rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ int32_t partition,
+ int64_t lo, int64_t hi);
+
+
+/**
+ * @brief Disconnects the broker and disallows any new connections.
+ * This does NOT trigger leader change.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_down (rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id);
+
+/**
+ * @brief Makes the broker accept connections again.
+ * This does NOT trigger leader change.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_up (rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id);
+
+
+/**
+ * @brief Set broker round-trip-time delay in milliseconds.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_rtt (rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id, int rtt_ms);
+
+/**
+ * @brief Sets the broker's rack as reported in Metadata to the client.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_rack (rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id, const char *rack);
+
+
+
+/**
+ * @brief Explicitly sets the coordinator. If this API is not used, a standard
+ * hashing scheme will be used.
+ *
+ * @param key_type "transaction" or "group"
+ * @param key The transactional.id or group.id
+ * @param broker_id The new coordinator, does not have to be a valid broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_coordinator_set (rd_kafka_mock_cluster_t *mcluster,
+ const char *key_type, const char *key,
+ int32_t broker_id);
+
+
+
+/**
+ * @brief Set the allowed ApiVersion range for \p ApiKey.
+ *
+ * Set \p MinVersion and \p MaxVersion to -1 to disable the API
+ * completely.
+ *
+ * \p MaxVersion MUST not exceed the maximum implemented value,
+ * see rdkafka_mock_handlers.c.
+ *
+ * @param ApiKey Protocol request type/key
+ * @param MinVersion Minimum version supported (or -1 to disable).
+ * @param MaxVersion Maximum version supported (or -1 to disable).
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_set_apiversion (rd_kafka_mock_cluster_t *mcluster,
+ int16_t ApiKey,
+ int16_t MinVersion, int16_t MaxVersion);
+
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RDKAFKA_MOCK_H_ */
\ No newline at end of file
diff --git a/kafka/message_test.go b/kafka/message_test.go
index e868dcc7a..39b071849 100644
--- a/kafka/message_test.go
+++ b/kafka/message_test.go
@@ -1,3 +1,5 @@
+package kafka
+
 /**
  * Copyright 2016 Confluent Inc.
  *
@@ -14,8 +16,6 @@
  * limitations under the License.
  */
-package kafka
-
 import (
 "testing"
 )
diff --git a/kafka/metadata_test.go b/kafka/metadata_test.go
index 96e8a9141..de2f60ba8 100644
--- a/kafka/metadata_test.go
+++ b/kafka/metadata_test.go
@@ -1,3 +1,5 @@
+package kafka
+
 /**
  * Copyright 2016 Confluent Inc.
  *
@@ -14,8 +16,6 @@
  * limitations under the License.
  */
-package kafka
-
 import (
 "testing"
 )
diff --git a/kafka/misc.go b/kafka/misc.go
index 6d602ce77..e47a188c5 100644
--- a/kafka/misc.go
+++ b/kafka/misc.go
@@ -1,3 +1,5 @@
+package kafka
+
 /**
  * Copyright 2016 Confluent Inc.
  *
@@ -14,8 +16,6 @@
  * limitations under the License.
  */
-package kafka
-
 import "C"
 // bool2int converts a bool to a C.int (1 or 0)
diff --git a/kafka/mockcluster.go b/kafka/mockcluster.go
new file mode 100644
index 000000000..613589149
--- /dev/null
+++ b/kafka/mockcluster.go
@@ -0,0 +1,84 @@
+package kafka
+
+/**
+ * Copyright 2022 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import "unsafe"
+
+/*
+#include <stdlib.h>
+#include "select_rdkafka.h"
+#include "glue_rdkafka.h"
+*/
+import "C"
+
+// MockCluster represents a Kafka mock cluster instance which can be used
+// for testing.
+type MockCluster struct {
+ rk *C.rd_kafka_t
+ mcluster *C.rd_kafka_mock_cluster_t
+}
+
+// NewMockCluster provides a mock Kafka cluster with a configurable
+// number of brokers that support a reasonable subset of Kafka protocol
+// operations, error injection, etc.
+//
+// Mock clusters provide localhost listeners that can be used as the bootstrap
+// servers by multiple Kafka client instances.
+//
+// Currently supported functionality:
+// - Producer
+// - Idempotent Producer
+// - Transactional Producer
+// - Low-level consumer
+// - High-level balanced consumer groups with offset commits
+// - Topic Metadata and auto creation
+//
+// Warning: THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL.
+func NewMockCluster(brokerCount int) (*MockCluster, error) {
+
+ mc := &MockCluster{}
+
+ cErrstr := (*C.char)(C.malloc(C.size_t(512)))
+ defer C.free(unsafe.Pointer(cErrstr))
+
+ cConf := C.rd_kafka_conf_new()
+
+ mc.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256)
+ if mc.rk == nil {
+ C.rd_kafka_conf_destroy(cConf)
+ return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
+ }
+
+ mc.mcluster = C.rd_kafka_mock_cluster_new(mc.rk, C.int(brokerCount))
+ if mc.mcluster == nil {
+ C.rd_kafka_destroy(mc.rk)
+ return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
+ }
+
+ return mc, nil
+}
+
+// BootstrapServers returns the bootstrap.servers property for this MockCluster
+func (mc *MockCluster) BootstrapServers() string {
+ return C.GoString(C.rd_kafka_mock_cluster_bootstraps(mc.mcluster))
+}
+
+// Close and destroy the MockCluster
+func (mc *MockCluster) Close() {
+ C.rd_kafka_mock_cluster_destroy(mc.mcluster)
+ C.rd_kafka_destroy(mc.rk)
+}
diff --git a/kafka/offset.go b/kafka/offset.go
index 4cb1819c8..5d62b119d 100644
--- a/kafka/offset.go
+++ b/kafka/offset.go
@@ -1,3 +1,5 @@
+package kafka
+
 /**
  * Copyright 2017 Confluent Inc.
  *
@@ -14,8 +16,6 @@
  * limitations under the License.
  */
-package kafka
-
 import (
 "fmt"
 "strconv"
diff --git a/kafka/producer.go b/kafka/producer.go
index f68854a96..34ce2ece4 100644
--- a/kafka/producer.go
+++ b/kafka/producer.go
@@ -1,3 +1,5 @@
+package kafka
+
 /**
  * Copyright 2016 Confluent Inc.
  *
@@ -14,8 +16,6 @@
  * limitations under the License.
  */
-package kafka
-
 import (
 "context"
 "fmt"
diff --git a/kafka/producer_performance_test.go b/kafka/producer_performance_test.go
index f9473ed70..01f7920c1 100644
--- a/kafka/producer_performance_test.go
+++ b/kafka/producer_performance_test.go
@@ -1,3 +1,5 @@
+package kafka
+
 /**
  * Copyright 2016 Confluent Inc.
  *
@@ -14,8 +16,6 @@
  * limitations under the License.
 */
-package kafka
-
 import (
 "fmt"
 "strings"
@@ -70,7 +70,7 @@ func producerPerfTest(b *testing.B, testname string, msgcnt int, withDr bool, ba
 "queue.buffering.max.messages": msgcnt,
 "api.version.request": "true",
 "broker.version.fallback": "0.9.0.1",
- "acks": 1}
+ "acks": 1}
 conf.updateFromTestconf()
diff --git a/kafka/producer_test.go b/kafka/producer_test.go
index 1192406f6..19c395a3d 100644
--- a/kafka/producer_test.go
+++ b/kafka/producer_test.go
@@ -1,3 +1,5 @@
+package kafka
+
 /**
  * Copyright 2016 Confluent Inc.
  *
@@ -14,8 +16,6 @@
  * limitations under the License.
  */
-package kafka
-
 import (
 "bytes"
 "context"
diff --git a/kafka/select_rdkafka.h b/kafka/select_rdkafka.h
index 98fe330ae..3cfd095b2 100644
--- a/kafka/select_rdkafka.h
+++ b/kafka/select_rdkafka.h
@@ -24,6 +24,8 @@
 #ifdef USE_VENDORED_LIBRDKAFKA
 #include "librdkafka_vendor/rdkafka.h"
+#include "librdkafka_vendor/rdkafka_mock.h"
 #else
 #include <librdkafka/rdkafka.h>
+#include <librdkafka/rdkafka_mock.h>
 #endif
diff --git a/kafka/testhelpers_test.go b/kafka/testhelpers_test.go
index 4fca59e70..a2997d301 100644
--- a/kafka/testhelpers_test.go
+++ b/kafka/testhelpers_test.go
@@ -1,3 +1,5 @@
+package kafka
+
 /**
  * Copyright 2016 Confluent Inc.
  *
@@ -14,8 +16,6 @@
  * limitations under the License.
  */
-package kafka
-
 import (
 "testing"
 "time"
diff --git a/kafka/time.go b/kafka/time.go
index ff93f0ad2..62b2a11d8 100644
--- a/kafka/time.go
+++ b/kafka/time.go
@@ -1,3 +1,5 @@
+package kafka
+
 /**
  * Copyright 2019 Confluent Inc.
  *
@@ -14,8 +16,6 @@
  * limitations under the License.
  */
-package kafka
-
 import "C"
 import (
diff --git a/kafka/txn_integration_test.go b/kafka/txn_integration_test.go
index 4cd298465..daea77b4e 100644
--- a/kafka/txn_integration_test.go
+++ b/kafka/txn_integration_test.go
@@ -1,3 +1,5 @@
+package kafka
+
 /**
  * Copyright 2020 Confluent Inc.
  *
@@ -14,8 +16,6 @@
  * limitations under the License.
*/ -package kafka - // Integration tests for the transactional producer import ( diff --git a/kafkatest/go.mod b/kafkatest/go.mod new file mode 100644 index 000000000..504833c20 --- /dev/null +++ b/kafkatest/go.mod @@ -0,0 +1,13 @@ +module github.com/confluentinc/confluent-kafka-go/kafkatest + +go 1.13 + +replace github.com/confluentinc/confluent-kafka-go => ../ + +require ( + github.com/alecthomas/kingpin v2.2.6+incompatible + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/confluentinc/confluent-kafka-go v0.0.0-00010101000000-000000000000 + github.com/stretchr/testify v1.7.1 // indirect +) diff --git a/kafkatest/go.sum b/kafkatest/go.sum new file mode 100644 index 000000000..a5cd8082f --- /dev/null +++ b/kafkatest/go.sum @@ -0,0 +1,21 @@ +github.com/alecthomas/kingpin v2.2.6+incompatible h1:5svnBTFgJjZvGKyYBtMB0+m5wvrbUHiqye8wRJMlnYI= +github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/kafkatest/go_verifiable_consumer/go_verifiable_consumer.go b/kafkatest/go_verifiable_consumer/go_verifiable_consumer.go index b705f4e0c..5c0079951 100644 --- a/kafkatest/go_verifiable_consumer/go_verifiable_consumer.go +++ b/kafkatest/go_verifiable_consumer/go_verifiable_consumer.go @@ -20,7 +20,7 @@ package main import ( "encoding/json" "fmt" - "gopkg.in/alecthomas/kingpin.v2" + "github.com/alecthomas/kingpin" "github.com/confluentinc/confluent-kafka-go/kafka" "os" "os/signal" diff --git a/kafkatest/go_verifiable_producer/go_verifiable_producer.go 
b/kafkatest/go_verifiable_producer/go_verifiable_producer.go index de6eaf930..8e1ed38ae 100644 --- a/kafkatest/go_verifiable_producer/go_verifiable_producer.go +++ b/kafkatest/go_verifiable_producer/go_verifiable_producer.go @@ -20,7 +20,7 @@ package main import ( "encoding/json" "fmt" - "gopkg.in/alecthomas/kingpin.v2" + "github.com/alecthomas/kingpin" "github.com/confluentinc/confluent-kafka-go/kafka" "os" "os/signal" diff --git a/mk/doc-gen.py b/mk/doc-gen.py index 6656ba482..323e6614b 100755 --- a/mk/doc-gen.py +++ b/mk/doc-gen.py @@ -5,15 +5,31 @@ # for inclusion in Confluent doc tree. -import subprocess, re +import subprocess +import re from bs4 import BeautifulSoup +def convert_path(url, base_url, after): + relative_path = url[url.rfind(after) + len(after):] + if relative_path == "style.css": + relative_path = "styles.css" + return f'{base_url}/{relative_path}' + + if __name__ == '__main__': + tag = "v1.9.0" + base_css = "https://go.dev/css" + base_js = "https://go.dev/js" + base_src = "https://github.com/confluentinc/" + \ + f"confluent-kafka-go/blob/{tag}" + base_pkg = "https://pkg.go.dev" + # Use godoc client to extract our package docs html_in = subprocess.check_output( - 'godoc -url=/pkg/github.com/confluentinc/confluent-kafka-go/kafka | egrep -v "^using (GOPATH|module) mode"', shell=True) + 'godoc -url=/pkg/github.com/confluentinc/confluent-kafka-go/kafka ' + + '| egrep -v "^using (GOPATH|module) mode"', shell=True) # Parse HTML soup = BeautifulSoup(html_in, 'html.parser') @@ -23,17 +39,24 @@ # Remove "Subdirectories" soup.find(id='pkg-subdirectories').decompose() - soup.find(attrs={'class':'pkg-dir'}).decompose() + soup.find(attrs={'class': 'pkg-dir'}).decompose() for t in soup.find_all(href='#pkg-subdirectories'): t.decompose() # Use golang.org for external resources (such as CSS and JS) + # Use github.com for source files for t in soup.find_all(href=re.compile(r'^/')): - t['href'] = '//golang.org' + t['href'] + href = t['href'] + if href.endswith(".css"): + t['href'] = convert_path(href, base_css, "/") + elif href.startswith("/src/"): + t['href'] = convert_path(href, base_src, "/confluent-kafka-go/") + elif href.startswith("/pkg/"): + t['href'] = convert_path(href, base_pkg, "/pkg/") for t in soup.find_all(src=re.compile(r'^/')): - t['src'] = '//golang.org' + t['src'] + if t['src'].endswith(".js"): + t['src'] = convert_path(t['src'], base_js, "/") # Write updated HTML to stdout print(soup.prettify()) - diff --git a/service.yml b/service.yml new file mode 100644 index 000000000..80f95640b --- /dev/null +++ b/service.yml @@ -0,0 +1,10 @@ +name: confluent-kafka-go +lang: go +lang_version: 1.18 +git: + enable: true +github: + enable: true + repo_name: confluentinc/confluent-kafka-go +semaphore: + enable: true diff --git a/soaktest/go.mod b/soaktest/go.mod new file mode 100644 index 000000000..b4412eec5 --- /dev/null +++ b/soaktest/go.mod @@ -0,0 +1,16 @@ +module github.com/confluentinc/confluent-kafka-go/soaktest + +go 1.13 + +replace github.com/confluentinc/confluent-kafka-go => ../ + +require ( + github.com/DataDog/datadog-go v4.8.3+incompatible + github.com/Microsoft/go-winio v0.5.2 // indirect + github.com/confluentinc/confluent-kafka-go v0.0.0-00010101000000-000000000000 + github.com/shirou/gopsutil v3.21.11+incompatible + github.com/stretchr/testify v1.7.1 // indirect + github.com/tklauser/go-sysconf v0.3.10 // indirect + github.com/yusufpapurcu/wmi v1.2.2 // indirect + golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f // indirect +) diff --git a/soaktest/go.sum 
b/soaktest/go.sum new file mode 100644 index 000000000..b9564cf6a --- /dev/null +++ b/soaktest/go.sum @@ -0,0 +1,35 @@ +github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= +github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f h1:8w7RhxzTVgUzw/AH/9mUV5q0vMgy40SQRursCcfmkCw= +golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/soaktest/soakclient/soakclient.go b/soaktest/soakclient/soakclient.go index c99ff92db..b6b671810 100644 
--- a/soaktest/soakclient/soakclient.go +++ b/soaktest/soakclient/soakclient.go @@ -20,9 +20,9 @@ import ( "context" "flag" "fmt" + "github.com/confluentinc/confluent-kafka-go/soaktest" "os" "os/signal" - "soaktest" "sync" "syscall" "time" diff --git a/soaktest/soakclient_transaction/soakclient_transaction.go b/soaktest/soakclient_transaction/soakclient_transaction.go index 0341027b2..63d2e6bd1 100644 --- a/soaktest/soakclient_transaction/soakclient_transaction.go +++ b/soaktest/soakclient_transaction/soakclient_transaction.go @@ -20,9 +20,9 @@ import ( "context" "flag" "fmt" + "github.com/confluentinc/confluent-kafka-go/soaktest" "os" "os/signal" - "soaktest" "sync" "syscall" "time"
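
The rd_kafka_AclBinding_new(), rd_kafka_CreateAcls(), rd_kafka_DescribeAcls() and rd_kafka_DeleteAcls() declarations added to rdkafka.h above are the C surface behind this release's KIP-140 ACL support. From Go they are reached through the AdminClient. The following is a minimal sketch only; it assumes the ACLBinding struct and the CreateACLs/DescribeACLs methods introduced for this feature in the v1.9.0 Go admin API (those Go identifiers are not shown in this part of the patch, so treat the exact names as approximate), and the broker address, principal and topic are illustrative.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	// Illustrative broker address; any cluster with authorization enabled works.
	a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		panic(err)
	}
	defer a.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Allow User:alice to read the "orders" topic from any host.
	binding := kafka.ACLBinding{
		Type:                kafka.ResourceTopic,
		Name:                "orders",
		ResourcePatternType: kafka.ResourcePatternTypeLiteral,
		Principal:           "User:alice",
		Host:                "*",
		Operation:           kafka.ACLOperationRead,
		PermissionType:      kafka.ACLPermissionTypeAllow,
	}

	createResults, err := a.CreateACLs(ctx, kafka.ACLBindings{binding})
	if err != nil {
		panic(err)
	}
	for _, res := range createResults {
		fmt.Printf("CreateACLs result: %+v\n", res)
	}

	// Reuse the binding as a filter to describe what was just created.
	describeResult, err := a.DescribeACLs(ctx, kafka.ACLBindingFilter(binding))
	if err != nil {
		panic(err)
	}
	fmt.Printf("DescribeACLs result: %+v\n", describeResult)
}
```

DeleteACLs follows the same shape as CreateACLs, taking a slice of filters and returning one result, with the matching bindings, per filter.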
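The new kafka/mockcluster.go wraps the vendored rdkafka_mock.h cluster behind three calls: NewMockCluster, BootstrapServers and Close. A minimal sketch of a test that points an ordinary producer at the mock listeners, assuming only the existing kafka.NewProducer/Produce API (the topic name and timeout are illustrative):

```go
package kafka_test

import (
	"testing"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

// TestProduceToMockCluster spins up an in-process mock cluster and produces
// a single message to it; the topic name is illustrative.
func TestProduceToMockCluster(t *testing.T) {
	mc, err := kafka.NewMockCluster(3) // three mock brokers
	if err != nil {
		t.Fatalf("NewMockCluster: %v", err)
	}
	defer mc.Close()

	// Point a regular producer at the mock cluster's localhost listeners.
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers": mc.BootstrapServers(),
	})
	if err != nil {
		t.Fatalf("NewProducer: %v", err)
	}
	defer p.Close()

	topic := "test-topic"
	err = p.Produce(&kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Value:          []byte("hello mock cluster"),
	}, nil)
	if err != nil {
		t.Fatalf("Produce: %v", err)
	}

	// Wait up to 10s for the delivery report before tearing everything down.
	if remaining := p.Flush(10 * 1000); remaining > 0 {
		t.Errorf("%d message(s) were not delivered", remaining)
	}
}
```

Because deferred calls run last-in, first-out, the producer is closed before the mock cluster is destroyed, which matches the lifetime the underlying C objects expect.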
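rdkafka_mock.h also documents a second route that needs no explicit MockCluster object: setting librdkafka's test.mock.num.brokers property, which replaces whatever bootstrap.servers the client was configured with by in-process mock brokers. Through the Go client that is just another ConfigMap entry; a sketch under that assumption (group id and topic are illustrative):

```go
package kafka_test

import (
	"testing"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

// TestConsumerAgainstImplicitMockCluster relies on librdkafka's
// test.mock.num.brokers property instead of kafka.NewMockCluster;
// no real broker is contacted. Group id and topic are illustrative.
func TestConsumerAgainstImplicitMockCluster(t *testing.T) {
	c, err := kafka.NewConsumer(&kafka.ConfigMap{
		"test.mock.num.brokers": 3, // start 3 in-process mock brokers
		"group.id":              "mock-group",
		"auto.offset.reset":     "earliest",
	})
	if err != nil {
		t.Fatalf("NewConsumer: %v", err)
	}
	defer c.Close()

	if err := c.SubscribeTopics([]string{"test-topic"}, nil); err != nil {
		t.Fatalf("SubscribeTopics: %v", err)
	}

	// Poll a few times; with nothing produced the events are simply nil,
	// but the full client/broker round-trip runs against the mock cluster.
	for i := 0; i < 5; i++ {
		c.Poll(100)
	}
}
```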