diff --git a/README.md b/README.md
index 7efda9d3..52fcba26 100644
--- a/README.md
+++ b/README.md
@@ -822,7 +822,7 @@ Example of configuration with the use of a ticket cache:
#### Quick Start Confluent Cloud Example
-Check out the [adapters.xml](/examples/vendors/confluent/quickstart-confluent-cloud/adapters.xml#L22) file of the [_Quick Start Confluent Cloud_](/examples/vendors/confluent/quickstart-confluent-cloud/) app, where you can find an example of an authentication configuration that uses SASL/PLAIN.
+Check out the [adapters.xml](/kafka-connector-project/kafka-connector/src/adapter/dist/adapters.xml#L454) file of the [_Quick Start Confluent Cloud_](/examples/vendors/confluent/quickstart-confluent/) app, where you can find an example of an authentication configuration that uses SASL/PLAIN.
#### Quick Start Redpanda Serverless Example
@@ -870,6 +870,42 @@ Example:
<param name="record.consume.from">EARLIEST</param>
```
+#### `record.consume.with.num.threads`
+
+_Optional_. The number of threads to be used for concurrent processing of the incoming deserialized records. If set to `-1`, the number of threads will be automatically determined based on the number of available CPU cores.
+
+Default value: `1`.
+
+Example:
+
+```xml
+<param name="record.consume.with.num.threads">4</param>
+```
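+
+As a rough illustration of how `-1` is resolved (a minimal sketch assuming "available CPU cores" maps to the JVM's reported processor count; this is not the connector's actual code):
+
+```java
+public class ThreadCountResolver {
+    /** Maps the configured value to an effective thread count: -1 means "use all available cores". */
+    static int resolve(int configured) {
+        return configured == -1 ? Runtime.getRuntime().availableProcessors() : configured;
+    }
+}
+```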
+
+#### `record.consume.with.order.strategy`
+
+_Optional but only effective if [`record.consume.with.num.threads`](#recordconsumewithnumthreads) is set to a value greater than `1` (which excludes the default value)_. The order strategy to be used for concurrent processing of the incoming deserialized records. Can be one of the following:
+
+- `ORDER_BY_PARTITION`: maintain the order of records within each partition.
+
+  If you have multiple partitions, records from different partitions can be processed concurrently by different threads, but the order of records from a single partition will always be preserved. This is the default and generally offers a good balance between performance and ordering.
+
+- `ORDER_BY_KEY`: maintain the order among the records sharing the same key.
+
+ Different keys can be processed concurrently by different threads. So, while all records with key "A" are processed in order, and all records with key "B" are processed in order, the processing of "A" and "B" records can happen concurrently and interleaved in time. There's no guaranteed order between records of different keys.
+
+- `UNORDERED`: provide no ordering guarantees.
+
+  Records from any partition and with any key can be processed by any thread at any time. This offers the highest throughput when a high number of subscriptions is involved, but the order in which records are delivered to Lightstreamer clients might not match the order in which they were written to Kafka. This is suitable for use cases where message order is not important.
+
+Default value: `ORDER_BY_PARTITION`.
+
+Example:
+
+```xml
+<param name="record.consume.with.order.strategy">ORDER_BY_KEY</param>
+```
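+
+To give an intuition of the `ORDER_BY_KEY` guarantee, here is a minimal sketch of how key-ordered dispatching can work in general: records sharing a key are routed to the same single-threaded worker, so they stay in order, while different keys proceed concurrently. The `KeyOrderedDispatcher` class is made up for illustration and is not the connector's actual implementation:
+
+```java
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+public class KeyOrderedDispatcher {
+    private final ExecutorService[] workers;
+
+    public KeyOrderedDispatcher(int numThreads) {
+        workers = new ExecutorService[numThreads];
+        for (int i = 0; i < numThreads; i++) {
+            // Each worker is single-threaded, so tasks submitted to it run in submission order.
+            workers[i] = Executors.newSingleThreadExecutor();
+        }
+    }
+
+    public void dispatch(String key, Runnable processing) {
+        // Same key -> same worker -> per-key ordering; different keys may run concurrently.
+        int slot = Math.floorMod(key == null ? 0 : key.hashCode(), workers.length);
+        workers[slot].execute(processing);
+    }
+}
+```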
+
#### `record.key.evaluator.type` and `record.value.evaluator.type`
_Optional_. The format to be used to deserialize respectively the key and value of a Kafka record. Can be one of the following:
@@ -1066,7 +1102,7 @@ To configure the mapping, you define the set of all subscribable fields through
The configuration specifies that the field `fieldNameX` will contain the value extracted from the deserialized Kafka record through the `extractionExpressionX`, written using the [_Data Extraction Language_](#data-extraction-language). This approach makes it possible to transform a Kafka record of any complexity to the flat structure required by Lightstreamer.
-The `QuickStart` [factory configuration](/kafka-connector-project/kafka-connector/src/adapter/dist/adapters.xml#L352) shows a basic example, where a simple _direct_ mapping has been defined between every attribute of the JSON record value and a Lightstreamer field with the corresponding name. Of course, thanks to the _Data Extraction Language_, more complex mapping can be employed.
+The `QuickStart` [factory configuration](/kafka-connector-project/kafka-connector/src/adapter/dist/adapters.xml#L374) shows a basic example, where a simple _direct_ mapping has been defined between every attribute of the JSON record value and a Lightstreamer field with the corresponding name. Of course, thanks to the _Data Extraction Language_, more complex mappings can be employed.
```xml
...
diff --git a/docs/javadoc/allclasses-index.html b/docs/javadoc/allclasses-index.html
index 4831ee4a..50ee01f9 100644
--- a/docs/javadoc/allclasses-index.html
+++ b/docs/javadoc/allclasses-index.html
@@ -2,7 +2,7 @@
-All Classes and Interfaces (kafka-connector 1.0.7 API)
+All Classes and Interfaces (kafka-connector 1.1.0 API)
diff --git a/docs/javadoc/allpackages-index.html b/docs/javadoc/allpackages-index.html
index 0dfa3321..d2dc80af 100644
--- a/docs/javadoc/allpackages-index.html
+++ b/docs/javadoc/allpackages-index.html
@@ -2,7 +2,7 @@
-All Packages (kafka-connector 1.0.7 API)
+All Packages (kafka-connector 1.1.0 API)
diff --git a/docs/javadoc/com/lightstreamer/kafka/adapters/pub/KafkaConnectorMetadataAdapter.html b/docs/javadoc/com/lightstreamer/kafka/adapters/pub/KafkaConnectorMetadataAdapter.html
index 688ff5f1..37681ac6 100644
--- a/docs/javadoc/com/lightstreamer/kafka/adapters/pub/KafkaConnectorMetadataAdapter.html
+++ b/docs/javadoc/com/lightstreamer/kafka/adapters/pub/KafkaConnectorMetadataAdapter.html
@@ -2,7 +2,7 @@
-KafkaConnectorMetadataAdapter (kafka-connector 1.0.7 API)
+KafkaConnectorMetadataAdapter (kafka-connector 1.1.0 API)
diff --git a/docs/javadoc/com/lightstreamer/kafka/adapters/pub/package-summary.html b/docs/javadoc/com/lightstreamer/kafka/adapters/pub/package-summary.html
index f1bbc15c..c5497ba5 100644
--- a/docs/javadoc/com/lightstreamer/kafka/adapters/pub/package-summary.html
+++ b/docs/javadoc/com/lightstreamer/kafka/adapters/pub/package-summary.html
@@ -2,7 +2,7 @@
-com.lightstreamer.kafka.adapters.pub (kafka-connector 1.0.7 API)
+com.lightstreamer.kafka.adapters.pub (kafka-connector 1.1.0 API)
diff --git a/docs/javadoc/com/lightstreamer/kafka/adapters/pub/package-tree.html b/docs/javadoc/com/lightstreamer/kafka/adapters/pub/package-tree.html
index 3238d2a0..1c27e16b 100644
--- a/docs/javadoc/com/lightstreamer/kafka/adapters/pub/package-tree.html
+++ b/docs/javadoc/com/lightstreamer/kafka/adapters/pub/package-tree.html
@@ -2,7 +2,7 @@
-com.lightstreamer.kafka.adapters.pub Class Hierarchy (kafka-connector 1.0.7 API)
+com.lightstreamer.kafka.adapters.pub Class Hierarchy (kafka-connector 1.1.0 API)
diff --git a/docs/javadoc/help-doc.html b/docs/javadoc/help-doc.html
index 386b2910..306009db 100644
--- a/docs/javadoc/help-doc.html
+++ b/docs/javadoc/help-doc.html
@@ -2,7 +2,7 @@
-API Help (kafka-connector 1.0.7 API)
+API Help (kafka-connector 1.1.0 API)
diff --git a/docs/javadoc/index-all.html b/docs/javadoc/index-all.html
index f425ff56..1d9cb650 100644
--- a/docs/javadoc/index-all.html
+++ b/docs/javadoc/index-all.html
@@ -2,7 +2,7 @@
-Index (kafka-connector 1.0.7 API)
+Index (kafka-connector 1.1.0 API)
diff --git a/docs/javadoc/index.html b/docs/javadoc/index.html
index 0133597f..e0854ca3 100644
--- a/docs/javadoc/index.html
+++ b/docs/javadoc/index.html
@@ -2,7 +2,7 @@
-Overview (kafka-connector 1.0.7 API)
+Overview (kafka-connector 1.1.0 API)
diff --git a/docs/javadoc/overview-summary.html b/docs/javadoc/overview-summary.html
index e6511112..f1505b10 100644
--- a/docs/javadoc/overview-summary.html
+++ b/docs/javadoc/overview-summary.html
@@ -2,7 +2,7 @@
-kafka-connector 1.0.7 API
+kafka-connector 1.1.0 API
diff --git a/docs/javadoc/overview-tree.html b/docs/javadoc/overview-tree.html
index cbe3b4e0..5a047e0f 100644
--- a/docs/javadoc/overview-tree.html
+++ b/docs/javadoc/overview-tree.html
@@ -2,7 +2,7 @@
-Class Hierarchy (kafka-connector 1.0.7 API)
+Class Hierarchy (kafka-connector 1.1.0 API)
diff --git a/examples/compose-templates/log4j.properties b/examples/compose-templates/log4j.properties
index 6620bcb6..87640a34 100644
--- a/examples/compose-templates/log4j.properties
+++ b/examples/compose-templates/log4j.properties
@@ -17,7 +17,7 @@ log4j.logger.com.lightstreamer.kafka.adapters.pub.KafkaConnectorMetadataAdapter
log4j.logger.org.apache.kafka=WARN, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=[%d] [%-10c{1}] %-5p %m%n
+log4j.appender.stdout.layout.ConversionPattern=[%d] [%-20t] [%-10c{1}] %-5p %m%n
log4j.appender.stdout.Target=System.out
# QuickStart logger
diff --git a/examples/vendors/confluent/README.md b/examples/vendors/confluent/README.md
index 309a43c6..3bff990c 100644
--- a/examples/vendors/confluent/README.md
+++ b/examples/vendors/confluent/README.md
@@ -237,7 +237,7 @@ To quickly complete the installation and verify the successful integration with
To enable a generic Lightstreamer client to receive real-time updates, it needs to subscribe to one or more items. Therefore, the Kafka Connector provides suitable mechanisms to map Kafka topics to Lightstreamer items effectively.
- The `QuickStartConfluentCloud` [factory configuration](/kafka-connector-project/kafka-connector/src/adapter/dist/adapters.xml#L419) comes with a straightforward mapping defined through the following settings:
+ The `QuickStartConfluentCloud` [factory configuration](/kafka-connector-project/kafka-connector/src/adapter/dist/adapters.xml#L441) comes with a straightforward mapping defined through the following settings:
- An item template:
```xml
@@ -878,7 +878,7 @@ Example of configuration with the use of a ticket cache:
<param name="authentication.gssapi.ticket.cache.enable">true</param>
```
-Check out the `QuickStartConfluentCloud` [factory configuration](/kafka-connector-project/kafka-connector/src/adapter/dist/adapters.xml#L432) file, where you can find an example of an authentication configuration that uses SASL/PLAIN.
+Check out the `QuickStartConfluentCloud` [factory configuration](/kafka-connector-project/kafka-connector/src/adapter/dist/adapters.xml#L454) file, where you can find an example of an authentication configuration that uses SASL/PLAIN.
### Record Evaluation
@@ -922,6 +922,42 @@ Example:
<param name="record.consume.from">EARLIEST</param>
```
+#### `record.consume.with.num.threads`
+
+_Optional_. The number of threads to be used for concurrent processing of the incoming deserialized records. If set to `-1`, the number of threads will be automatically determined based on the number of available CPU cores.
+
+Default value: `1`.
+
+Example:
+
+```xml
+<param name="record.consume.with.num.threads">4</param>
+```
+
+#### `record.consume.with.order.strategy`
+
+_Optional but only effective if [`record.consume.with.num.threads`](#recordconsumewithnumthreads) is set to a value greater than `1` (which excludes the default value)_. The order strategy to be used for concurrent processing of the incoming deserialized records. Can be one of the following:
+
+- `ORDER_BY_PARTITION`: maintain the order of records within each partition.
+
+  If you have multiple partitions, records from different partitions can be processed concurrently by different threads, but the order of records from a single partition will always be preserved. This is the default and generally offers a good balance between performance and ordering.
+
+- `ORDER_BY_KEY`: maintain the order among the records sharing the same key.
+
+ Different keys can be processed concurrently by different threads. So, while all records with key "A" are processed in order, and all records with key "B" are processed in order, the processing of "A" and "B" records can happen concurrently and interleaved in time. There's no guaranteed order between records of different keys.
+
+- `UNORDERED`: provide no ordering guarantees.
+
+  Records from any partition and with any key can be processed by any thread at any time. This offers the highest throughput when a high number of subscriptions is involved, but the order in which records are delivered to Lightstreamer clients might not match the order in which they were written to Kafka. This is suitable for use cases where message order is not important.
+
+Default value: `ORDER_BY_PARTITION`.
+
+Example:
+
+```xml
+<param name="record.consume.with.order.strategy">ORDER_BY_KEY</param>
+```
+
#### `record.key.evaluator.type` and `record.value.evaluator.type`
_Optional_. The format to be used to deserialize respectively the key and value of a Kafka record. Can be one of the following:
@@ -1118,7 +1154,7 @@ To configure the mapping, you define the set of all subscribable fields through
The configuration specifies that the field `fieldNameX` will contain the value extracted from the deserialized Kafka record through the `extractionExpressionX`, written using the [_Data Extraction Language_](#data-extraction-language). This approach makes it possible to transform a Kafka record of any complexity to the flat structure required by Lightstreamer.
-The `QuickStartConfluentCloud` [factory configuration](/kafka-connector-project/kafka-connector/src/adapter/dist/adapters.xml#L448) shows a basic example, where a simple _direct_ mapping has been defined between every attribute of the JSON record value and a Lightstreamer field with the corresponding name. Of course, thanks to the _Data Extraction Language_, more complex mapping can be employed.
+The `QuickStartConfluentCloud` [factory configuration](/kafka-connector-project/kafka-connector/src/adapter/dist/adapters.xml#L469) shows a basic example, where a simple _direct_ mapping has been defined between every attribute of the JSON record value and a Lightstreamer field with the corresponding name. Of course, thanks to the _Data Extraction Language_, more complex mappings can be employed.
```xml
...
diff --git a/kafka-connector-project/buildSrc/src/main/groovy/lightstreamer-kafka-connector.gradle b/kafka-connector-project/buildSrc/src/main/groovy/lightstreamer-kafka-connector.gradle
index c810d194..32f5f0f2 100644
--- a/kafka-connector-project/buildSrc/src/main/groovy/lightstreamer-kafka-connector.gradle
+++ b/kafka-connector-project/buildSrc/src/main/groovy/lightstreamer-kafka-connector.gradle
@@ -16,7 +16,7 @@ spotless {
greclipse()
}
java {
- target 'src/main/**/*.java','src/test/**/*.java'
+ target 'src/main/**/*.java','src/test/**/*.java','src/jmh/**/*.java'
googleJavaFormat()
.aosp()
.reorderImports(true)
diff --git a/kafka-connector-project/gradle.properties b/kafka-connector-project/gradle.properties
index c0f92edc..ff434e19 100644
--- a/kafka-connector-project/gradle.properties
+++ b/kafka-connector-project/gradle.properties
@@ -1,5 +1,5 @@
-version=1.0.7
+version=1.1.0
brandedProjectName=Lightstreamer Kafka Connector
-release_date=2024-06-11
+release_date=UNRELEASED
connect_owner=lightstreamer
connect_componentName=kafka-connect-lightstreamer
diff --git a/kafka-connector-project/gradle/wrapper/gradle-wrapper.properties b/kafka-connector-project/gradle/wrapper/gradle-wrapper.properties
index 9355b415..e2847c82 100644
--- a/kafka-connector-project/gradle/wrapper/gradle-wrapper.properties
+++ b/kafka-connector-project/gradle/wrapper/gradle-wrapper.properties
@@ -1,6 +1,6 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.10-bin.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.11.1-bin.zip
networkTimeout=10000
validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME
diff --git a/kafka-connector-project/kafka-connector/src/adapter/dist/adapters.xml b/kafka-connector-project/kafka-connector/src/adapter/dist/adapters.xml
index f204591d..4bda9d09 100644
--- a/kafka-connector-project/kafka-connector/src/adapter/dist/adapters.xml
+++ b/kafka-connector-project/kafka-connector/src/adapter/dist/adapters.xml
@@ -259,6 +259,28 @@
Default value: LATEST. -->
            <param name="record.consume.from">EARLIEST</param>
+
+            <!-- Optional. The number of threads to be used for concurrent processing of the
+                 incoming deserialized records. If set to -1, the number of threads will be
+                 automatically determined based on the number of available CPU cores.
+                 Default value: 1. -->
+            <!-- <param name="record.consume.with.num.threads">4</param> -->
+
+            <!-- Optional but only effective if record.consume.with.num.threads is set to a value
+                 greater than 1. The order strategy to be used for concurrent processing of the
+                 incoming deserialized records. Can be one of the following:
+                 - ORDER_BY_PARTITION: maintain the order of records within each partition
+                 - ORDER_BY_KEY: maintain the order among records sharing the same key
+                 - UNORDERED: provide no ordering guarantees
+                 Default value: ORDER_BY_PARTITION. -->
+            <!-- <param name="record.consume.with.order.strategy">ORDER_BY_KEY</param> -->