diff --git a/CHANGELOG.md b/CHANGELOG.md
index 59e55f09e6..6e3e55fa74 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,7 +2,29 @@
## [Unreleased](https://github.com/aklivity/zilla/tree/HEAD)
-[Full Changelog](https://github.com/aklivity/zilla/compare/0.9.94...HEAD)
+[Full Changelog](https://github.com/aklivity/zilla/compare/0.9.95...HEAD)
+
+**Implemented enhancements:**
+
+- Support Kafka topics create, alter, delete [\#1059](https://github.com/aklivity/zilla/issues/1059)
+
+**Fixed bugs:**
+
+- `zilla` Fails to Load Configuration from Specified location if the initial attempts are unsuccessful [\#1226](https://github.com/aklivity/zilla/issues/1226)
+
+**Merged pull requests:**
+
+- Risingwave Sink primary key fix [\#1273](https://github.com/aklivity/zilla/pull/1273) ([akrambek](https://github.com/akrambek))
+- Risingwave and PsqlKafka bug fixes [\#1272](https://github.com/aklivity/zilla/pull/1272) ([akrambek](https://github.com/akrambek))
+- Create external function issue fix [\#1271](https://github.com/aklivity/zilla/pull/1271) ([ankitk-me](https://github.com/ankitk-me))
+- Remove produceRecordFramingSize constraints [\#1270](https://github.com/aklivity/zilla/pull/1270) ([akrambek](https://github.com/akrambek))
+- External header pattern fix [\#1269](https://github.com/aklivity/zilla/pull/1269) ([ankitk-me](https://github.com/ankitk-me))
+- Detect config update after initial 404 status [\#1267](https://github.com/aklivity/zilla/pull/1267) ([jfallows](https://github.com/jfallows))
+- Support Kafka topics alter, delete [\#1265](https://github.com/aklivity/zilla/pull/1265) ([akrambek](https://github.com/akrambek))
+
+## [0.9.95](https://github.com/aklivity/zilla/tree/0.9.95) (2024-09-23)
+
+[Full Changelog](https://github.com/aklivity/zilla/compare/0.9.94...0.9.95)
**Fixed bugs:**
diff --git a/build/flyweight-maven-plugin/pom.xml b/build/flyweight-maven-plugin/pom.xml
index d394c18661..4e51fb4fee 100644
--- a/build/flyweight-maven-plugin/pom.xml
+++ b/build/flyweight-maven-plugin/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>build</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/build/pom.xml b/build/pom.xml
index d62e98a529..2d5bf2e334 100644
--- a/build/pom.xml
+++ b/build/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>zilla</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/cloud/docker-image/pom.xml b/cloud/docker-image/pom.xml
index 9b0b4cd651..6c3c548e79 100644
--- a/cloud/docker-image/pom.xml
+++ b/cloud/docker-image/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>cloud</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/cloud/helm-chart/pom.xml b/cloud/helm-chart/pom.xml
index 6b8b53a20e..410695b422 100644
--- a/cloud/helm-chart/pom.xml
+++ b/cloud/helm-chart/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>cloud</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/cloud/pom.xml b/cloud/pom.xml
index ca27290f53..d599c6d64e 100644
--- a/cloud/pom.xml
+++ b/cloud/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>zilla</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/conf/pom.xml b/conf/pom.xml
index bba5cba0ba..588f98f17b 100644
--- a/conf/pom.xml
+++ b/conf/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>zilla</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/incubator/binding-amqp.spec/pom.xml b/incubator/binding-amqp.spec/pom.xml
index 1d50f6d850..06b01a3747 100644
--- a/incubator/binding-amqp.spec/pom.xml
+++ b/incubator/binding-amqp.spec/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>incubator</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/incubator/binding-amqp/pom.xml b/incubator/binding-amqp/pom.xml
index 95098cc482..e506f51ec4 100644
--- a/incubator/binding-amqp/pom.xml
+++ b/incubator/binding-amqp/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>incubator</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/incubator/binding-pgsql-kafka.spec/pom.xml b/incubator/binding-pgsql-kafka.spec/pom.xml
index 3875b4fac9..01d0dd98b6 100644
--- a/incubator/binding-pgsql-kafka.spec/pom.xml
+++ b/incubator/binding-pgsql-kafka.spec/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>incubator</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/incubator/binding-pgsql-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/pgsql/kafka/streams/kafka/create.topic/client.rpt b/incubator/binding-pgsql-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/pgsql/kafka/streams/kafka/create.topic/client.rpt
index c39986a2ab..3396ec5eee 100644
--- a/incubator/binding-pgsql-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/pgsql/kafka/streams/kafka/create.topic/client.rpt
+++ b/incubator/binding-pgsql-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/pgsql/kafka/streams/kafka/create.topic/client.rpt
@@ -25,7 +25,7 @@ write zilla:begin.ext ${kafka:beginEx()
.name("dev.cities")
.partitionCount(1)
.replicas(1)
- .config("cleanup.policy", "compact")
+ .config("cleanup.policy", "delete")
.build()
.timeout(30000)
.validateOnly("false")
diff --git a/incubator/binding-pgsql-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/pgsql/kafka/streams/kafka/create.topic/server.rpt b/incubator/binding-pgsql-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/pgsql/kafka/streams/kafka/create.topic/server.rpt
index 2504756e81..8e4570a35b 100644
--- a/incubator/binding-pgsql-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/pgsql/kafka/streams/kafka/create.topic/server.rpt
+++ b/incubator/binding-pgsql-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/pgsql/kafka/streams/kafka/create.topic/server.rpt
@@ -29,7 +29,7 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.name("dev.cities")
.partitionCount(1)
.replicas(1)
- .config("cleanup.policy", "compact")
+ .config("cleanup.policy", "delete")
.build()
.timeout(30000)
.validateOnly("false")
diff --git a/incubator/binding-pgsql-kafka/pom.xml b/incubator/binding-pgsql-kafka/pom.xml
index 65055b2cc2..4177ed2bba 100644
--- a/incubator/binding-pgsql-kafka/pom.xml
+++ b/incubator/binding-pgsql-kafka/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>incubator</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/schema/PgsqlKafkaAvroSchemaTemplate.java b/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/schema/PgsqlKafkaAvroSchemaTemplate.java
index 03a64ea96c..3e17017ba5 100644
--- a/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/schema/PgsqlKafkaAvroSchemaTemplate.java
+++ b/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/schema/PgsqlKafkaAvroSchemaTemplate.java
@@ -21,15 +21,15 @@ protected String convertPgsqlTypeToAvro(
{
return switch (pgsqlType.toLowerCase())
{
- case "varchar", "text", "char", "bpchar" -> // Blank-padded char in PG
- "string";
- case "int", "integer", "serial" -> "int";
- case "bigint", "bigserial" -> "long";
- case "boolean", "bool" -> "boolean";
- case "real", "float4" -> "float";
- case "double precision", "float8" -> "double"; // Timestamp with time zone
- case "timestamp", "timestamptz", "date", "time" ->
- "timestamp-millis"; // Avro logical type for date/time values
+ case "varchar", "text", "char", "bpchar" -> "\\\"string\\\"";
+ case "int", "integer", "serial" -> "\\\"int\\\"";
+ case "numeric" -> "\\\"double\\\"";
+ case "bigint", "bigserial" -> "\\\"long\\\"";
+ case "boolean", "bool" -> "\\\"boolean\\\"";
+ case "real", "float4" -> "\\\"float\\\"";
+ case "double", "double precision", "float8" -> "\\\"double\\\"";
+ case "timestamp", "timestampz", "date", "time" ->
+ "{ \\\"type\\\": \\\"long\\\", \\\"logicalTyp\\\": \\\"timestamp-millis\\\" }";
default -> null;
};
}
diff --git a/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/schema/PgsqlKafkaKeyAvroSchemaTemplate.java b/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/schema/PgsqlKafkaKeyAvroSchemaTemplate.java
index c92a3319cb..cecef1e11f 100644
--- a/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/schema/PgsqlKafkaKeyAvroSchemaTemplate.java
+++ b/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/schema/PgsqlKafkaKeyAvroSchemaTemplate.java
@@ -60,7 +60,7 @@ public String generateSchema(
String avroType = convertPgsqlTypeToAvro(pgsqlType);
schemaBuilder.append(" {\\\"name\\\": \\\"").append(fieldName).append("\\\",");
- schemaBuilder.append(" \\\"type\\\": [\\\"").append(avroType).append("\\\", \\\"null\\\"] },");
+ schemaBuilder.append(" \\\"type\\\": [").append(avroType).append(", \\\"null\\\"] },");
}
// Remove the last comma and close the fields array
diff --git a/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/schema/PgsqlKafkaValueAvroSchemaTemplate.java b/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/schema/PgsqlKafkaValueAvroSchemaTemplate.java
index a6a712fe86..fe53279963 100644
--- a/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/schema/PgsqlKafkaValueAvroSchemaTemplate.java
+++ b/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/schema/PgsqlKafkaValueAvroSchemaTemplate.java
@@ -60,7 +60,7 @@ public String generateSchema(
String avroType = convertPgsqlTypeToAvro(pgsqlType);
schemaBuilder.append(" {\\\"name\\\": \\\"").append(fieldName).append("\\\",");
- schemaBuilder.append(" \\\"type\\\": \\\"").append(avroType).append("\\\"},");
+ schemaBuilder.append(" \\\"type\\\": ").append(avroType).append("},");
}
// Remove the last comma and close the fields array
diff --git a/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/stream/PgsqlKafkaProxyFactory.java b/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/stream/PgsqlKafkaProxyFactory.java
index e173ce5ad6..d7ca9e8180 100644
--- a/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/stream/PgsqlKafkaProxyFactory.java
+++ b/incubator/binding-pgsql-kafka/src/main/java/io/aklivity/zilla/runtime/binding/pgsql/kafka/internal/stream/PgsqlKafkaProxyFactory.java
@@ -1256,6 +1256,7 @@ else if (server.commandsProcessed == 0)
final PgsqlKafkaBindingConfig binding = server.binding;
final String primaryKey = binding.avroValueSchema.primaryKey(createTable);
+ final int primaryKeyCount = binding.avroValueSchema.primaryKeyCount(createTable);
int versionId = NO_ERROR_SCHEMA_VERSION_ID;
if (primaryKey != null)
@@ -1263,7 +1264,6 @@ else if (server.commandsProcessed == 0)
//TODO: assign versionId to avoid test failure
final String subjectKey = String.format("%s.%s-key", server.database, topic);
- final int primaryKeyCount = binding.avroValueSchema.primaryKeyCount(createTable);
String keySchema = primaryKeyCount > 1
? binding.avroKeySchema.generateSchema(server.database, createTable)
: AVRO_KEY_SCHEMA;
@@ -1278,7 +1278,7 @@ else if (server.commandsProcessed == 0)
if (versionId != NO_VERSION_ID)
{
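+ // compact only topics keyed by a single-column primary key; composite keys fall back to delete cleanup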
- final String policy = primaryKey != null
+ final String policy = primaryKey != null && primaryKeyCount == 1
? "compact"
: "delete";
diff --git a/incubator/binding-pgsql.spec/pom.xml b/incubator/binding-pgsql.spec/pom.xml
index 4c5e5de946..d69d6e4fbd 100644
--- a/incubator/binding-pgsql.spec/pom.xml
+++ b/incubator/binding-pgsql.spec/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>incubator</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/incubator/binding-pgsql/pom.xml b/incubator/binding-pgsql/pom.xml
index 85a73114f1..bcef9a3453 100644
--- a/incubator/binding-pgsql/pom.xml
+++ b/incubator/binding-pgsql/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>incubator</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/incubator/binding-risingwave.spec/pom.xml b/incubator/binding-risingwave.spec/pom.xml
index e8fc26ab9c..a3a65f9c09 100644
--- a/incubator/binding-risingwave.spec/pom.xml
+++ b/incubator/binding-risingwave.spec/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>incubator</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.function.return.table/client.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.function.return.table/client.rpt
new file mode 100644
index 0000000000..e07f659466
--- /dev/null
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.function.return.table/client.rpt
@@ -0,0 +1,57 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
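+# Effective stream: the CREATE FUNCTION ... RETURNS TABLE statement as the risingwave binding forwards it.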
+connect "zilla://streams/app1"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${pgsql:beginEx()
+ .typeId(zilla:id("pgsql"))
+ .parameter("user", "root")
+ .parameter("database", "dev")
+ .parameter("application_name", "psql")
+ .parameter("client_encoding", "UTF8")
+ .build()}
+
+connected
+
+write zilla:data.ext ${pgsql:dataEx()
+ .typeId(zilla:id("pgsql"))
+ .query()
+ .build()
+ .build()}
+write "CREATE FUNCTION series(int)\n"
+ "RETURNS TABLE (x int)\n"
+ "AS series\n"
+ "LANGUAGE java\n"
+ "USING LINK 'http://localhost:8815';"
+ [0x00]
+
+write flush
+
+read advised zilla:flush ${pgsql:flushEx()
+ .typeId(zilla:id("pgsql"))
+ .completion()
+ .tag("CREATE_FUNCTION")
+ .build()
+ .build()}
+
+read advised zilla:flush ${pgsql:flushEx()
+ .typeId(zilla:id("pgsql"))
+ .ready()
+ .status("IDLE")
+ .build()
+ .build()}
+
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.function.return.table/server.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.function.return.table/server.rpt
new file mode 100644
index 0000000000..885d785192
--- /dev/null
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.function.return.table/server.rpt
@@ -0,0 +1,57 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+accept "zilla://streams/app1"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${pgsql:beginEx()
+ .typeId(zilla:id("pgsql"))
+ .parameter("user", "root")
+ .parameter("database", "dev")
+ .parameter("application_name", "psql")
+ .parameter("client_encoding", "UTF8")
+ .build()}
+
+connected
+
+read zilla:data.ext ${pgsql:dataEx()
+ .typeId(zilla:id("pgsql"))
+ .query()
+ .build()
+ .build()}
+read "CREATE FUNCTION series(int)\n"
+ "RETURNS TABLE (x int)\n"
+ "AS series\n"
+ "LANGUAGE java\n"
+ "USING LINK 'http://localhost:8815';"
+ [0x00]
+
+write advise zilla:flush ${pgsql:flushEx()
+ .typeId(zilla:id("pgsql"))
+ .completion()
+ .tag("CREATE_FUNCTION")
+ .build()
+ .build()}
+
+write advise zilla:flush ${pgsql:flushEx()
+ .typeId(zilla:id("pgsql"))
+ .ready()
+ .status("IDLE")
+ .build()
+ .build()}
+
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.function/client.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.function/client.rpt
index e07f659466..182d430247 100644
--- a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.function/client.rpt
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.function/client.rpt
@@ -32,9 +32,9 @@ write zilla:data.ext ${pgsql:dataEx()
.query()
.build()
.build()}
-write "CREATE FUNCTION series(int)\n"
- "RETURNS TABLE (x int)\n"
- "AS series\n"
+write "CREATE FUNCTION gcd(int , int)\n"
+ "RETURNS int\n"
+ "AS gcd\n"
"LANGUAGE java\n"
"USING LINK 'http://localhost:8815';"
[0x00]
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.function/server.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.function/server.rpt
index 885d785192..475e1d7c17 100644
--- a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.function/server.rpt
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.function/server.rpt
@@ -34,9 +34,9 @@ read zilla:data.ext ${pgsql:dataEx()
.query()
.build()
.build()}
-read "CREATE FUNCTION series(int)\n"
- "RETURNS TABLE (x int)\n"
- "AS series\n"
+read "CREATE FUNCTION gcd(int , int)\n"
+ "RETURNS int\n"
+ "AS gcd\n"
"LANGUAGE java\n"
"USING LINK 'http://localhost:8815';"
[0x00]
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.materialized.view/client.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.materialized.view/client.rpt
index 3da60e9f42..ac1ef564cb 100644
--- a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.materialized.view/client.rpt
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.materialized.view/client.rpt
@@ -154,10 +154,10 @@ write "CREATE SINK distinct_cities_sink\n"
" connector='kafka',\n"
" properties.bootstrap.server='localhost:9092',\n"
" topic='dev.distinct_cities',\n"
- " primary_key='city'\n"
+ " primary_key='id'\n"
") FORMAT UPSERT ENCODE AVRO (\n"
" schema.registry='http://localhost:8081'\n"
- ");"
+ ") KEY ENCODE TEXT;"
[0x00]
write flush
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.materialized.view/server.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.materialized.view/server.rpt
index b0e6c2c687..102aeed230 100644
--- a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.materialized.view/server.rpt
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.materialized.view/server.rpt
@@ -162,10 +162,10 @@ read "CREATE SINK distinct_cities_sink\n"
" connector='kafka',\n"
" properties.bootstrap.server='localhost:9092',\n"
" topic='dev.distinct_cities',\n"
- " primary_key='city'\n"
+ " primary_key='id'\n"
") FORMAT UPSERT ENCODE AVRO (\n"
" schema.registry='http://localhost:8081'\n"
- ");"
+ ") KEY ENCODE TEXT;"
[0x00]
write advise zilla:flush ${pgsql:flushEx()
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key.and.includes/client.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key.and.includes/client.rpt
index e702643dc8..f9451bc7bb 100644
--- a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key.and.includes/client.rpt
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key.and.includes/client.rpt
@@ -34,7 +34,7 @@ write zilla:data.ext ${pgsql:dataEx()
.build()}
write "CREATE TABLE IF NOT EXISTS cities (\n"
" *,\n"
- " PRIMARY KEY (key)\n"
+ " PRIMARY KEY (id)\n"
")\n"
"INCLUDE KEY AS key\n"
"INCLUDE header 'zilla:correlation-id' AS correlation_id\n"
@@ -46,7 +46,7 @@ write "CREATE TABLE IF NOT EXISTS cities (\n"
" topic='dev.cities',\n"
" scan.startup.mode='latest',\n"
" scan.startup.timestamp.millis='140000000'\n"
- ") FORMAT UPSERT ENCODE AVRO (\n"
+ ") FORMAT PLAIN ENCODE AVRO (\n"
" schema.registry = 'http://localhost:8081'\n"
");"
[0x00]
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key.and.includes/server.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key.and.includes/server.rpt
index 00c261f9db..189026d000 100644
--- a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key.and.includes/server.rpt
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key.and.includes/server.rpt
@@ -38,7 +38,7 @@ read zilla:data.ext ${pgsql:dataEx()
.build()}
read "CREATE TABLE IF NOT EXISTS cities (\n"
" *,\n"
- " PRIMARY KEY (key)\n"
+ " PRIMARY KEY (id)\n"
")\n"
"INCLUDE KEY AS key\n"
"INCLUDE header 'zilla:correlation-id' AS correlation_id\n"
@@ -50,7 +50,7 @@ read "CREATE TABLE IF NOT EXISTS cities (\n"
" topic='dev.cities',\n"
" scan.startup.mode='latest',\n"
" scan.startup.timestamp.millis='140000000'\n"
- ") FORMAT UPSERT ENCODE AVRO (\n"
+ ") FORMAT PLAIN ENCODE AVRO (\n"
" schema.registry = 'http://localhost:8081'\n"
");"
[0x00]
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key/client.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key/client.rpt
index be0c185f67..be60e5fe79 100644
--- a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key/client.rpt
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key/client.rpt
@@ -34,7 +34,7 @@ write zilla:data.ext ${pgsql:dataEx()
.build()}
write "CREATE TABLE IF NOT EXISTS cities (\n"
" *,\n"
- " PRIMARY KEY (key)\n"
+ " PRIMARY KEY (id)\n"
")\n"
"INCLUDE KEY AS key\n"
"WITH (\n"
@@ -43,7 +43,7 @@ write "CREATE TABLE IF NOT EXISTS cities (\n"
" topic='dev.cities',\n"
" scan.startup.mode='latest',\n"
" scan.startup.timestamp.millis='140000000'\n"
- ") FORMAT UPSERT ENCODE AVRO (\n"
+ ") FORMAT PLAIN ENCODE AVRO (\n"
" schema.registry = 'http://localhost:8081'\n"
");"
[0x00]
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key/server.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key/server.rpt
index 7554c745e1..8d2121373d 100644
--- a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key/server.rpt
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/create.table.with.primary.key/server.rpt
@@ -38,7 +38,7 @@ read zilla:data.ext ${pgsql:dataEx()
.build()}
read "CREATE TABLE IF NOT EXISTS cities (\n"
" *,\n"
- " PRIMARY KEY (key)\n"
+ " PRIMARY KEY (id)\n"
")\n"
"INCLUDE KEY AS key\n"
"WITH (\n"
@@ -47,7 +47,7 @@ read "CREATE TABLE IF NOT EXISTS cities (\n"
" topic='dev.cities',\n"
" scan.startup.mode='latest',\n"
" scan.startup.timestamp.millis='140000000'\n"
- ") FORMAT UPSERT ENCODE AVRO (\n"
+ ") FORMAT PLAIN ENCODE AVRO (\n"
" schema.registry = 'http://localhost:8081'\n"
");"
[0x00]
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/query.with.multiple.statements/client.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/query.with.multiple.statements/client.rpt
index a2cb2b4ad2..ab484b7434 100644
--- a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/query.with.multiple.statements/client.rpt
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/query.with.multiple.statements/client.rpt
@@ -34,7 +34,7 @@ write zilla:data.ext ${pgsql:dataEx()
.build()}
write "CREATE TABLE IF NOT EXISTS cities (\n"
" *,\n"
- " PRIMARY KEY (key)\n"
+ " PRIMARY KEY (id)\n"
")\n"
"INCLUDE KEY AS key\n"
"WITH (\n"
@@ -43,7 +43,7 @@ write "CREATE TABLE IF NOT EXISTS cities (\n"
" topic='dev.cities',\n"
" scan.startup.mode='latest',\n"
" scan.startup.timestamp.millis='140000000'\n"
- ") FORMAT UPSERT ENCODE AVRO (\n"
+ ") FORMAT PLAIN ENCODE AVRO (\n"
" schema.registry = 'http://localhost:8081'\n"
");"
[0x00]
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/query.with.multiple.statements/server.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/query.with.multiple.statements/server.rpt
index 6596416bdd..39350568c4 100644
--- a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/query.with.multiple.statements/server.rpt
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/effective/query.with.multiple.statements/server.rpt
@@ -38,7 +38,7 @@ read zilla:data.ext ${pgsql:dataEx()
.build()}
read "CREATE TABLE IF NOT EXISTS cities (\n"
" *,\n"
- " PRIMARY KEY (key)\n"
+ " PRIMARY KEY (id)\n"
")\n"
"INCLUDE KEY AS key\n"
"WITH (\n"
@@ -47,7 +47,7 @@ read "CREATE TABLE IF NOT EXISTS cities (\n"
" topic='dev.cities',\n"
" scan.startup.mode='latest',\n"
" scan.startup.timestamp.millis='140000000'\n"
- ") FORMAT UPSERT ENCODE AVRO (\n"
+ ") FORMAT PLAIN ENCODE AVRO (\n"
" schema.registry = 'http://localhost:8081'\n"
");"
[0x00]
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/pgsql/create.function.return.table/client.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/pgsql/create.function.return.table/client.rpt
new file mode 100644
index 0000000000..8f7384ab62
--- /dev/null
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/pgsql/create.function.return.table/client.rpt
@@ -0,0 +1,54 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
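+# Application-facing pgsql stream: the CREATE FUNCTION ... RETURNS TABLE statement as sent by the client.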
+connect "zilla://streams/app0"
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+write zilla:begin.ext ${pgsql:beginEx()
+ .typeId(zilla:id("pgsql"))
+ .parameter("user", "root")
+ .parameter("database", "dev")
+ .parameter("application_name", "psql")
+ .parameter("client_encoding", "UTF8")
+ .build()}
+
+connected
+
+write zilla:data.ext ${pgsql:dataEx()
+ .typeId(zilla:id("pgsql"))
+ .query()
+ .build()
+ .build()}
+write "CREATE FUNCTION series(int) RETURNS TABLE (x int) "
+ "AS series;"
+ [0x00]
+
+write flush
+
+read advised zilla:flush ${pgsql:flushEx()
+ .typeId(zilla:id("pgsql"))
+ .completion()
+ .tag("CREATE_FUNCTION")
+ .build()
+ .build()}
+
+read advised zilla:flush ${pgsql:flushEx()
+ .typeId(zilla:id("pgsql"))
+ .ready()
+ .status("IDLE")
+ .build()
+ .build()}
+
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/pgsql/create.function.return.table/server.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/pgsql/create.function.return.table/server.rpt
new file mode 100644
index 0000000000..073520fff6
--- /dev/null
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/pgsql/create.function.return.table/server.rpt
@@ -0,0 +1,55 @@
+#
+# Copyright 2021-2023 Aklivity Inc
+#
+# Licensed under the Aklivity Community License (the "License"); you may not use
+# this file except in compliance with the License. You may obtain a copy of the
+# License at
+#
+# https://www.aklivity.io/aklivity-community-license/
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+#
+
+property serverAddress "zilla://streams/app0"
+
+accept ${serverAddress}
+ option zilla:window 8192
+ option zilla:transmission "duplex"
+
+accepted
+
+read zilla:begin.ext ${pgsql:beginEx()
+ .typeId(zilla:id("pgsql"))
+ .parameter("user", "root")
+ .parameter("database", "dev")
+ .parameter("application_name", "psql")
+ .parameter("client_encoding", "UTF8")
+ .build()}
+
+connected
+
+read zilla:data.ext ${pgsql:dataEx()
+ .typeId(zilla:id("pgsql"))
+ .query()
+ .build()
+ .build()}
+read "CREATE FUNCTION series(int) RETURNS TABLE (x int) "
+ "AS series;"
+ [0x00]
+
+write advise zilla:flush ${pgsql:flushEx()
+ .typeId(zilla:id("pgsql"))
+ .completion()
+ .tag("CREATE_FUNCTION")
+ .build()
+ .build()}
+
+write advise zilla:flush ${pgsql:flushEx()
+ .typeId(zilla:id("pgsql"))
+ .ready()
+ .status("IDLE")
+ .build()
+ .build()}
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/pgsql/create.function/client.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/pgsql/create.function/client.rpt
index 8f7384ab62..49ccf60200 100644
--- a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/pgsql/create.function/client.rpt
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/pgsql/create.function/client.rpt
@@ -32,8 +32,8 @@ write zilla:data.ext ${pgsql:dataEx()
.query()
.build()
.build()}
-write "CREATE FUNCTION series(int) RETURNS TABLE (x int) "
- "AS series;"
+write "CREATE FUNCTION gcd(int, int) RETURNS int "
+ "AS gcd;"
[0x00]
write flush
diff --git a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/pgsql/create.function/server.rpt b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/pgsql/create.function/server.rpt
index 073520fff6..4fc06f7a40 100644
--- a/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/pgsql/create.function/server.rpt
+++ b/incubator/binding-risingwave.spec/src/main/scripts/io/aklivity/zilla/specs/binding/risingwave/streams/pgsql/create.function/server.rpt
@@ -36,8 +36,8 @@ read zilla:data.ext ${pgsql:dataEx()
.query()
.build()
.build()}
-read "CREATE FUNCTION series(int) RETURNS TABLE (x int) "
- "AS series;"
+read "CREATE FUNCTION gcd(int, int) RETURNS int "
+ "AS gcd;"
[0x00]
write advise zilla:flush ${pgsql:flushEx()
diff --git a/incubator/binding-risingwave.spec/src/test/java/io/aklivity/zilla/specs/binding/risingwave/streams/EffectiveIT.java b/incubator/binding-risingwave.spec/src/test/java/io/aklivity/zilla/specs/binding/risingwave/streams/EffectiveIT.java
index e28c85b0db..d1b671d511 100644
--- a/incubator/binding-risingwave.spec/src/test/java/io/aklivity/zilla/specs/binding/risingwave/streams/EffectiveIT.java
+++ b/incubator/binding-risingwave.spec/src/test/java/io/aklivity/zilla/specs/binding/risingwave/streams/EffectiveIT.java
@@ -86,6 +86,16 @@ public void shouldCreateFunction() throws Exception
k3po.finish();
}
+ @Test
+ @Specification({
+ "${app}/create.function.return.table/client",
+ "${app}/create.function.return.table/server"
+ })
+ public void shouldCreateFunctionReturnTable() throws Exception
+ {
+ k3po.finish();
+ }
+
@Test
@Specification({
"${app}/create.function.embedded/client",
diff --git a/incubator/binding-risingwave.spec/src/test/java/io/aklivity/zilla/specs/binding/risingwave/streams/PgsqlIT.java b/incubator/binding-risingwave.spec/src/test/java/io/aklivity/zilla/specs/binding/risingwave/streams/PgsqlIT.java
index 70ef76ef9f..7227533b33 100644
--- a/incubator/binding-risingwave.spec/src/test/java/io/aklivity/zilla/specs/binding/risingwave/streams/PgsqlIT.java
+++ b/incubator/binding-risingwave.spec/src/test/java/io/aklivity/zilla/specs/binding/risingwave/streams/PgsqlIT.java
@@ -86,6 +86,16 @@ public void shouldCreateFunction() throws Exception
k3po.finish();
}
+ @Test
+ @Specification({
+ "${app}/create.function.return.table/client",
+ "${app}/create.function.return.table/server"
+ })
+ public void shouldCreateFunctionReturnTable() throws Exception
+ {
+ k3po.finish();
+ }
+
@Test
@Specification({
"${app}/create.function.embedded/client",
diff --git a/incubator/binding-risingwave/pom.xml b/incubator/binding-risingwave/pom.xml
index 2e8578d634..ce688c194e 100644
--- a/incubator/binding-risingwave/pom.xml
+++ b/incubator/binding-risingwave/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>incubator</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwaveCreateSinkTemplate.java b/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwaveCreateSinkTemplate.java
index 1c34fcba3c..d5133d2eaf 100644
--- a/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwaveCreateSinkTemplate.java
+++ b/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwaveCreateSinkTemplate.java
@@ -15,6 +15,7 @@
package io.aklivity.zilla.runtime.binding.risingwave.internal.statement;
import java.util.Map;
+import java.util.Optional;
import net.sf.jsqlparser.statement.Statement;
import net.sf.jsqlparser.statement.create.view.CreateView;
@@ -27,11 +28,12 @@ public class RisingwaveCreateSinkTemplate extends RisingwaveCommandTemplate
WITH (
connector='kafka',
properties.bootstrap.server='%s',
- topic='%s.%s',
- primary_key='%s'
+ topic='%s.%s'%s
) FORMAT UPSERT ENCODE AVRO (
schema.registry='%s'
- );\u0000""";
+ ) KEY ENCODE TEXT;\u0000""";
+
+ private final String primaryKeyFormat = ",\n primary_key='%s'";
private final String bootstrapServer;
private final String schemaRegistry;
@@ -51,7 +53,20 @@ public String generate(
{
CreateView createView = (CreateView) statement;
String viewName = createView.getView().getName();
- String primaryKey = columns.keySet().iterator().next();
+
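+ // choose the sink primary key: a column named exactly "id" if present, otherwise the first column whose name contains "id"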
+ Optional<Map.Entry<String, String>> primaryKeyMatch = columns.entrySet().stream()
+ .filter(e -> "id".equalsIgnoreCase(e.getKey()))
+ .findFirst();
+
+ if (primaryKeyMatch.isEmpty())
+ {
+ primaryKeyMatch = columns.entrySet().stream()
+ .filter(e -> e.getKey().toLowerCase().contains("id"))
+ .findFirst();
+ }
+
+ String textPrimaryKey = primaryKeyMatch.map(Map.Entry::getKey).orElse(null);
+ String primaryKey = textPrimaryKey != null ? primaryKeyFormat.formatted(textPrimaryKey) : "";
return String.format(sqlFormat, viewName, viewName, bootstrapServer, database, viewName, primaryKey, schemaRegistry);
}
diff --git a/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwaveCreateTableTemplate.java b/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwaveCreateTableTemplate.java
index 8922b37cff..ac4d8507dc 100644
--- a/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwaveCreateTableTemplate.java
+++ b/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwaveCreateTableTemplate.java
@@ -23,7 +23,7 @@ public class RisingwaveCreateTableTemplate extends RisingwaveCommandTemplate
private final String sqlFormat = """
CREATE TABLE IF NOT EXISTS %s (
*,
- PRIMARY KEY (key)
+ PRIMARY KEY (%s)
)
INCLUDE KEY AS key%s
WITH (
@@ -32,7 +32,7 @@ PRIMARY KEY (key)
topic='%s.%s',
scan.startup.mode='latest',
scan.startup.timestamp.millis='%d'
- ) FORMAT UPSERT ENCODE AVRO (
+ ) FORMAT PLAIN ENCODE AVRO (
schema.registry = '%s'
);\u0000""";
@@ -57,6 +57,8 @@ public String generate(
CreateTable createTable = command.createTable;
String table = createTable.getTable().getName();
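+ // derive PRIMARY KEY from the original CREATE TABLE statement instead of the previously hardcoded include key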
+ String primaryKey = primaryKey(createTable);
+
includeBuilder.setLength(0);
 final Map<String, String> includes = command.includes;
if (includes != null && !includes.isEmpty())
@@ -66,7 +68,7 @@ public String generate(
includeBuilder.delete(includeBuilder.length() - 1, includeBuilder.length());
}
- return String.format(sqlFormat, table, includeBuilder, bootstrapServer, database,
+ return String.format(sqlFormat, table, primaryKey, includeBuilder, bootstrapServer, database,
table, scanStartupMil, schemaRegistry);
}
}
diff --git a/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwavePgsqlTypeMapping.java b/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwavePgsqlTypeMapping.java
index 54079411bd..516ceaffec 100644
--- a/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwavePgsqlTypeMapping.java
+++ b/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwavePgsqlTypeMapping.java
@@ -31,6 +31,7 @@ public final class RisingwavePgsqlTypeMapping
TYPE_MAPPINGS.put("timestamp without time zone", "TIMESTAMP");
TYPE_MAPPINGS.put("timestamp with time zone", "TIMESTAMPZ");
TYPE_MAPPINGS.put("double precision", "DOUBLE");
+ TYPE_MAPPINGS.put("numeric", "NUMERIC");
}
private RisingwavePgsqlTypeMapping()
diff --git a/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwaveSqlCommandParser.java b/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwaveSqlCommandParser.java
index 205467ddfa..619a25ddfb 100644
--- a/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwaveSqlCommandParser.java
+++ b/incubator/binding-risingwave/src/main/java/io/aklivity/zilla/runtime/binding/risingwave/internal/statement/RisingwaveSqlCommandParser.java
@@ -26,7 +26,7 @@ public final class RisingwaveSqlCommandParser
"CREATE\\s+FUNCTION[\\s\\S]+?\\$\\$[\\s\\S]+?\\$\\$";
private static final String SQL_COMMAND_PATTERN =
"(?i)\\b(CREATE FUNCTION)\\b.*?\\$\\$(.*?)\\$\\$\\s*;[\\x00\\n]*" +
- "|\\b(CREATE FUNCTION)\\b.*?RETURNS TABLE.*?AS.*?;[\\x00\\n]*" +
+ "|\\b(CREATE FUNCTION)\\b.*?RETURNS .*?AS.*?;[\\x00\\n]*" +
"|\\b(CREATE MATERIALIZED VIEW|CREATE SOURCE|CREATE SINK|CREATE INDEX" +
"|CREATE VIEW|SHOW TABLES|DESCRIBE|SHOW)\\b.*?;[\\x00\\n]*" +
"|\\b(SELECT|INSERT|UPDATE|DELETE|ALTER|DROP|CREATE TABLE|CREATE SCHEMA|CREATE DATABASE)\\b.*?;[\\x00\\n]*";
diff --git a/incubator/binding-risingwave/src/test/java/io/aklivity/zilla/runtime/binding/risingwave/internal/stream/ProxyIT.java b/incubator/binding-risingwave/src/test/java/io/aklivity/zilla/runtime/binding/risingwave/internal/stream/ProxyIT.java
index d7e9fd64cf..316ab6d996 100644
--- a/incubator/binding-risingwave/src/test/java/io/aklivity/zilla/runtime/binding/risingwave/internal/stream/ProxyIT.java
+++ b/incubator/binding-risingwave/src/test/java/io/aklivity/zilla/runtime/binding/risingwave/internal/stream/ProxyIT.java
@@ -121,6 +121,16 @@ public void shouldCreateFunction() throws Exception
k3po.finish();
}
+ @Test
+ @Configuration("proxy.function.yaml")
+ @Specification({
+ "${pgsql}/create.function.return.table/client",
+ "${effective}/create.function.return.table/server" })
+ public void shouldCreateFunctionReturnTable() throws Exception
+ {
+ k3po.finish();
+ }
+
@Test
@Configuration("proxy.function.yaml")
@Specification({
diff --git a/incubator/command-dump/pom.xml b/incubator/command-dump/pom.xml
index 5b51dacee3..9a8543e19c 100644
--- a/incubator/command-dump/pom.xml
+++ b/incubator/command-dump/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>incubator</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/incubator/command-log/pom.xml b/incubator/command-log/pom.xml
index a50e591b6a..dd93d393a1 100644
--- a/incubator/command-log/pom.xml
+++ b/incubator/command-log/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>incubator</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/incubator/command-tune/pom.xml b/incubator/command-tune/pom.xml
index 39a9e5ef93..7ea44ca2ae 100644
--- a/incubator/command-tune/pom.xml
+++ b/incubator/command-tune/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>incubator</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/incubator/pom.xml b/incubator/pom.xml
index 896a536bf8..b76c84e7f1 100644
--- a/incubator/pom.xml
+++ b/incubator/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>zilla</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/manager/pom.xml b/manager/pom.xml
index 9e310f2313..0dc13e571b 100644
--- a/manager/pom.xml
+++ b/manager/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>zilla</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/pom.xml b/pom.xml
index ab84154331..a1b9917186 100644
--- a/pom.xml
+++ b/pom.xml
@@ -7,7 +7,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>io.aklivity.zilla</groupId>
   <artifactId>zilla</artifactId>
-  <version>0.9.95</version>
+  <version>0.9.96</version>
   <packaging>pom</packaging>
   <name>zilla</name>
   <url>https://github.com/aklivity/zilla</url>
diff --git a/runtime/binding-asyncapi/pom.xml b/runtime/binding-asyncapi/pom.xml
index dc8e886b9a..39794069a2 100644
--- a/runtime/binding-asyncapi/pom.xml
+++ b/runtime/binding-asyncapi/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>runtime</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/runtime/binding-echo/pom.xml b/runtime/binding-echo/pom.xml
index 5387a3c312..99f4bf8462 100644
--- a/runtime/binding-echo/pom.xml
+++ b/runtime/binding-echo/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>runtime</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/runtime/binding-fan/pom.xml b/runtime/binding-fan/pom.xml
index 4298a009d8..3020df23a2 100644
--- a/runtime/binding-fan/pom.xml
+++ b/runtime/binding-fan/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>runtime</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/runtime/binding-filesystem/pom.xml b/runtime/binding-filesystem/pom.xml
index 02047ec134..d042600d28 100644
--- a/runtime/binding-filesystem/pom.xml
+++ b/runtime/binding-filesystem/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>runtime</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/runtime/binding-grpc-kafka/pom.xml b/runtime/binding-grpc-kafka/pom.xml
index 2842b9b123..299ffeff63 100644
--- a/runtime/binding-grpc-kafka/pom.xml
+++ b/runtime/binding-grpc-kafka/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>runtime</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/runtime/binding-grpc/pom.xml b/runtime/binding-grpc/pom.xml
index 64db9f6b85..dade8f75a8 100644
--- a/runtime/binding-grpc/pom.xml
+++ b/runtime/binding-grpc/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>runtime</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/runtime/binding-http-filesystem/pom.xml b/runtime/binding-http-filesystem/pom.xml
index 821529422d..229dfe5413 100644
--- a/runtime/binding-http-filesystem/pom.xml
+++ b/runtime/binding-http-filesystem/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>runtime</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/runtime/binding-http-kafka/pom.xml b/runtime/binding-http-kafka/pom.xml
index c11314e102..dc58093614 100644
--- a/runtime/binding-http-kafka/pom.xml
+++ b/runtime/binding-http-kafka/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>runtime</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/runtime/binding-http/pom.xml b/runtime/binding-http/pom.xml
index 0af601ac15..fb6183cccc 100644
--- a/runtime/binding-http/pom.xml
+++ b/runtime/binding-http/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>runtime</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/runtime/binding-kafka-grpc/pom.xml b/runtime/binding-kafka-grpc/pom.xml
index 6ac327434a..19fcec0b84 100644
--- a/runtime/binding-kafka-grpc/pom.xml
+++ b/runtime/binding-kafka-grpc/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>runtime</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/runtime/binding-kafka/pom.xml b/runtime/binding-kafka/pom.xml
index 9757f1b6b3..915c1ac818 100644
--- a/runtime/binding-kafka/pom.xml
+++ b/runtime/binding-kafka/pom.xml
@@ -8,7 +8,7 @@
   <parent>
     <groupId>io.aklivity.zilla</groupId>
     <artifactId>runtime</artifactId>
-    <version>0.9.95</version>
+    <version>0.9.96</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientAlterConfigsFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientAlterConfigsFactory.java
new file mode 100644
index 0000000000..037dc86874
--- /dev/null
+++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientAlterConfigsFactory.java
@@ -0,0 +1,1574 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.aklivity.zilla.runtime.binding.kafka.internal.stream;
+
+import static io.aklivity.zilla.runtime.binding.kafka.internal.types.ProxyAddressProtocol.STREAM;
+import static io.aklivity.zilla.runtime.engine.budget.BudgetCreditor.NO_BUDGET_ID;
+import static io.aklivity.zilla.runtime.engine.budget.BudgetDebitor.NO_DEBITOR_INDEX;
+import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Consumer;
+import java.util.function.LongFunction;
+import java.util.function.UnaryOperator;
+
+import org.agrona.DirectBuffer;
+import org.agrona.MutableDirectBuffer;
+import org.agrona.collections.LongLongConsumer;
+import org.agrona.concurrent.UnsafeBuffer;
+
+import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig;
+import io.aklivity.zilla.runtime.binding.kafka.config.KafkaServerConfig;
+import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding;
+import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration;
+import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig;
+import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.KafkaResourceType;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.RequestHeaderFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.ResponseHeaderFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.alter_configs.AlterConfigsRequestFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.alter_configs.AlterConfigsRequestPart2FW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.alter_configs.AlterConfigsResponseFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.alter_configs.ConfigRequestFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.alter_configs.ResourceRequestFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.alter_configs.ResourceResponseFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaAlterConfigsRequestBeginExFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ProxyBeginExFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.SignalFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW;
+import io.aklivity.zilla.runtime.engine.EngineContext;
+import io.aklivity.zilla.runtime.engine.binding.BindingHandler;
+import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer;
+import io.aklivity.zilla.runtime.engine.budget.BudgetDebitor;
+import io.aklivity.zilla.runtime.engine.buffer.BufferPool;
+import io.aklivity.zilla.runtime.engine.concurrent.Signaler;
+
+public final class KafkaClientAlterConfigsFactory extends KafkaClientSaslHandshaker implements BindingHandler
+{
+ private static final int ERROR_NONE = 0;
+ private static final int SIGNAL_NEXT_REQUEST = 1;
+
+ private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer();
+ private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(EMPTY_BUFFER, 0, 0);
+ private static final Consumer<OctetsFW.Builder> EMPTY_EXTENSION = ex -> {};
+
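+ // Kafka AlterConfigs protocol API (api key 33), introduced by KIP-133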
+ private static final short ALTER_CONFIGS_API_KEY = 33;
+ private static final short ALTER_CONFIGS_API_VERSION = 1;
+
+ private final BeginFW beginRO = new BeginFW();
+ private final DataFW dataRO = new DataFW();
+ private final EndFW endRO = new EndFW();
+ private final AbortFW abortRO = new AbortFW();
+ private final ResetFW resetRO = new ResetFW();
+ private final WindowFW windowRO = new WindowFW();
+ private final SignalFW signalRO = new SignalFW();
+ private final ExtensionFW extensionRO = new ExtensionFW();
+ private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW();
+
+ private final BeginFW.Builder beginRW = new BeginFW.Builder();
+ private final DataFW.Builder dataRW = new DataFW.Builder();
+ private final EndFW.Builder endRW = new EndFW.Builder();
+ private final AbortFW.Builder abortRW = new AbortFW.Builder();
+ private final ResetFW.Builder resetRW = new ResetFW.Builder();
+ private final WindowFW.Builder windowRW = new WindowFW.Builder();
+ private final ProxyBeginExFW.Builder proxyBeginExRW = new ProxyBeginExFW.Builder();
+ private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder();
+ private final KafkaResetExFW.Builder kafkaResetExRW = new KafkaResetExFW.Builder();
+
+ private final RequestHeaderFW.Builder requestHeaderRW = new RequestHeaderFW.Builder();
+ private final AlterConfigsRequestFW.Builder alterConfigsRequestRW = new AlterConfigsRequestFW.Builder();
+ private final ResourceRequestFW.Builder resourceRequestRW = new ResourceRequestFW.Builder();
+ private final ConfigRequestFW.Builder configRequestRW = new ConfigRequestFW.Builder();
+ private final AlterConfigsRequestPart2FW.Builder alterConfigsRequestPart2RW = new AlterConfigsRequestPart2FW.Builder();
+
+ private final ResponseHeaderFW responseHeaderRO = new ResponseHeaderFW();
+ private final AlterConfigsResponseFW alterConfigsResponseRO = new AlterConfigsResponseFW();
+ private final ResourceResponseFW resourceResponseRO = new ResourceResponseFW();
+
+ private final KafkaAlterConfigsClientDecoder decodeSaslHandshakeResponse = this::decodeSaslHandshakeResponse;
+ private final KafkaAlterConfigsClientDecoder decodeSaslHandshake = this::decodeSaslHandshake;
+ private final KafkaAlterConfigsClientDecoder decodeSaslHandshakeMechanisms = this::decodeSaslHandshakeMechanisms;
+ private final KafkaAlterConfigsClientDecoder decodeSaslHandshakeMechanism = this::decodeSaslHandshakeMechanism;
+ private final KafkaAlterConfigsClientDecoder decodeSaslAuthenticateResponse = this::decodeSaslAuthenticateResponse;
+ private final KafkaAlterConfigsClientDecoder decodeSaslAuthenticate = this::decodeSaslAuthenticate;
+ private final KafkaAlterConfigsClientDecoder decodeAlterConfigsResponse = this::decodeAlterConfigsResponse;
+ private final KafkaAlterConfigsClientDecoder decodeIgnoreAll = this::decodeIgnoreAll;
+ private final KafkaAlterConfigsClientDecoder decodeReject = this::decodeReject;
+
+ private final int kafkaTypeId;
+ private final int proxyTypeId;
+ private final MutableDirectBuffer writeBuffer;
+ private final MutableDirectBuffer extBuffer;
+ private final BufferPool decodePool;
+ private final BufferPool encodePool;
+ private final Signaler signaler;
+ private final BindingHandler streamFactory;
+ private final UnaryOperator<KafkaSaslConfig> resolveSasl;
+ private final LongFunction<KafkaBindingConfig> supplyBinding;
+ private final LongFunction<BudgetDebitor> supplyDebitor;
+ private final List responseTopics;
+
+ public KafkaClientAlterConfigsFactory(
+ KafkaConfiguration config,
+ EngineContext context,
+ LongFunction<KafkaBindingConfig> supplyBinding,
+ LongFunction<BudgetDebitor> supplyDebitor,
+ Signaler signaler,
+ BindingHandler streamFactory,
+ UnaryOperator<KafkaSaslConfig> resolveSasl)
+ {
+ super(config, context);
+ this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME);
+ this.proxyTypeId = context.supplyTypeId("proxy");
+ this.signaler = signaler;
+ this.streamFactory = streamFactory;
+ this.resolveSasl = resolveSasl;
+ this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]);
+ this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]);
+ this.decodePool = context.bufferPool();
+ this.encodePool = context.bufferPool();
+ this.supplyBinding = supplyBinding;
+ this.supplyDebitor = supplyDebitor;
+ this.responseTopics = new ArrayList<>();
+ }
+
+ @Override
+ public MessageConsumer newStream(
+ int msgTypeId,
+ DirectBuffer buffer,
+ int index,
+ int length,
+ MessageConsumer application)
+ {
+ final BeginFW begin = beginRO.wrap(buffer, index, index + length);
+ final long originId = begin.originId();
+ final long routedId = begin.routedId();
+ final long initialId = begin.streamId();
+ final long affinity = begin.affinity();
+ final long authorization = begin.authorization();
+ final OctetsFW extension = begin.extension();
+ final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit());
+ final KafkaBeginExFW kafkaBeginEx = beginEx != null && beginEx.typeId() == kafkaTypeId ?
+ kafkaBeginExRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()) : null;
+
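+ // the begin extension must be a kafka REQUEST of kind alterConfigs describing the resources to alter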
+ assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_REQUEST;
+ final KafkaAlterConfigsRequestBeginExFW kafkaAlterConfigsBeginEx = kafkaBeginEx.request().alterConfigs();
+
+ MessageConsumer newStream = null;
+
+ final KafkaBindingConfig binding = supplyBinding.apply(routedId);
+ final KafkaRouteConfig resolved = binding != null
+ ? binding.resolve(authorization, null)
+ : null;
+
+ if (resolved != null)
+ {
+ final long resolvedId = resolved.id;
+ final KafkaSaslConfig sasl = resolveSasl.apply(binding.sasl());
+
+ List<ResourceInfo> resources = new ArrayList<>();
+ kafkaAlterConfigsBeginEx.resources().forEach(t ->
+ {
+ KafkaResourceType type = t.type().get();
+ String name = t.name().asString();
+ List<ConfigInfo> configs = new ArrayList<>();
+ t.configs().forEach(c -> configs.add(new ConfigInfo(c.name().asString(), c.value().asString())));
+
+ resources.add(new ResourceInfo(type, name, configs));
+ });
+ byte validateOnly = (byte) kafkaAlterConfigsBeginEx.validateOnly();
+
+ final AlterConfigsRequestInfo request = new AlterConfigsRequestInfo(resources, validateOnly);
+
+ newStream = new KafkaAlterConfigsStream(
+ application,
+ originId,
+ routedId,
+ initialId,
+ affinity,
+ resolvedId,
+ request,
+ binding.servers(),
+ sasl)::onApplication;
+ }
+
+ return newStream;
+ }
+
+ private MessageConsumer newStream(
+ MessageConsumer sender,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ long affinity,
+ Consumer<OctetsFW.Builder> extension)
+ {
+ final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .affinity(affinity)
+ .extension(extension)
+ .build();
+
+ final MessageConsumer receiver =
+ streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender);
+
+ receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof());
+
+ return receiver;
+ }
+
+ private void doBegin(
+ MessageConsumer receiver,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ long affinity,
+ Consumer<OctetsFW.Builder> extension)
+ {
+ final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .affinity(affinity)
+ .extension(extension)
+ .build();
+
+ receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof());
+ }
+
+ private void doData(
+ MessageConsumer receiver,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ long budgetId,
+ int reserved,
+ DirectBuffer payload,
+ int offset,
+ int length,
+ Consumer<OctetsFW.Builder> extension)
+ {
+ final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .budgetId(budgetId)
+ .reserved(reserved)
+ .payload(payload, offset, length)
+ .extension(extension)
+ .build();
+
+ receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof());
+ }
+
+ private void doData(
+ MessageConsumer receiver,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ long budgetId,
+ int reserved,
+ DirectBuffer payload,
+ int offset,
+ int length,
+ Flyweight extension)
+ {
+ final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .budgetId(budgetId)
+ .reserved(reserved)
+ .payload(payload, offset, length)
+ .extension(extension.buffer(), extension.offset(), extension.sizeof())
+ .build();
+
+ receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof());
+ }
+
+ private void doEnd(
+ MessageConsumer receiver,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ Consumer<OctetsFW.Builder> extension)
+ {
+ final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .extension(extension)
+ .build();
+
+ receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof());
+ }
+
+ private void doAbort(
+ MessageConsumer receiver,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ Consumer<OctetsFW.Builder> extension)
+ {
+ final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .extension(extension)
+ .build();
+
+ receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof());
+ }
+
+ private void doWindow(
+ MessageConsumer sender,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ long budgetId,
+ int padding)
+ {
+ final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .budgetId(budgetId)
+ .padding(padding)
+ .build();
+
+ sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof());
+ }
+
+ private void doReset(
+ MessageConsumer sender,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ Flyweight extension)
+ {
+ final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .extension(extension.buffer(), extension.offset(), extension.sizeof())
+ .build();
+
+ sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof());
+ }
+
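+ // Each client advances by swapping its current decoder: the SASL handshake and
+ // authenticate decoders while authenticating, then decodeAlterConfigsResponse, with
+ // decodeReject cleaning up and handing off to decodeIgnoreAll on protocol violations.
+ // A decoder returns the new progress offset; no progress means more bytes are needed.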
+ @FunctionalInterface
+ private interface KafkaAlterConfigsClientDecoder
+ {
+ int decode(
+ KafkaAlterConfigsClient client,
+ long traceId,
+ long authorization,
+ long budgetId,
+ int reserved,
+ MutableDirectBuffer buffer,
+ int offset,
+ int progress,
+ int limit);
+ }
+
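+ // Decodes the AlterConfigs response: response header, then the alter-configs body,
+ // then one resource frame per resource. Any tryWrap returning null means the frame
+ // is not yet complete, so decoding breaks out and the undecoded remainder stays in
+ // the decode slot until more network data arrives.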
+ private int decodeAlterConfigsResponse(
+ KafkaAlterConfigsClient client,
+ long traceId,
+ long authorization,
+ long budgetId,
+ int reserved,
+ DirectBuffer buffer,
+ int offset,
+ int progress,
+ int limit)
+ {
+ final int length = limit - progress;
+
+ decode:
+ if (length != 0)
+ {
+ final ResponseHeaderFW responseHeader = responseHeaderRO.tryWrap(buffer, progress, limit);
+ if (responseHeader == null)
+ {
+ break decode;
+ }
+
+ progress = responseHeader.limit();
+
+ final AlterConfigsResponseFW alterConfigsResponse = alterConfigsResponseRO.tryWrap(buffer, progress, limit);
+ if (alterConfigsResponse == null)
+ {
+ break decode;
+ }
+
+ progress = alterConfigsResponse.limit();
+
+ final int resourceCount = alterConfigsResponse.resourceCount();
+ final int throttle = alterConfigsResponse.throttleTimeMillis();
+
+ responseTopics.clear();
+ for (int resourceIndex = 0; resourceIndex < resourceCount; resourceIndex++)
+ {
+ final ResourceResponseFW resource = resourceResponseRO.tryWrap(buffer, progress, limit);
+ if (resource == null)
+ {
+ client.decoder = decodeIgnoreAll;
+ break decode;
+ }
+
+ progress = resource.limit();
+
+ responseTopics.add(new AlterConfigsResponseInfo(
+ resource.error(),
+ resource.message().asString(),
+ KafkaResourceType.valueOf(resource.type()),
+ resource.name().asString()));
+ }
+
+ client.onDecodeAlterConfigsResponse(traceId, authorization, throttle, responseTopics);
+ }
+
+ return progress;
+ }
+
+ private int decodeReject(
+ KafkaAlterConfigsClient client,
+ long traceId,
+ long authorization,
+ long budgetId,
+ int reserved,
+ DirectBuffer buffer,
+ int offset,
+ int progress,
+ int limit)
+ {
+ client.cleanupNetwork(traceId);
+ client.decoder = decodeIgnoreAll;
+ return limit;
+ }
+
+ private int decodeIgnoreAll(
+ KafkaAlterConfigsClient client,
+ long traceId,
+ long authorization,
+ long budgetId,
+ int reserved,
+ DirectBuffer buffer,
+ int offset,
+ int progress,
+ int limit)
+ {
+ return limit;
+ }
+
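+ // Application-facing side of the exchange: one KafkaAlterConfigsStream per request
+ // stream, paired with a KafkaAlterConfigsClient that owns the network connection,
+ // the SASL handshake, and request/response encoding.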
+ private final class KafkaAlterConfigsStream
+ {
+ private final MessageConsumer application;
+ private final long originId;
+ private final long routedId;
+ private final long initialId;
+ private final long replyId;
+ private final long affinity;
+ private final KafkaAlterConfigsClient client;
+
+ private int state;
+
+ private long initialSeq;
+ private long initialAck;
+ private int initialMax;
+
+ private long replySeq;
+ private long replyAck;
+ private int replyMax;
+ private int replyPad;
+
+ private long replyBudgetId;
+
+ KafkaAlterConfigsStream(
+ MessageConsumer application,
+ long originId,
+ long routedId,
+ long initialId,
+ long affinity,
+ long resolvedId,
+ AlterConfigsRequestInfo request,
+ List<KafkaServerConfig> servers,
+ KafkaSaslConfig sasl)
+ {
+ this.application = application;
+ this.originId = originId;
+ this.routedId = routedId;
+ this.initialId = initialId;
+ this.replyId = supplyReplyId.applyAsLong(initialId);
+ this.affinity = affinity;
+ this.client = new KafkaAlterConfigsClient(this, routedId, resolvedId, request, servers, sasl);
+ }
+
+ private void onApplication(
+ int msgTypeId,
+ DirectBuffer buffer,
+ int index,
+ int length)
+ {
+ switch (msgTypeId)
+ {
+ case BeginFW.TYPE_ID:
+ final BeginFW begin = beginRO.wrap(buffer, index, index + length);
+ onApplicationBegin(begin);
+ break;
+ case DataFW.TYPE_ID:
+ final DataFW data = dataRO.wrap(buffer, index, index + length);
+ onApplicationData(data);
+ break;
+ case EndFW.TYPE_ID:
+ final EndFW end = endRO.wrap(buffer, index, index + length);
+ onApplicationEnd(end);
+ break;
+ case AbortFW.TYPE_ID:
+ final AbortFW abort = abortRO.wrap(buffer, index, index + length);
+ onApplicationAbort(abort);
+ break;
+ case WindowFW.TYPE_ID:
+ final WindowFW window = windowRO.wrap(buffer, index, index + length);
+ onApplicationWindow(window);
+ break;
+ case ResetFW.TYPE_ID:
+ final ResetFW reset = resetRO.wrap(buffer, index, index + length);
+ onApplicationReset(reset);
+ break;
+ default:
+ break;
+ }
+ }
+
+ private void onApplicationBegin(
+ BeginFW begin)
+ {
+ final long traceId = begin.traceId();
+ final long authorization = begin.authorization();
+
+ state = KafkaState.openingInitial(state);
+
+ client.doNetworkBegin(traceId, authorization, affinity);
+
+ doApplicationWindow(traceId, 0L, 0, 0, 0);
+ }
+
+ private void onApplicationData(
+ DataFW data)
+ {
+ final long traceId = data.traceId();
+
+ client.cleanupNetwork(traceId);
+ }
+
+ private void onApplicationEnd(
+ EndFW end)
+ {
+ final long traceId = end.traceId();
+ final long authorization = end.authorization();
+
+ state = KafkaState.closedInitial(state);
+
+ client.doNetworkEnd(traceId, authorization);
+ }
+
+ private void onApplicationAbort(
+ AbortFW abort)
+ {
+ final long traceId = abort.traceId();
+
+ state = KafkaState.closedInitial(state);
+
+ client.doNetworkAbort(traceId);
+ }
+
+ private void onApplicationWindow(
+ WindowFW window)
+ {
+ final long sequence = window.sequence();
+ final long acknowledge = window.acknowledge();
+ final int maximum = window.maximum();
+ final long budgetId = window.budgetId();
+ final int padding = window.padding();
+
+ assert acknowledge <= sequence;
+ assert sequence <= replySeq;
+ assert acknowledge >= replyAck;
+ assert maximum >= replyMax;
+
+ this.replyAck = acknowledge;
+ this.replyMax = maximum;
+ this.replyPad = padding;
+ this.replyBudgetId = budgetId;
+
+ assert replyAck <= replySeq;
+ }
+
+ private void onApplicationReset(
+ ResetFW reset)
+ {
+ final long traceId = reset.traceId();
+
+ state = KafkaState.closedReply(state);
+
+ client.doNetworkReset(traceId);
+ }
+
+ private boolean isApplicationReplyOpen()
+ {
+ return KafkaState.replyOpening(state);
+ }
+
+ private void doApplicationBegin(
+ long traceId,
+ long authorization,
+ int throttle,
+ List<AlterConfigsResponseInfo> topics)
+ {
+ if (!KafkaState.replyOpening(state))
+ {
+ state = KafkaState.openingReply(state);
+
+ doBegin(application, originId, routedId, replyId, replySeq, replyAck, replyMax,
+ traceId, authorization, affinity,
+ ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l)
+ .typeId(kafkaTypeId)
+ .response(r -> r
+ .alterConfigs(
+ ct -> ct
+ .throttle(throttle)
+ .resources(t ->
+ topics.forEach(ts ->
+ t.item(i -> i
+ .error(ts.error)
+ .message(ts.message)
+ .type(rt -> rt.set(ts.type))
+ .name(ts.name))))))
+ .build()
+ .sizeof()));
+ }
+ }
+
+ private void doApplicationEnd(
+ long traceId)
+ {
+ state = KafkaState.closedReply(state);
+ doEnd(application, originId, routedId, replyId, replySeq, replyAck, replyMax,
+ traceId, client.authorization, EMPTY_EXTENSION);
+ }
+
+ private void doApplicationAbort(
+ long traceId)
+ {
+ if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state))
+ {
+ state = KafkaState.closedReply(state);
+ doAbort(application, originId, routedId, replyId, replySeq, replyAck, replyMax,
+ traceId, client.authorization, EMPTY_EXTENSION);
+ }
+ }
+
+ private void doApplicationWindow(
+ long traceId,
+ long budgetId,
+ int minInitialNoAck,
+ int minInitialPad,
+ int minInitialMax)
+ {
+ final long newInitialAck = Math.max(initialSeq - minInitialNoAck, initialAck);
+
+ if (newInitialAck > initialAck || minInitialMax > initialMax || !KafkaState.initialOpened(state))
+ {
+ initialAck = newInitialAck;
+ assert initialAck <= initialSeq;
+
+ initialMax = minInitialMax;
+
+ state = KafkaState.openedInitial(state);
+
+ doWindow(application, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+ traceId, client.authorization, budgetId, minInitialPad);
+ }
+ }
+
+ private void doApplicationReset(
+ long traceId,
+ Flyweight extension)
+ {
+ if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state))
+ {
+ state = KafkaState.closedInitial(state);
+
+ doReset(application, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+ traceId, client.authorization, extension);
+ }
+ }
+
+ private void cleanupApplication(
+ long traceId,
+ int error)
+ {
+ final KafkaResetExFW kafkaResetEx = kafkaResetExRW.wrap(extBuffer, 0, extBuffer.capacity())
+ .typeId(kafkaTypeId)
+ .error(error)
+ .build();
+
+ cleanupApplication(traceId, kafkaResetEx);
+ }
+
+ private void cleanupApplication(
+ long traceId,
+ Flyweight extension)
+ {
+ doApplicationReset(traceId, extension);
+ doApplicationAbort(traceId);
+ }
+ }
+
+ private final class KafkaAlterConfigsClient extends KafkaSaslClient
+ {
+ private final LongLongConsumer encodeSaslHandshakeRequest = this::doEncodeSaslHandshakeRequest;
+ private final LongLongConsumer encodeSaslAuthenticateRequest = this::doEncodeSaslAuthenticateRequest;
+ private final LongLongConsumer encodeAlterConfigsRequest = this::doEncodeAlterConfigsRequest;
+
+ private final KafkaAlterConfigsStream delegate;
+ private final AlterConfigsRequestInfo request;
+
+ private MessageConsumer network;
+ private int state;
+ private long authorization;
+
+ private long initialSeq;
+ private long initialAck;
+ private int initialMax;
+ private int initialMin;
+ private int initialPad;
+ private long initialBudgetId = NO_BUDGET_ID;
+ private long initialDebIndex = NO_DEBITOR_INDEX;
+
+ private long replySeq;
+ private long replyAck;
+ private int replyMax;
+
+ private int encodeSlot = NO_SLOT;
+ private int encodeSlotOffset;
+ private long encodeSlotTraceId;
+
+ private int decodeSlot = NO_SLOT;
+ private int decodeSlotOffset;
+ private int decodeSlotReserved;
+
+ private int nextResponseId;
+
+ private BudgetDebitor initialDeb;
+ private KafkaAlterConfigsClientDecoder decoder;
+ private LongLongConsumer encoder;
+
+ KafkaAlterConfigsClient(
+ KafkaAlterConfigsStream delegate,
+ long originId,
+ long routedId,
+ AlterConfigsRequestInfo request,
+ List<KafkaServerConfig> servers,
+ KafkaSaslConfig sasl)
+ {
+ super(servers, sasl, originId, routedId);
+ this.delegate = delegate;
+ this.request = request;
+ this.encoder = sasl != null ? encodeSaslHandshakeRequest : encodeAlterConfigsRequest;
+
+ this.decoder = decodeReject;
+ }
+
+ private void onNetwork(
+ int msgTypeId,
+ DirectBuffer buffer,
+ int index,
+ int length)
+ {
+ switch (msgTypeId)
+ {
+ case BeginFW.TYPE_ID:
+ final BeginFW begin = beginRO.wrap(buffer, index, index + length);
+ onNetworkBegin(begin);
+ break;
+ case DataFW.TYPE_ID:
+ final DataFW data = dataRO.wrap(buffer, index, index + length);
+ onNetworkData(data);
+ break;
+ case EndFW.TYPE_ID:
+ final EndFW end = endRO.wrap(buffer, index, index + length);
+ onNetworkEnd(end);
+ break;
+ case AbortFW.TYPE_ID:
+ final AbortFW abort = abortRO.wrap(buffer, index, index + length);
+ onNetworkAbort(abort);
+ break;
+ case ResetFW.TYPE_ID:
+ final ResetFW reset = resetRO.wrap(buffer, index, index + length);
+ onNetworkReset(reset);
+ break;
+ case WindowFW.TYPE_ID:
+ final WindowFW window = windowRO.wrap(buffer, index, index + length);
+ onNetworkWindow(window);
+ break;
+ case SignalFW.TYPE_ID:
+ final SignalFW signal = signalRO.wrap(buffer, index, index + length);
+ onNetworkSignal(signal);
+ break;
+ default:
+ break;
+ }
+ }
+
+ private void onNetworkBegin(
+ BeginFW begin)
+ {
+ final long traceId = begin.traceId();
+
+ authorization = begin.authorization();
+ state = KafkaState.openingReply(state);
+
+ doNetworkWindow(traceId, 0L, 0, 0, decodePool.slotCapacity());
+ }
+
+ private void onNetworkData(
+ DataFW data)
+ {
+ final long sequence = data.sequence();
+ final long acknowledge = data.acknowledge();
+ final long traceId = data.traceId();
+ final long budgetId = data.budgetId();
+
+ assert acknowledge <= sequence;
+ assert sequence >= replySeq;
+
+ replySeq = sequence + data.reserved();
+ authorization = data.authorization();
+
+ assert replyAck <= replySeq;
+
+ if (replySeq > replyAck + replyMax)
+ {
+ cleanupNetwork(traceId);
+ }
+ else
+ {
+ if (decodeSlot == NO_SLOT)
+ {
+ decodeSlot = decodePool.acquire(initialId);
+ }
+
+ if (decodeSlot == NO_SLOT)
+ {
+ cleanupNetwork(traceId);
+ }
+ else
+ {
+ final OctetsFW payload = data.payload();
+ int reserved = data.reserved();
+ int offset = payload.offset();
+ int limit = payload.limit();
+
+ final MutableDirectBuffer buffer = decodePool.buffer(decodeSlot);
+ buffer.putBytes(decodeSlotOffset, payload.buffer(), offset, limit - offset);
+ decodeSlotOffset += limit - offset;
+ decodeSlotReserved += reserved;
+
+ offset = 0;
+ limit = decodeSlotOffset;
+ reserved = decodeSlotReserved;
+
+ decodeNetwork(traceId, authorization, budgetId, reserved, buffer, offset, limit);
+ }
+ }
+ }
+
+ private void onNetworkEnd(
+ EndFW end)
+ {
+ final long traceId = end.traceId();
+
+ state = KafkaState.closedReply(state);
+
+ cleanupDecodeSlotIfNecessary();
+
+ if (!delegate.isApplicationReplyOpen())
+ {
+ cleanupNetwork(traceId);
+ }
+ else if (decodeSlot == NO_SLOT)
+ {
+ delegate.doApplicationEnd(traceId);
+ }
+ }
+
+ private void onNetworkAbort(
+ AbortFW abort)
+ {
+ final long traceId = abort.traceId();
+
+ state = KafkaState.closedReply(state);
+
+ cleanupNetwork(traceId);
+ }
+
+ private void onNetworkReset(
+ ResetFW reset)
+ {
+ final long traceId = reset.traceId();
+
+ state = KafkaState.closedInitial(state);
+
+ cleanupNetwork(traceId);
+ }
+
+ private void onNetworkWindow(
+ WindowFW window)
+ {
+ final long sequence = window.sequence();
+ final long acknowledge = window.acknowledge();
+ final int minimum = window.minimum();
+ final int maximum = window.maximum();
+ final long traceId = window.traceId();
+ final long budgetId = window.budgetId();
+ final int padding = window.padding();
+
+ assert acknowledge <= sequence;
+ assert sequence <= initialSeq;
+ assert acknowledge >= initialAck;
+ assert maximum + acknowledge >= initialMax + initialAck;
+
+ this.initialAck = acknowledge;
+ this.initialMax = maximum;
+ this.initialPad = padding;
+ this.initialMin = minimum;
+ this.initialBudgetId = budgetId;
+
+ assert initialAck <= initialSeq;
+
+ this.authorization = window.authorization();
+
+ state = KafkaState.openedInitial(state);
+
+ if (initialBudgetId != NO_BUDGET_ID && initialDebIndex == NO_DEBITOR_INDEX)
+ {
+ initialDeb = supplyDebitor.apply(initialBudgetId);
+ initialDebIndex = initialDeb.acquire(initialBudgetId, initialId, this::doNetworkData);
+ assert initialDebIndex != NO_DEBITOR_INDEX;
+ }
+
+ doNetworkData(traceId);
+
+ doEncodeRequest(traceId, budgetId);
+ }
+
+ private void doNetworkData(
+ long traceId)
+ {
+ if (encodeSlot != NO_SLOT)
+ {
+ final MutableDirectBuffer buffer = encodePool.buffer(encodeSlot);
+ final int limit = encodeSlotOffset;
+
+ encodeNetwork(traceId, authorization, initialBudgetId, buffer, 0, limit);
+ }
+ }
+
+ private void onNetworkSignal(
+ SignalFW signal)
+ {
+ final long traceId = signal.traceId();
+ final int signalId = signal.signalId();
+
+ if (signalId == SIGNAL_NEXT_REQUEST)
+ {
+ doEncodeRequest(traceId, initialBudgetId);
+ }
+ }
+
+ private void doNetworkBegin(
+ long traceId,
+ long authorization,
+ long affinity)
+ {
+ state = KafkaState.openingInitial(state);
+
+ Consumer<OctetsFW.Builder> extension = EMPTY_EXTENSION;
+
+ if (server != null)
+ {
+ extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l)
+ .typeId(proxyTypeId)
+ .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM))
+ .source("0.0.0.0")
+ .destination(server.host)
+ .sourcePort(0)
+ .destinationPort(server.port)))
+ .infos(i -> i.item(ii -> ii.authority(server.host)))
+ .build()
+ .sizeof());
+ }
+
+ network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+ traceId, authorization, affinity, extension);
+ }
+
+ @Override
+ protected void doNetworkData(
+ long traceId,
+ long budgetId,
+ DirectBuffer buffer,
+ int offset,
+ int limit)
+ {
+ if (encodeSlot != NO_SLOT)
+ {
+ final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot);
+ encodeBuffer.putBytes(encodeSlotOffset, buffer, offset, limit - offset);
+ encodeSlotOffset += limit - offset;
+ encodeSlotTraceId = traceId;
+
+ buffer = encodeBuffer;
+ offset = 0;
+ limit = encodeSlotOffset;
+ }
+
+ encodeNetwork(traceId, authorization, budgetId, buffer, offset, limit);
+ }
+
+ private void doNetworkEnd(
+ long traceId,
+ long authorization)
+ {
+ state = KafkaState.closedInitial(state);
+
+ cleanupEncodeSlotIfNecessary();
+ cleanupBudgetIfNecessary();
+
+ doEnd(network, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+ traceId, authorization, EMPTY_EXTENSION);
+ }
+
+ private void doNetworkAbort(
+ long traceId)
+ {
+ if (!KafkaState.initialClosed(state))
+ {
+ doAbort(network, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+ traceId, authorization, EMPTY_EXTENSION);
+ state = KafkaState.closedInitial(state);
+ }
+
+ cleanupEncodeSlotIfNecessary();
+ cleanupBudgetIfNecessary();
+ }
+
+ private void doNetworkReset(
+ long traceId)
+ {
+ if (!KafkaState.replyClosed(state))
+ {
+ doReset(network, originId, routedId, replyId, replySeq, replyAck, replyMax,
+ traceId, authorization, EMPTY_OCTETS);
+ state = KafkaState.closedReply(state);
+ }
+
+ cleanupDecodeSlotIfNecessary();
+ }
+
+ private void doNetworkWindow(
+ long traceId,
+ long budgetId,
+ int minReplyNoAck,
+ int minReplyPad,
+ int minReplyMax)
+ {
+ final long newReplyAck = Math.max(replySeq - minReplyNoAck, replyAck);
+
+ if (newReplyAck > replyAck || minReplyMax > replyMax || !KafkaState.replyOpened(state))
+ {
+ replyAck = newReplyAck;
+ assert replyAck <= replySeq;
+
+ replyMax = minReplyMax;
+
+ state = KafkaState.openedReply(state);
+
+ doWindow(network, originId, routedId, replyId, replySeq, replyAck, replyMax,
+ traceId, authorization, budgetId, minReplyPad);
+ }
+ }
+
+ private void doEncodeRequest(
+ long traceId,
+ long budgetId)
+ {
+ if (nextRequestId == nextResponseId)
+ {
+ encoder.accept(traceId, budgetId);
+ }
+ }
+
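+ // Encodes the AlterConfigs request in two passes: the header is first written with a
+ // zero length and correlation id as placeholders, the variable-size body is appended,
+ // and the header is then rewritten in place once the total frame size and the next
+ // correlation id are known.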
+ private void doEncodeAlterConfigsRequest(
+ long traceId,
+ long budgetId)
+ {
+ final MutableDirectBuffer encodeBuffer = writeBuffer;
+ final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD;
+ final int encodeLimit = encodeBuffer.capacity();
+
+ int encodeProgress = encodeOffset;
+
+ final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress, encodeLimit)
+ .length(0)
+ .apiKey(ALTER_CONFIGS_API_KEY)
+ .apiVersion(ALTER_CONFIGS_API_VERSION)
+ .correlationId(0)
+ .clientId(clientId)
+ .build();
+
+ encodeProgress = requestHeader.limit();
+
+ final AlterConfigsRequestFW alterConfigsRequest =
+ alterConfigsRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit)
+ .resourceCount(request.resources().size())
+ .build();
+
+ encodeProgress = alterConfigsRequest.limit();
+
+ for (ResourceInfo resource : request.resources)
+ {
+ final ResourceRequestFW resourceRequest = resourceRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit)
+ .type(resource.type.value())
+ .name(resource.name)
+ .configCount(resource.configs.size())
+ .build();
+
+ encodeProgress = resourceRequest.limit();
+
+ for (ConfigInfo config : resource.configs)
+ {
+ ConfigRequestFW configRequest = configRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit)
+ .name(config.name)
+ .value(config.value)
+ .build();
+
+ encodeProgress = configRequest.limit();
+ }
+ }
+
+ AlterConfigsRequestPart2FW alterConfigsRequestPart2 = alterConfigsRequestPart2RW
+ .wrap(encodeBuffer, encodeProgress, encodeLimit)
+ .validate_only(request.validateOnly)
+ .build();
+
+ encodeProgress = alterConfigsRequestPart2.limit();
+
+ final int requestId = nextRequestId++;
+ final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY;
+
+ requestHeaderRW.wrap(encodeBuffer, requestHeader.offset(), requestHeader.limit())
+ .length(requestSize)
+ .apiKey(requestHeader.apiKey())
+ .apiVersion(requestHeader.apiVersion())
+ .correlationId(requestId)
+ .clientId(requestHeader.clientId())
+ .build();
+
+ doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress);
+
+ decoder = decodeAlterConfigsResponse;
+ }
+
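+ // Flushes encoded bytes toward the network within the initial window: reserves credit
+ // (claiming from the budget debitor when one is attached), sends what fits as a single
+ // DATA frame, and parks any unsent remainder in the encode slot for a later window.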
+ private void encodeNetwork(
+ long traceId,
+ long authorization,
+ long budgetId,
+ DirectBuffer buffer,
+ int offset,
+ int limit)
+ {
+ final int length = limit - offset;
+ final int initialBudget = Math.max(initialMax - (int)(initialSeq - initialAck), 0);
+ final int reservedMax = Math.max(Math.min(length + initialPad, initialBudget), initialMin);
+
+ int reserved = reservedMax;
+
+ flush:
+ if (reserved > 0)
+ {
+ if (initialDebIndex != NO_DEBITOR_INDEX)
+ {
+ reserved = initialDeb.claim(traceId, initialDebIndex, initialId, reserved, reserved, 0);
+ }
+
+ if (reserved < initialPad || reserved == initialPad && length > 0)
+ {
+ break flush;
+ }
+
+ doData(network, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+ traceId, authorization, budgetId, reserved, buffer, offset, length, EMPTY_EXTENSION);
+
+ initialSeq += reserved;
+
+ assert initialAck <= initialSeq;
+ }
+
+ final int flushed = Math.max(reserved - initialPad, 0);
+ final int remaining = length - flushed;
+ if (remaining > 0)
+ {
+ if (encodeSlot == NO_SLOT)
+ {
+ encodeSlot = encodePool.acquire(initialId);
+ }
+
+ if (encodeSlot == NO_SLOT)
+ {
+ cleanupNetwork(traceId);
+ }
+ else
+ {
+ final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot);
+ encodeBuffer.putBytes(0, buffer, offset + flushed, remaining);
+ encodeSlotOffset = remaining;
+ }
+ }
+ else
+ {
+ cleanupEncodeSlotIfNecessary();
+ }
+ }
+
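+ // Runs the decode state machine until the current decoder stops changing, then either
+ // retains the undecoded tail in the decode slot for the next DATA frame or, once fully
+ // drained, releases the slot, completes a deferred END, and replenishes the reply window.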
+ private void decodeNetwork(
+ long traceId,
+ long authorization,
+ long budgetId,
+ int reserved,
+ MutableDirectBuffer buffer,
+ int offset,
+ int limit)
+ {
+ KafkaAlterConfigsClientDecoder previous = null;
+ int progress = offset;
+ while (progress <= limit && previous != decoder)
+ {
+ previous = decoder;
+ progress = decoder.decode(this, traceId, authorization, budgetId, reserved, buffer, offset, progress, limit);
+ }
+
+ if (progress < limit)
+ {
+ if (decodeSlot == NO_SLOT)
+ {
+ decodeSlot = decodePool.acquire(initialId);
+ }
+
+ if (decodeSlot == NO_SLOT)
+ {
+ cleanupNetwork(traceId);
+ }
+ else
+ {
+ final MutableDirectBuffer decodeBuffer = decodePool.buffer(decodeSlot);
+ decodeBuffer.putBytes(0, buffer, progress, limit - progress);
+ decodeSlotOffset = limit - progress;
+ decodeSlotReserved = (limit - progress) * reserved / (limit - offset);
+ }
+
+ doNetworkWindow(traceId, budgetId, decodeSlotOffset, 0, replyMax);
+ }
+ else
+ {
+ cleanupDecodeSlotIfNecessary();
+
+ if (KafkaState.replyClosing(state))
+ {
+ delegate.doApplicationEnd(traceId);
+ }
+ else if (reserved > 0)
+ {
+ doNetworkWindow(traceId, budgetId, 0, 0, replyMax);
+ }
+ }
+ }
+
+ @Override
+ protected void doDecodeSaslHandshakeResponse(
+ long traceId)
+ {
+ decoder = decodeSaslHandshakeResponse;
+ }
+
+ @Override
+ protected void doDecodeSaslHandshake(
+ long traceId)
+ {
+ decoder = decodeSaslHandshake;
+ }
+
+ @Override
+ protected void doDecodeSaslHandshakeMechanisms(
+ long traceId)
+ {
+ decoder = decodeSaslHandshakeMechanisms;
+ }
+
+ @Override
+ protected void doDecodeSaslHandshakeMechansim(
+ long traceId)
+ {
+ decoder = decodeSaslHandshakeMechanism;
+ }
+
+ @Override
+ protected void doDecodeSaslAuthenticateResponse(
+ long traceId)
+ {
+ decoder = decodeSaslAuthenticateResponse;
+ }
+
+ @Override
+ protected void doDecodeSaslAuthenticate(
+ long traceId)
+ {
+ decoder = decodeSaslAuthenticate;
+ }
+
+ @Override
+ protected void onDecodeSaslHandshakeResponse(
+ long traceId,
+ long authorization,
+ int errorCode)
+ {
+ switch (errorCode)
+ {
+ case ERROR_NONE:
+ encoder = encodeSaslAuthenticateRequest;
+ decoder = decodeSaslAuthenticateResponse;
+ break;
+ default:
+ delegate.cleanupApplication(traceId, errorCode);
+ doNetworkEnd(traceId, authorization);
+ break;
+ }
+ }
+
+ @Override
+ protected void onDecodeSaslAuthenticateResponse(
+ long traceId,
+ long authorization,
+ int errorCode)
+ {
+ switch (errorCode)
+ {
+ case ERROR_NONE:
+ encoder = encodeAlterConfigsRequest;
+ decoder = decodeAlterConfigsResponse;
+ break;
+ default:
+ delegate.cleanupApplication(traceId, errorCode);
+ doNetworkEnd(traceId, authorization);
+ break;
+ }
+ }
+
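+ // Each completed SASL round-trip bumps nextResponseId and self-signals
+ // SIGNAL_NEXT_REQUEST, so doEncodeRequest emits the next request in the chain:
+ // handshake, authenticate, then the AlterConfigs request itself.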
+ @Override
+ protected void onDecodeSaslResponse(
+ long traceId)
+ {
+ nextResponseId++;
+ signaler.signalNow(originId, routedId, initialId, traceId, SIGNAL_NEXT_REQUEST, 0);
+ }
+
+ private void onDecodeAlterConfigsResponse(
+ long traceId,
+ long authorization,
+ int throttle,
+ List<AlterConfigsResponseInfo> topics)
+ {
+ delegate.doApplicationBegin(traceId, authorization, throttle, topics);
+ }
+
+ private void cleanupNetwork(
+ long traceId)
+ {
+ doNetworkReset(traceId);
+ doNetworkAbort(traceId);
+
+ delegate.cleanupApplication(traceId, ERROR_NONE);
+ }
+
+ private void cleanupDecodeSlotIfNecessary()
+ {
+ if (decodeSlot != NO_SLOT)
+ {
+ decodePool.release(decodeSlot);
+ decodeSlot = NO_SLOT;
+ decodeSlotOffset = 0;
+ decodeSlotReserved = 0;
+ }
+ }
+
+ private void cleanupEncodeSlotIfNecessary()
+ {
+ if (encodeSlot != NO_SLOT)
+ {
+ encodePool.release(encodeSlot);
+ encodeSlot = NO_SLOT;
+ encodeSlotOffset = 0;
+ encodeSlotTraceId = 0;
+ }
+ }
+
+ private void cleanupBudgetIfNecessary()
+ {
+ if (initialDebIndex != NO_DEBITOR_INDEX)
+ {
+ initialDeb.release(initialDebIndex, initialId);
+ initialDebIndex = NO_DEBITOR_INDEX;
+ }
+ }
+ }
+
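+ // Immutable carriers decoupling parsed request/response state from the reusable
+ // flyweights, which are only valid while the underlying buffer region is stable.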
+ private record AlterConfigsRequestInfo(
+ List<ResourceInfo> resources,
+ byte validateOnly)
+ {
+ }
+
+ private record ResourceInfo(
+ KafkaResourceType type,
+ String name,
+ List<ConfigInfo> configs)
+ {
+ }
+
+ private record ConfigInfo(
+ String name,
+ String value)
+ {
+ }
+
+ private record AlterConfigsResponseInfo(
+ short error,
+ String message,
+ KafkaResourceType type,
+ String name)
+ {
+ }
+}
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientDeleteTopicsFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientDeleteTopicsFactory.java
new file mode 100644
index 0000000000..1acc9f4d19
--- /dev/null
+++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientDeleteTopicsFactory.java
@@ -0,0 +1,1530 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.aklivity.zilla.runtime.binding.kafka.internal.stream;
+
+import static io.aklivity.zilla.runtime.binding.kafka.internal.types.ProxyAddressProtocol.STREAM;
+import static io.aklivity.zilla.runtime.engine.budget.BudgetCreditor.NO_BUDGET_ID;
+import static io.aklivity.zilla.runtime.engine.budget.BudgetDebitor.NO_DEBITOR_INDEX;
+import static io.aklivity.zilla.runtime.engine.buffer.BufferPool.NO_SLOT;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Consumer;
+import java.util.function.LongFunction;
+import java.util.function.UnaryOperator;
+
+import org.agrona.DirectBuffer;
+import org.agrona.MutableDirectBuffer;
+import org.agrona.collections.LongLongConsumer;
+import org.agrona.concurrent.UnsafeBuffer;
+
+import io.aklivity.zilla.runtime.binding.kafka.config.KafkaSaslConfig;
+import io.aklivity.zilla.runtime.binding.kafka.config.KafkaServerConfig;
+import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaBinding;
+import io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfiguration;
+import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig;
+import io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaRouteConfig;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.Flyweight;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.RequestHeaderFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.ResponseHeaderFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.delete_topics.DeleteTopicsRequestFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.delete_topics.DeleteTopicsRequestPart2FW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.delete_topics.DeleteTopicsResponseFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.delete_topics.TopicRequestFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.codec.delete_topics.TopicResponseFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.AbortFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.BeginFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.DataFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.EndFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ExtensionFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaBeginExFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaDeleteTopicsRequestBeginExFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaResetExFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ProxyBeginExFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.ResetFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.SignalFW;
+import io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.WindowFW;
+import io.aklivity.zilla.runtime.engine.EngineContext;
+import io.aklivity.zilla.runtime.engine.binding.BindingHandler;
+import io.aklivity.zilla.runtime.engine.binding.function.MessageConsumer;
+import io.aklivity.zilla.runtime.engine.budget.BudgetDebitor;
+import io.aklivity.zilla.runtime.engine.buffer.BufferPool;
+import io.aklivity.zilla.runtime.engine.concurrent.Signaler;
+
+public final class KafkaClientDeleteTopicsFactory extends KafkaClientSaslHandshaker implements BindingHandler
+{
+ private static final int ERROR_NONE = 0;
+ private static final int SIGNAL_NEXT_REQUEST = 1;
+
+ private static final DirectBuffer EMPTY_BUFFER = new UnsafeBuffer();
+ private static final OctetsFW EMPTY_OCTETS = new OctetsFW().wrap(EMPTY_BUFFER, 0, 0);
+ private static final Consumer<OctetsFW.Builder> EMPTY_EXTENSION = ex -> {};
+
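+ // DeleteTopics is pinned to Kafka protocol API key 20, request version 3.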
+ private static final short DELETE_TOPICS_API_KEY = 20;
+ private static final short DELETE_TOPICS_API_VERSION = 3;
+
+ private final BeginFW beginRO = new BeginFW();
+ private final DataFW dataRO = new DataFW();
+ private final EndFW endRO = new EndFW();
+ private final AbortFW abortRO = new AbortFW();
+ private final ResetFW resetRO = new ResetFW();
+ private final WindowFW windowRO = new WindowFW();
+ private final SignalFW signalRO = new SignalFW();
+ private final ExtensionFW extensionRO = new ExtensionFW();
+ private final KafkaBeginExFW kafkaBeginExRO = new KafkaBeginExFW();
+
+ private final BeginFW.Builder beginRW = new BeginFW.Builder();
+ private final DataFW.Builder dataRW = new DataFW.Builder();
+ private final EndFW.Builder endRW = new EndFW.Builder();
+ private final AbortFW.Builder abortRW = new AbortFW.Builder();
+ private final ResetFW.Builder resetRW = new ResetFW.Builder();
+ private final WindowFW.Builder windowRW = new WindowFW.Builder();
+ private final ProxyBeginExFW.Builder proxyBeginExRW = new ProxyBeginExFW.Builder();
+ private final KafkaBeginExFW.Builder kafkaBeginExRW = new KafkaBeginExFW.Builder();
+ private final KafkaResetExFW.Builder kafkaResetExRW = new KafkaResetExFW.Builder();
+
+ private final RequestHeaderFW.Builder requestHeaderRW = new RequestHeaderFW.Builder();
+ private final DeleteTopicsRequestFW.Builder deleteTopicsRequestRW = new DeleteTopicsRequestFW.Builder();
+ private final TopicRequestFW.Builder topicRequestRW = new TopicRequestFW.Builder();
+ private final DeleteTopicsRequestPart2FW.Builder deleteTopicsRequestPart2RW = new DeleteTopicsRequestPart2FW.Builder();
+
+ private final ResponseHeaderFW responseHeaderRO = new ResponseHeaderFW();
+ private final DeleteTopicsResponseFW deleteTopicsResponseRO = new DeleteTopicsResponseFW();
+ private final TopicResponseFW topicResponseRO = new TopicResponseFW();
+
+ private final KafkaDeleteTopicsClientDecoder decodeSaslHandshakeResponse = this::decodeSaslHandshakeResponse;
+ private final KafkaDeleteTopicsClientDecoder decodeSaslHandshake = this::decodeSaslHandshake;
+ private final KafkaDeleteTopicsClientDecoder decodeSaslHandshakeMechanisms = this::decodeSaslHandshakeMechanisms;
+ private final KafkaDeleteTopicsClientDecoder decodeSaslHandshakeMechanism = this::decodeSaslHandshakeMechanism;
+ private final KafkaDeleteTopicsClientDecoder decodeSaslAuthenticateResponse = this::decodeSaslAuthenticateResponse;
+ private final KafkaDeleteTopicsClientDecoder decodeSaslAuthenticate = this::decodeSaslAuthenticate;
+ private final KafkaDeleteTopicsClientDecoder decodeDeleteTopicsResponse = this::decodeDeleteTopicsResponse;
+ private final KafkaDeleteTopicsClientDecoder decodeIgnoreAll = this::decodeIgnoreAll;
+ private final KafkaDeleteTopicsClientDecoder decodeReject = this::decodeReject;
+
+ private final int kafkaTypeId;
+ private final int proxyTypeId;
+ private final MutableDirectBuffer writeBuffer;
+ private final MutableDirectBuffer extBuffer;
+ private final BufferPool decodePool;
+ private final BufferPool encodePool;
+ private final Signaler signaler;
+ private final BindingHandler streamFactory;
+ private final UnaryOperator<KafkaSaslConfig> resolveSasl;
+ private final LongFunction<KafkaBindingConfig> supplyBinding;
+ private final LongFunction<BudgetDebitor> supplyDebitor;
+ private final List<DeleteTopicsResponseInfo> responseTopics;
+
+ public KafkaClientDeleteTopicsFactory(
+ KafkaConfiguration config,
+ EngineContext context,
+ LongFunction<KafkaBindingConfig> supplyBinding,
+ LongFunction<BudgetDebitor> supplyDebitor,
+ Signaler signaler,
+ BindingHandler streamFactory,
+ UnaryOperator<KafkaSaslConfig> resolveSasl)
+ {
+ super(config, context);
+ this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME);
+ this.proxyTypeId = context.supplyTypeId("proxy");
+ this.signaler = signaler;
+ this.streamFactory = streamFactory;
+ this.resolveSasl = resolveSasl;
+ this.writeBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]);
+ this.extBuffer = new UnsafeBuffer(new byte[context.writeBuffer().capacity()]);
+ this.decodePool = context.bufferPool();
+ this.encodePool = context.bufferPool();
+ this.supplyBinding = supplyBinding;
+ this.supplyDebitor = supplyDebitor;
+ this.responseTopics = new ArrayList<>();
+ }
+
+ @Override
+ public MessageConsumer newStream(
+ int msgTypeId,
+ DirectBuffer buffer,
+ int index,
+ int length,
+ MessageConsumer application)
+ {
+ final BeginFW begin = beginRO.wrap(buffer, index, index + length);
+ final long originId = begin.originId();
+ final long routedId = begin.routedId();
+ final long initialId = begin.streamId();
+ final long affinity = begin.affinity();
+ final long authorization = begin.authorization();
+ final OctetsFW extension = begin.extension();
+ final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit());
+ final KafkaBeginExFW kafkaBeginEx = beginEx != null && beginEx.typeId() == kafkaTypeId ?
+ kafkaBeginExRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()) : null;
+
+ assert kafkaBeginEx != null && kafkaBeginEx.kind() == KafkaBeginExFW.KIND_REQUEST;
+ final KafkaDeleteTopicsRequestBeginExFW kafkaDeleteTopicsBeginEx = kafkaBeginEx.request().deleteTopics();
+
+ MessageConsumer newStream = null;
+
+ final KafkaBindingConfig binding = supplyBinding.apply(routedId);
+ final KafkaRouteConfig resolved = binding != null
+ ? binding.resolve(authorization, null)
+ : null;
+
+ if (resolved != null)
+ {
+ final long resolvedId = resolved.id;
+ final KafkaSaslConfig sasl = resolveSasl.apply(binding.sasl());
+
+ List<String> topics = new ArrayList<>();
+
+ kafkaDeleteTopicsBeginEx.names().forEach(t -> topics.add(t.asString()));
+ int timeout = kafkaDeleteTopicsBeginEx.timeout();
+
+ newStream = new KafkaDeleteTopicsStream(
+ application,
+ originId,
+ routedId,
+ initialId,
+ affinity,
+ resolvedId,
+ topics,
+ timeout,
+ binding.servers(),
+ sasl)::onApplication;
+ }
+
+ return newStream;
+ }
+
+ private MessageConsumer newStream(
+ MessageConsumer sender,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ long affinity,
+ Consumer<OctetsFW.Builder> extension)
+ {
+ final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .affinity(affinity)
+ .extension(extension)
+ .build();
+
+ final MessageConsumer receiver =
+ streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender);
+
+ receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof());
+
+ return receiver;
+ }
+
+ private void doBegin(
+ MessageConsumer receiver,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ long affinity,
+ Consumer<OctetsFW.Builder> extension)
+ {
+ final BeginFW begin = beginRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .affinity(affinity)
+ .extension(extension)
+ .build();
+
+ receiver.accept(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof());
+ }
+
+ private void doData(
+ MessageConsumer receiver,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ long budgetId,
+ int reserved,
+ DirectBuffer payload,
+ int offset,
+ int length,
+ Consumer<OctetsFW.Builder> extension)
+ {
+ final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .budgetId(budgetId)
+ .reserved(reserved)
+ .payload(payload, offset, length)
+ .extension(extension)
+ .build();
+
+ receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof());
+ }
+
+ private void doData(
+ MessageConsumer receiver,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ long budgetId,
+ int reserved,
+ DirectBuffer payload,
+ int offset,
+ int length,
+ Flyweight extension)
+ {
+ final DataFW data = dataRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .budgetId(budgetId)
+ .reserved(reserved)
+ .payload(payload, offset, length)
+ .extension(extension.buffer(), extension.offset(), extension.sizeof())
+ .build();
+
+ receiver.accept(data.typeId(), data.buffer(), data.offset(), data.sizeof());
+ }
+
+ private void doEnd(
+ MessageConsumer receiver,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ Consumer<OctetsFW.Builder> extension)
+ {
+ final EndFW end = endRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .extension(extension)
+ .build();
+
+ receiver.accept(end.typeId(), end.buffer(), end.offset(), end.sizeof());
+ }
+
+ private void doAbort(
+ MessageConsumer receiver,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ Consumer<OctetsFW.Builder> extension)
+ {
+ final AbortFW abort = abortRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .extension(extension)
+ .build();
+
+ receiver.accept(abort.typeId(), abort.buffer(), abort.offset(), abort.sizeof());
+ }
+
+ private void doWindow(
+ MessageConsumer sender,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ long budgetId,
+ int padding)
+ {
+ final WindowFW window = windowRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .budgetId(budgetId)
+ .padding(padding)
+ .build();
+
+ sender.accept(window.typeId(), window.buffer(), window.offset(), window.sizeof());
+ }
+
+ private void doReset(
+ MessageConsumer sender,
+ long originId,
+ long routedId,
+ long streamId,
+ long sequence,
+ long acknowledge,
+ int maximum,
+ long traceId,
+ long authorization,
+ Flyweight extension)
+ {
+ final ResetFW reset = resetRW.wrap(writeBuffer, 0, writeBuffer.capacity())
+ .originId(originId)
+ .routedId(routedId)
+ .streamId(streamId)
+ .sequence(sequence)
+ .acknowledge(acknowledge)
+ .maximum(maximum)
+ .traceId(traceId)
+ .authorization(authorization)
+ .extension(extension.buffer(), extension.offset(), extension.sizeof())
+ .build();
+
+ sender.accept(reset.typeId(), reset.buffer(), reset.offset(), reset.sizeof());
+ }
+
+ @FunctionalInterface
+ private interface KafkaDeleteTopicsClientDecoder
+ {
+ int decode(
+ KafkaDeleteTopicsClient client,
+ long traceId,
+ long authorization,
+ long budgetId,
+ int reserved,
+ MutableDirectBuffer buffer,
+ int offset,
+ int progress,
+ int limit);
+ }
+
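+ // Mirrors the AlterConfigs decode flow for DeleteTopics: response header, body, then
+ // topicCount topic frames collecting each topic name and error code; a partially
+ // received frame breaks decoding so the tail is retried once more bytes are buffered.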
+ private int decodeDeleteTopicsResponse(
+ KafkaDeleteTopicsClient client,
+ long traceId,
+ long authorization,
+ long budgetId,
+ int reserved,
+ DirectBuffer buffer,
+ int offset,
+ int progress,
+ int limit)
+ {
+ final int length = limit - progress;
+
+ decode:
+ if (length != 0)
+ {
+ final ResponseHeaderFW responseHeader = responseHeaderRO.tryWrap(buffer, progress, limit);
+ if (responseHeader == null)
+ {
+ break decode;
+ }
+
+ progress = responseHeader.limit();
+
+ final DeleteTopicsResponseFW deleteTopicsResponse = deleteTopicsResponseRO.tryWrap(buffer, progress, limit);
+ if (deleteTopicsResponse == null)
+ {
+ break decode;
+ }
+
+ progress = deleteTopicsResponse.limit();
+
+ final int topicCount = deleteTopicsResponse.topicCount();
+ final int throttle = deleteTopicsResponse.throttleTimeMillis();
+
+ responseTopics.clear();
+ for (int topicIndex = 0; topicIndex < topicCount; topicIndex++)
+ {
+ final TopicResponseFW topic = topicResponseRO.tryWrap(buffer, progress, limit);
+ if (topic == null)
+ {
+ client.decoder = decodeIgnoreAll;
+ break decode;
+ }
+
+ progress = topic.limit();
+
+ responseTopics.add(new DeleteTopicsResponseInfo(
+ topic.name().asString(), topic.error()));
+ }
+
+ client.onDecodeDeleteTopicsResponse(traceId, authorization, throttle, responseTopics);
+ }
+
+ return progress;
+ }
+
+ private int decodeReject(
+ KafkaDeleteTopicsClient client,
+ long traceId,
+ long authorization,
+ long budgetId,
+ int reserved,
+ DirectBuffer buffer,
+ int offset,
+ int progress,
+ int limit)
+ {
+ client.cleanupNetwork(traceId);
+ client.decoder = decodeIgnoreAll;
+ return limit;
+ }
+
+ private int decodeIgnoreAll(
+ KafkaDeleteTopicsClient client,
+ long traceId,
+ long authorization,
+ long budgetId,
+ int reserved,
+ DirectBuffer buffer,
+ int offset,
+ int progress,
+ int limit)
+ {
+ return limit;
+ }
+
+ private final class KafkaDeleteTopicsStream
+ {
+ private final MessageConsumer application;
+ private final long originId;
+ private final long routedId;
+ private final long initialId;
+ private final long replyId;
+ private final long affinity;
+ private final KafkaDeleteTopicsClient client;
+
+ private int state;
+
+ private long initialSeq;
+ private long initialAck;
+ private int initialMax;
+
+ private long replySeq;
+ private long replyAck;
+ private int replyMax;
+ private int replyPad;
+
+ private long replyBudgetId;
+
+ KafkaDeleteTopicsStream(
+ MessageConsumer application,
+ long originId,
+ long routedId,
+ long initialId,
+ long affinity,
+ long resolvedId,
+ List<String> topics,
+ int timeout,
+ List<KafkaServerConfig> servers,
+ KafkaSaslConfig sasl)
+ {
+ this.application = application;
+ this.originId = originId;
+ this.routedId = routedId;
+ this.initialId = initialId;
+ this.replyId = supplyReplyId.applyAsLong(initialId);
+ this.affinity = affinity;
+ this.client = new KafkaDeleteTopicsClient(this, routedId, resolvedId, topics, timeout, servers, sasl);
+ }
+
+ private void onApplication(
+ int msgTypeId,
+ DirectBuffer buffer,
+ int index,
+ int length)
+ {
+ switch (msgTypeId)
+ {
+ case BeginFW.TYPE_ID:
+ final BeginFW begin = beginRO.wrap(buffer, index, index + length);
+ onApplicationBegin(begin);
+ break;
+ case DataFW.TYPE_ID:
+ final DataFW data = dataRO.wrap(buffer, index, index + length);
+ onApplicationData(data);
+ break;
+ case EndFW.TYPE_ID:
+ final EndFW end = endRO.wrap(buffer, index, index + length);
+ onApplicationEnd(end);
+ break;
+ case AbortFW.TYPE_ID:
+ final AbortFW abort = abortRO.wrap(buffer, index, index + length);
+ onApplicationAbort(abort);
+ break;
+ case WindowFW.TYPE_ID:
+ final WindowFW window = windowRO.wrap(buffer, index, index + length);
+ onApplicationWindow(window);
+ break;
+ case ResetFW.TYPE_ID:
+ final ResetFW reset = resetRO.wrap(buffer, index, index + length);
+ onApplicationReset(reset);
+ break;
+ default:
+ break;
+ }
+ }
+
+ private void onApplicationBegin(
+ BeginFW begin)
+ {
+ final long traceId = begin.traceId();
+ final long authorization = begin.authorization();
+
+ state = KafkaState.openingInitial(state);
+
+ client.doNetworkBegin(traceId, authorization, affinity);
+
+ doApplicationWindow(traceId, 0L, 0, 0, 0);
+ }
+
+ private void onApplicationData(
+ DataFW data)
+ {
+ final long traceId = data.traceId();
+
+ client.cleanupNetwork(traceId);
+ }
+
+ private void onApplicationEnd(
+ EndFW end)
+ {
+ final long traceId = end.traceId();
+ final long authorization = end.authorization();
+
+ state = KafkaState.closedInitial(state);
+
+ client.doNetworkEnd(traceId, authorization);
+ }
+
+ private void onApplicationAbort(
+ AbortFW abort)
+ {
+ final long traceId = abort.traceId();
+
+ state = KafkaState.closedInitial(state);
+
+ client.doNetworkAbort(traceId);
+ }
+
+ private void onApplicationWindow(
+ WindowFW window)
+ {
+ final long sequence = window.sequence();
+ final long acknowledge = window.acknowledge();
+ final int maximum = window.maximum();
+ final long budgetId = window.budgetId();
+ final int padding = window.padding();
+
+ assert acknowledge <= sequence;
+ assert sequence <= replySeq;
+ assert acknowledge >= replyAck;
+ assert maximum >= replyMax;
+
+ this.replyAck = acknowledge;
+ this.replyMax = maximum;
+ this.replyPad = padding;
+ this.replyBudgetId = budgetId;
+
+ assert replyAck <= replySeq;
+ }
+
+ private void onApplicationReset(
+ ResetFW reset)
+ {
+ final long traceId = reset.traceId();
+
+ state = KafkaState.closedReply(state);
+
+ client.doNetworkReset(traceId);
+ }
+
+ private boolean isApplicationReplyOpen()
+ {
+ return KafkaState.replyOpening(state);
+ }
+
+ private void doApplicationBegin(
+ long traceId,
+ long authorization,
+ int throttle,
+ List<DeleteTopicsResponseInfo> topics)
+ {
+ if (!KafkaState.replyOpening(state))
+ {
+ state = KafkaState.openingReply(state);
+
+ doBegin(application, originId, routedId, replyId, replySeq, replyAck, replyMax,
+ traceId, authorization, affinity,
+ ex -> ex.set((b, o, l) -> kafkaBeginExRW.wrap(b, o, l)
+ .typeId(kafkaTypeId)
+ .response(r -> r
+ .deleteTopics(
+ ct -> ct
+ .throttle(throttle)
+ .topics(t ->
+ topics.forEach(ts ->
+ t.item(i -> i
+ .name(ts.name)
+ .error(ts.error))))))
+ .build()
+ .sizeof()));
+ }
+ }
+
+ private void doApplicationEnd(
+ long traceId)
+ {
+ state = KafkaState.closedReply(state);
+ doEnd(application, originId, routedId, replyId, replySeq, replyAck, replyMax,
+ traceId, client.authorization, EMPTY_EXTENSION);
+ }
+
+ private void doApplicationAbort(
+ long traceId)
+ {
+ if (KafkaState.replyOpening(state) && !KafkaState.replyClosed(state))
+ {
+ state = KafkaState.closedReply(state);
+ doAbort(application, originId, routedId, replyId, replySeq, replyAck, replyMax,
+ traceId, client.authorization, EMPTY_EXTENSION);
+ }
+ }
+
+ private void doApplicationWindow(
+ long traceId,
+ long budgetId,
+ int minInitialNoAck,
+ int minInitialPad,
+ int minInitialMax)
+ {
+ final long newInitialAck = Math.max(initialSeq - minInitialNoAck, initialAck);
+
+ if (newInitialAck > initialAck || minInitialMax > initialMax || !KafkaState.initialOpened(state))
+ {
+ initialAck = newInitialAck;
+ assert initialAck <= initialSeq;
+
+ initialMax = minInitialMax;
+
+ state = KafkaState.openedInitial(state);
+
+ doWindow(application, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+ traceId, client.authorization, budgetId, minInitialPad);
+ }
+ }
+
+ private void doApplicationReset(
+ long traceId,
+ Flyweight extension)
+ {
+ if (KafkaState.initialOpening(state) && !KafkaState.initialClosed(state))
+ {
+ state = KafkaState.closedInitial(state);
+
+ doReset(application, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+ traceId, client.authorization, extension);
+ }
+ }
+
+ private void cleanupApplication(
+ long traceId,
+ int error)
+ {
+ final KafkaResetExFW kafkaResetEx = kafkaResetExRW.wrap(extBuffer, 0, extBuffer.capacity())
+ .typeId(kafkaTypeId)
+ .error(error)
+ .build();
+
+ cleanupApplication(traceId, kafkaResetEx);
+ }
+
+ private void cleanupApplication(
+ long traceId,
+ Flyweight extension)
+ {
+ doApplicationReset(traceId, extension);
+ doApplicationAbort(traceId);
+ }
+ }
+
+ private final class KafkaDeleteTopicsClient extends KafkaSaslClient
+ {
+ private final LongLongConsumer encodeSaslHandshakeRequest = this::doEncodeSaslHandshakeRequest;
+ private final LongLongConsumer encodeSaslAuthenticateRequest = this::doEncodeSaslAuthenticateRequest;
+ private final LongLongConsumer encodeDeleteTopicsRequest = this::doEncodeDeleteTopicsRequest;
+
+ private final KafkaDeleteTopicsStream delegate;
+ private final List<String> topics;
+ private final int timeout;
+
+ private MessageConsumer network;
+ private int state;
+ private long authorization;
+
+ private long initialSeq;
+ private long initialAck;
+ private int initialMax;
+ private int initialMin;
+ private int initialPad;
+ private long initialBudgetId = NO_BUDGET_ID;
+ private long initialDebIndex = NO_DEBITOR_INDEX;
+
+ private long replySeq;
+ private long replyAck;
+ private int replyMax;
+
+ private int encodeSlot = NO_SLOT;
+ private int encodeSlotOffset;
+ private long encodeSlotTraceId;
+
+ private int decodeSlot = NO_SLOT;
+ private int decodeSlotOffset;
+ private int decodeSlotReserved;
+
+ private int nextResponseId;
+
+ private BudgetDebitor initialDeb;
+ private KafkaDeleteTopicsClientDecoder decoder;
+ private LongLongConsumer encoder;
+
+ KafkaDeleteTopicsClient(
+ KafkaDeleteTopicsStream delegate,
+ long originId,
+ long routedId,
+ List<String> topics,
+ int timeout,
+ List<KafkaServerConfig> servers,
+ KafkaSaslConfig sasl)
+ {
+ super(servers, sasl, originId, routedId);
+ this.delegate = delegate;
+ this.topics = topics;
+ this.timeout = timeout;
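+ // negotiate SASL first when credentials are configured, otherwise encode
+ // the DeleteTopics request immediately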
+ this.encoder = sasl != null ? encodeSaslHandshakeRequest : encodeDeleteTopicsRequest;
+
+ this.decoder = decodeReject;
+ }
+
+ private void onNetwork(
+ int msgTypeId,
+ DirectBuffer buffer,
+ int index,
+ int length)
+ {
+ switch (msgTypeId)
+ {
+ case BeginFW.TYPE_ID:
+ final BeginFW begin = beginRO.wrap(buffer, index, index + length);
+ onNetworkBegin(begin);
+ break;
+ case DataFW.TYPE_ID:
+ final DataFW data = dataRO.wrap(buffer, index, index + length);
+ onNetworkData(data);
+ break;
+ case EndFW.TYPE_ID:
+ final EndFW end = endRO.wrap(buffer, index, index + length);
+ onNetworkEnd(end);
+ break;
+ case AbortFW.TYPE_ID:
+ final AbortFW abort = abortRO.wrap(buffer, index, index + length);
+ onNetworkAbort(abort);
+ break;
+ case ResetFW.TYPE_ID:
+ final ResetFW reset = resetRO.wrap(buffer, index, index + length);
+ onNetworkReset(reset);
+ break;
+ case WindowFW.TYPE_ID:
+ final WindowFW window = windowRO.wrap(buffer, index, index + length);
+ onNetworkWindow(window);
+ break;
+ case SignalFW.TYPE_ID:
+ final SignalFW signal = signalRO.wrap(buffer, index, index + length);
+ onNetworkSignal(signal);
+ break;
+ default:
+ break;
+ }
+ }
+
+ private void onNetworkBegin(
+ BeginFW begin)
+ {
+ final long traceId = begin.traceId();
+
+ authorization = begin.authorization();
+ state = KafkaState.openingReply(state);
+
+ doNetworkWindow(traceId, 0L, 0, 0, decodePool.slotCapacity());
+ }
+
+ private void onNetworkData(
+ DataFW data)
+ {
+ final long sequence = data.sequence();
+ final long acknowledge = data.acknowledge();
+ final long traceId = data.traceId();
+ final long budgetId = data.budgetId();
+
+ assert acknowledge <= sequence;
+ assert sequence >= replySeq;
+
+ replySeq = sequence + data.reserved();
+ authorization = data.authorization();
+
+ assert replyAck <= replySeq;
+
+ if (replySeq > replyAck + replyMax)
+ {
+ cleanupNetwork(traceId);
+ }
+ else
+ {
+ if (decodeSlot == NO_SLOT)
+ {
+ decodeSlot = decodePool.acquire(initialId);
+ }
+
+ if (decodeSlot == NO_SLOT)
+ {
+ cleanupNetwork(traceId);
+ }
+ else
+ {
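+ // accumulate fragmented responses in the decode slot, then decode from
+ // the start of the buffered bytes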
+ final OctetsFW payload = data.payload();
+ int reserved = data.reserved();
+ int offset = payload.offset();
+ int limit = payload.limit();
+
+ final MutableDirectBuffer buffer = decodePool.buffer(decodeSlot);
+ buffer.putBytes(decodeSlotOffset, payload.buffer(), offset, limit - offset);
+ decodeSlotOffset += limit - offset;
+ decodeSlotReserved += reserved;
+
+ offset = 0;
+ limit = decodeSlotOffset;
+ reserved = decodeSlotReserved;
+
+ decodeNetwork(traceId, authorization, budgetId, reserved, buffer, offset, limit);
+ }
+ }
+ }
+
+ private void onNetworkEnd(
+ EndFW end)
+ {
+ final long traceId = end.traceId();
+
+ state = KafkaState.closedReply(state);
+
+ cleanupDecodeSlotIfNecessary();
+
+ if (!delegate.isApplicationReplyOpen())
+ {
+ cleanupNetwork(traceId);
+ }
+ else if (decodeSlot == NO_SLOT)
+ {
+ delegate.doApplicationEnd(traceId);
+ }
+ }
+
+ private void onNetworkAbort(
+ AbortFW abort)
+ {
+ final long traceId = abort.traceId();
+
+ state = KafkaState.closedReply(state);
+
+ cleanupNetwork(traceId);
+ }
+
+ private void onNetworkReset(
+ ResetFW reset)
+ {
+ final long traceId = reset.traceId();
+
+ state = KafkaState.closedInitial(state);
+
+ cleanupNetwork(traceId);
+ }
+
+ private void onNetworkWindow(
+ WindowFW window)
+ {
+ final long sequence = window.sequence();
+ final long acknowledge = window.acknowledge();
+ final int minimum = window.minimum();
+ final int maximum = window.maximum();
+ final long traceId = window.traceId();
+ final long budgetId = window.budgetId();
+ final int padding = window.padding();
+
+ assert acknowledge <= sequence;
+ assert sequence <= initialSeq;
+ assert acknowledge >= initialAck;
+ assert maximum + acknowledge >= initialMax + initialAck;
+
+ this.initialAck = acknowledge;
+ this.initialMax = maximum;
+ this.initialPad = padding;
+ this.initialMin = minimum;
+ this.initialBudgetId = budgetId;
+
+ assert initialAck <= initialSeq;
+
+ this.authorization = window.authorization();
+
+ state = KafkaState.openedInitial(state);
+
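+ // acquire the budget debitor once a shared budget is assigned so that
+ // network writes are debited against it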
+ if (initialBudgetId != NO_BUDGET_ID && initialDebIndex == NO_DEBITOR_INDEX)
+ {
+ initialDeb = supplyDebitor.apply(initialBudgetId);
+ initialDebIndex = initialDeb.acquire(initialBudgetId, initialId, this::doNetworkData);
+ assert initialDebIndex != NO_DEBITOR_INDEX;
+ }
+
+ doNetworkData(budgetId);
+
+ doEncodeRequest(traceId, budgetId);
+ }
+
+ private void doNetworkData(
+ long traceId)
+ {
+ if (encodeSlot != NO_SLOT)
+ {
+ final MutableDirectBuffer buffer = encodePool.buffer(encodeSlot);
+ final int limit = encodeSlotOffset;
+
+ encodeNetwork(traceId, authorization, initialBudgetId, buffer, 0, limit);
+ }
+ }
+
+ private void onNetworkSignal(
+ SignalFW signal)
+ {
+ final long traceId = signal.traceId();
+ final int signalId = signal.signalId();
+
+ if (signalId == SIGNAL_NEXT_REQUEST)
+ {
+ doEncodeRequest(traceId, initialBudgetId);
+ }
+ }
+
+ private void doNetworkBegin(
+ long traceId,
+ long authorization,
+ long affinity)
+ {
+ state = KafkaState.openingInitial(state);
+
+ Consumer<OctetsFW.Builder> extension = EMPTY_EXTENSION;
+
+ if (server != null)
+ {
+ extension = e -> e.set((b, o, l) -> proxyBeginExRW.wrap(b, o, l)
+ .typeId(proxyTypeId)
+ .address(a -> a.inet(i -> i.protocol(p -> p.set(STREAM))
+ .source("0.0.0.0")
+ .destination(server.host)
+ .sourcePort(0)
+ .destinationPort(server.port)))
+ .infos(i -> i.item(ii -> ii.authority(server.host)))
+ .build()
+ .sizeof());
+ }
+
+ network = newStream(this::onNetwork, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+ traceId, authorization, affinity, extension);
+ }
+
+ @Override
+ protected void doNetworkData(
+ long traceId,
+ long budgetId,
+ DirectBuffer buffer,
+ int offset,
+ int limit)
+ {
+ if (encodeSlot != NO_SLOT)
+ {
+ final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot);
+ encodeBuffer.putBytes(encodeSlotOffset, buffer, offset, limit - offset);
+ encodeSlotOffset += limit - offset;
+ encodeSlotTraceId = traceId;
+
+ buffer = encodeBuffer;
+ offset = 0;
+ limit = encodeSlotOffset;
+ }
+
+ encodeNetwork(traceId, authorization, budgetId, buffer, offset, limit);
+ }
+
+ private void doNetworkEnd(
+ long traceId,
+ long authorization)
+ {
+ state = KafkaState.closedInitial(state);
+
+ cleanupEncodeSlotIfNecessary();
+ cleanupBudgetIfNecessary();
+
+ doEnd(network, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+ traceId, authorization, EMPTY_EXTENSION);
+ }
+
+ private void doNetworkAbort(
+ long traceId)
+ {
+ if (!KafkaState.initialClosed(state))
+ {
+ doAbort(network, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+ traceId, authorization, EMPTY_EXTENSION);
+ state = KafkaState.closedInitial(state);
+ }
+
+ cleanupEncodeSlotIfNecessary();
+ cleanupBudgetIfNecessary();
+ }
+
+ private void doNetworkReset(
+ long traceId)
+ {
+ if (!KafkaState.replyClosed(state))
+ {
+ doReset(network, originId, routedId, replyId, replySeq, replyAck, replyMax,
+ traceId, authorization, EMPTY_OCTETS);
+ state = KafkaState.closedReply(state);
+ }
+
+ cleanupDecodeSlotIfNecessary();
+ }
+
+ private void doNetworkWindow(
+ long traceId,
+ long budgetId,
+ int minReplyNoAck,
+ int minReplyPad,
+ int minReplyMax)
+ {
+ final long newReplyAck = Math.max(replySeq - minReplyNoAck, replyAck);
+
+ if (newReplyAck > replyAck || minReplyMax > replyMax || !KafkaState.replyOpened(state))
+ {
+ replyAck = newReplyAck;
+ assert replyAck <= replySeq;
+
+ replyMax = minReplyMax;
+
+ state = KafkaState.openedReply(state);
+
+ doWindow(network, originId, routedId, replyId, replySeq, replyAck, replyMax,
+ traceId, authorization, budgetId, minReplyPad);
+ }
+ }
+
+ private void doEncodeRequest(
+ long traceId,
+ long budgetId)
+ {
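+ // requests are not pipelined: encode the next request only after the
+ // previous response has been fully decoded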
+ if (nextRequestId == nextResponseId)
+ {
+ encoder.accept(traceId, budgetId);
+ }
+ }
+
+ private void doEncodeDeleteTopicsRequest(
+ long traceId,
+ long budgetId)
+ {
+ final MutableDirectBuffer encodeBuffer = writeBuffer;
+ final int encodeOffset = DataFW.FIELD_OFFSET_PAYLOAD;
+ final int encodeLimit = encodeBuffer.capacity();
+
+ int encodeProgress = encodeOffset;
+
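+ // write the request header with a placeholder length, then patch the real
+ // size and correlation id below once the body has been encoded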
+ final RequestHeaderFW requestHeader = requestHeaderRW.wrap(encodeBuffer, encodeProgress, encodeLimit)
+ .length(0)
+ .apiKey(DELETE_TOPICS_API_KEY)
+ .apiVersion(DELETE_TOPICS_API_VERSION)
+ .correlationId(0)
+ .clientId(clientId)
+ .build();
+
+ encodeProgress = requestHeader.limit();
+
+ final DeleteTopicsRequestFW deleteTopicsRequest =
+ deleteTopicsRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit)
+ .topicCount(topics.size())
+ .build();
+
+ encodeProgress = deleteTopicsRequest.limit();
+
+ for (String topic : topics)
+ {
+ final TopicRequestFW topicRequest = topicRequestRW.wrap(encodeBuffer, encodeProgress, encodeLimit)
+ .topic(topic)
+ .build();
+
+ encodeProgress = topicRequest.limit();
+ }
+
+ DeleteTopicsRequestPart2FW deleteTopicsRequestPart2 = deleteTopicsRequestPart2RW
+ .wrap(encodeBuffer, encodeProgress, encodeLimit)
+ .timeout(timeout)
+ .build();
+
+ encodeProgress = deleteTopicsRequestPart2.limit();
+
+ final int requestId = nextRequestId++;
+ final int requestSize = encodeProgress - encodeOffset - RequestHeaderFW.FIELD_OFFSET_API_KEY;
+
+ requestHeaderRW.wrap(encodeBuffer, requestHeader.offset(), requestHeader.limit())
+ .length(requestSize)
+ .apiKey(requestHeader.apiKey())
+ .apiVersion(requestHeader.apiVersion())
+ .correlationId(requestId)
+ .clientId(requestHeader.clientId())
+ .build();
+
+ doNetworkData(traceId, budgetId, encodeBuffer, encodeOffset, encodeProgress);
+
+ decoder = decodeDeleteTopicsResponse;
+ }
+
+ private void encodeNetwork(
+ long traceId,
+ long authorization,
+ long budgetId,
+ DirectBuffer buffer,
+ int offset,
+ int limit)
+ {
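+ // send as much as the window, padding and budget allow now; any remainder
+ // is parked in the encode slot until the next WINDOW arrives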
+ final int length = limit - offset;
+ final int initialBudget = Math.max(initialMax - (int)(initialSeq - initialAck), 0);
+ final int reservedMax = Math.max(Math.min(length + initialPad, initialBudget), initialMin);
+
+ int reserved = reservedMax;
+
+ flush:
+ if (reserved > 0)
+ {
+
+ boolean claimed = false;
+
+ if (initialDebIndex != NO_DEBITOR_INDEX)
+ {
+ reserved = initialDeb.claim(traceId, initialDebIndex, initialId, reserved, reserved, 0);
+ claimed = reserved > 0;
+ }
+
+ if (reserved < initialPad || reserved == initialPad && length > 0)
+ {
+ break flush;
+ }
+
+ doData(network, originId, routedId, initialId, initialSeq, initialAck, initialMax,
+ traceId, authorization, budgetId, reserved, buffer, offset, length, EMPTY_EXTENSION);
+
+ initialSeq += reserved;
+
+ assert initialAck <= initialSeq;
+ }
+
+ final int flushed = Math.max(reserved - initialPad, 0);
+ final int remaining = length - flushed;
+ if (remaining > 0)
+ {
+ if (encodeSlot == NO_SLOT)
+ {
+ encodeSlot = encodePool.acquire(initialId);
+ }
+
+ if (encodeSlot == NO_SLOT)
+ {
+ cleanupNetwork(traceId);
+ }
+ else
+ {
+ final MutableDirectBuffer encodeBuffer = encodePool.buffer(encodeSlot);
+ encodeBuffer.putBytes(0, buffer, offset + flushed, remaining);
+ encodeSlotOffset = remaining;
+ }
+ }
+ else
+ {
+ cleanupEncodeSlotIfNecessary();
+ }
+ }
+
+ private void decodeNetwork(
+ long traceId,
+ long authorization,
+ long budgetId,
+ int reserved,
+ MutableDirectBuffer buffer,
+ int offset,
+ int limit)
+ {
+ KafkaDeleteTopicsClientDecoder previous = null;
+ int progress = offset;
+ while (progress <= limit && previous != decoder)
+ {
+ previous = decoder;
+ progress = decoder.decode(this, traceId, authorization, budgetId, reserved, buffer, offset, progress, limit);
+ }
+
+ if (progress < limit)
+ {
+ if (decodeSlot == NO_SLOT)
+ {
+ decodeSlot = decodePool.acquire(initialId);
+ }
+
+ if (decodeSlot == NO_SLOT)
+ {
+ cleanupNetwork(traceId);
+ }
+ else
+ {
+ final MutableDirectBuffer decodeBuffer = decodePool.buffer(decodeSlot);
+ decodeBuffer.putBytes(0, buffer, progress, limit - progress);
+ decodeSlotOffset = limit - progress;
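+ // carry forward reserved credit in proportion to the undecoded bytes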
+ decodeSlotReserved = (limit - progress) * reserved / (limit - offset);
+ }
+
+ doNetworkWindow(traceId, budgetId, decodeSlotOffset, 0, replyMax);
+ }
+ else
+ {
+ cleanupDecodeSlotIfNecessary();
+
+ if (KafkaState.replyClosing(state))
+ {
+ delegate.doApplicationEnd(traceId);
+ }
+ else if (reserved > 0)
+ {
+ doNetworkWindow(traceId, budgetId, 0, 0, replyMax);
+ }
+ }
+ }
+
+ @Override
+ protected void doDecodeSaslHandshakeResponse(
+ long traceId)
+ {
+ decoder = decodeSaslHandshakeResponse;
+ }
+
+ @Override
+ protected void doDecodeSaslHandshake(
+ long traceId)
+ {
+ decoder = decodeSaslHandshake;
+ }
+
+ @Override
+ protected void doDecodeSaslHandshakeMechanisms(
+ long traceId)
+ {
+ decoder = decodeSaslHandshakeMechanisms;
+ }
+
+ @Override
+ protected void doDecodeSaslHandshakeMechansim(
+ long traceId)
+ {
+ decoder = decodeSaslHandshakeMechanism;
+ }
+
+ @Override
+ protected void doDecodeSaslAuthenticateResponse(
+ long traceId)
+ {
+ decoder = decodeSaslAuthenticateResponse;
+ }
+
+ @Override
+ protected void doDecodeSaslAuthenticate(
+ long traceId)
+ {
+ decoder = decodeSaslAuthenticate;
+ }
+
+ @Override
+ protected void onDecodeSaslHandshakeResponse(
+ long traceId,
+ long authorization,
+ int errorCode)
+ {
+ switch (errorCode)
+ {
+ case ERROR_NONE:
+ encoder = encodeSaslAuthenticateRequest;
+ decoder = decodeSaslAuthenticateResponse;
+ break;
+ default:
+ delegate.cleanupApplication(traceId, errorCode);
+ doNetworkEnd(traceId, authorization);
+ break;
+ }
+ }
+
+ @Override
+ protected void onDecodeSaslAuthenticateResponse(
+ long traceId,
+ long authorization,
+ int errorCode)
+ {
+ switch (errorCode)
+ {
+ case ERROR_NONE:
+ encoder = encodeDeleteTopicsRequest;
+ decoder = decodeDeleteTopicsResponse;
+ break;
+ default:
+ delegate.cleanupApplication(traceId, errorCode);
+ doNetworkEnd(traceId, authorization);
+ break;
+ }
+ }
+
+ @Override
+ protected void onDecodeSaslResponse(
+ long traceId)
+ {
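+ // the SASL exchange advanced; signal ourselves to encode the next request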
+ nextResponseId++;
+ signaler.signalNow(originId, routedId, initialId, traceId, SIGNAL_NEXT_REQUEST, 0);
+ }
+
+ private void onDecodeDeleteTopicsResponse(
+ long traceId,
+ long authorization,
+ int throttle,
+ List<DeleteTopicsResponseInfo> topics)
+ {
+ delegate.doApplicationBegin(traceId, authorization, throttle, topics);
+ }
+
+ private void cleanupNetwork(
+ long traceId)
+ {
+ doNetworkReset(traceId);
+ doNetworkAbort(traceId);
+
+ delegate.cleanupApplication(traceId, ERROR_NONE);
+ }
+
+ private void cleanupDecodeSlotIfNecessary()
+ {
+ if (decodeSlot != NO_SLOT)
+ {
+ decodePool.release(decodeSlot);
+ decodeSlot = NO_SLOT;
+ decodeSlotOffset = 0;
+ decodeSlotReserved = 0;
+ }
+ }
+
+ private void cleanupEncodeSlotIfNecessary()
+ {
+ if (encodeSlot != NO_SLOT)
+ {
+ encodePool.release(encodeSlot);
+ encodeSlot = NO_SLOT;
+ encodeSlotOffset = 0;
+ encodeSlotTraceId = 0;
+ }
+ }
+
+ private void cleanupBudgetIfNecessary()
+ {
+ if (initialDebIndex != NO_DEBITOR_INDEX)
+ {
+ initialDeb.release(initialDebIndex, initialId);
+ initialDebIndex = NO_DEBITOR_INDEX;
+ }
+ }
+ }
+
+ private record DeleteTopicsResponseInfo(
+ String name,
+ short error)
+ {
+ }
+}
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java
index c0c713d083..087fef2f4c 100644
--- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java
+++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientProduceFactory.java
@@ -1017,8 +1017,7 @@ private void onApplicationData(
assert initialAck <= initialSeq;
- if (initialSeq > initialAck + initialMax ||
- extension.sizeof() > produceRecordFramingSize)
+ if (initialSeq > initialAck + initialMax)
{
cleanupApplication(traceId, EMPTY_OCTETS);
client.cleanupNetwork(traceId);
diff --git a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientRequestFactory.java b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientRequestFactory.java
index 67e585d9fe..a0ab2ff3c7 100644
--- a/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientRequestFactory.java
+++ b/runtime/binding-kafka/src/main/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/KafkaClientRequestFactory.java
@@ -55,14 +55,20 @@ public KafkaClientRequestFactory(
BindingHandler streamFactory,
UnaryOperator<KafkaSaslConfig> resolveSasl)
{
+ final KafkaClientDescribeClusterFactory clientDescribeClusterFactory = new KafkaClientDescribeClusterFactory(
+ config, context, supplyBinding, supplyDebitor, signaler, streamFactory, resolveSasl);
final KafkaClientCreateTopicsFactory clientCreateTopicsFactory = new KafkaClientCreateTopicsFactory(
config, context, supplyBinding, supplyDebitor, signaler, streamFactory, resolveSasl);
- final KafkaClientDescribeClusterFactory clientDescribeClusterFactory = new KafkaClientDescribeClusterFactory(
+ final KafkaClientDeleteTopicsFactory clientDeleteTopicsFactory = new KafkaClientDeleteTopicsFactory(
+ config, context, supplyBinding, supplyDebitor, signaler, streamFactory, resolveSasl);
+ final KafkaClientAlterConfigsFactory clientAlterConfigsFactory = new KafkaClientAlterConfigsFactory(
config, context, supplyBinding, supplyDebitor, signaler, streamFactory, resolveSasl);
final Int2ObjectHashMap<BindingHandler> factories = new Int2ObjectHashMap<>();
- factories.put(KafkaApi.CREATE_TOPICS.value(), clientCreateTopicsFactory);
factories.put(KafkaApi.DESCRIBE_CLUSTER.value(), clientDescribeClusterFactory);
+ factories.put(KafkaApi.CREATE_TOPICS.value(), clientCreateTopicsFactory);
+ factories.put(KafkaApi.DELETE_TOPICS.value(), clientDeleteTopicsFactory);
+ factories.put(KafkaApi.ALTER_CONFIGS.value(), clientAlterConfigsFactory);
this.kafkaTypeId = context.supplyTypeId(KafkaBinding.NAME);
this.factories = factories;
diff --git a/runtime/binding-kafka/src/main/zilla/protocol.idl b/runtime/binding-kafka/src/main/zilla/protocol.idl
index a3a8fce48f..4c7f773124 100644
--- a/runtime/binding-kafka/src/main/zilla/protocol.idl
+++ b/runtime/binding-kafka/src/main/zilla/protocol.idl
@@ -680,6 +680,78 @@ scope protocol
}
}
+ scope delete_topics
+ {
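+ // DeleteTopics v3 wire format: a count-prefixed list of topic names
+ // followed by a request timeout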
+ struct DeleteTopicsRequest // v3
+ {
+ int32 topicCount;
+ }
+
+ struct TopicRequest // v3
+ {
+ string16 topic;
+ }
+
+ struct DeleteTopicsRequestPart2
+ {
+ int32 timeout;
+ }
+
+ struct DeleteTopicsResponse
+ {
+ int32 correlationId;
+ int32 throttleTimeMillis;
+ int32 topicCount;
+ }
+
+ struct TopicResponse
+ {
+ string16 name;
+ int16 error;
+ }
+ }
+
+ scope alter_configs
+ {
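+ // AlterConfigs v1 wire format: count-prefixed resources, each carrying its
+ // own config entries, followed by a validate-only flag for dry runs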
+ struct AlterConfigsRequest // v1
+ {
+ int32 resourceCount;
+ }
+
+ struct ResourceRequest
+ {
+ uint8 type;
+ string16 name;
+ int32 configCount;
+ }
+
+ struct ConfigRequest
+ {
+ string16 name;
+ string16 value = null;
+ }
+
+ struct AlterConfigsRequestPart2
+ {
+ uint8 validate_only;
+ }
+
+ struct AlterConfigsResponse
+ {
+ int32 correlationId;
+ int32 throttleTimeMillis;
+ int32 resourceCount;
+ }
+
+ struct ResourceResponse
+ {
+ int16 error;
+ string16 message = null;
+ uint8 type;
+ string16 name;
+ }
+ }
+
scope sasl
{
struct SaslHandshakeRequest // v1
diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/AlterConfigsIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/AlterConfigsIT.java
new file mode 100644
index 0000000000..ad778caa38
--- /dev/null
+++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/AlterConfigsIT.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.aklivity.zilla.runtime.binding.kafka.internal.stream;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.junit.rules.RuleChain.outerRule;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.DisableOnDebug;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+
+import io.aklivity.k3po.runtime.junit.annotation.Specification;
+import io.aklivity.k3po.runtime.junit.rules.K3poRule;
+import io.aklivity.zilla.runtime.engine.test.EngineRule;
+import io.aklivity.zilla.runtime.engine.test.annotation.Configuration;
+
+public class AlterConfigsIT
+{
+ private final K3poRule k3po = new K3poRule()
+ .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/alter.configs")
+ .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1");
+
+ private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS));
+
+ private final EngineRule engine = new EngineRule()
+ .directory("target/zilla-itests")
+ .countersBufferCapacity(8192)
+ .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config")
+ .external("net0")
+ .clean();
+
+ @Rule
+ public final TestRule chain = outerRule(engine).around(k3po).around(timeout);
+
+ @Test
+ @Configuration("client.yaml")
+ @Specification({
+ "${app}/alter.topics.config/client",
+ "${net}/alter.topics.config/server"})
+ public void shouldAlterTopicConfigs() throws Exception
+ {
+ k3po.finish();
+ }
+}
diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/AlterConfigsSaslIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/AlterConfigsSaslIT.java
new file mode 100644
index 0000000000..3e707a63cf
--- /dev/null
+++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/AlterConfigsSaslIT.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.aklivity.zilla.runtime.binding.kafka.internal.stream;
+
+import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfigurationTest.KAFKA_CLIENT_SASL_SCRAM_NONCE_NAME;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.junit.rules.RuleChain.outerRule;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.DisableOnDebug;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+
+import io.aklivity.k3po.runtime.junit.annotation.Specification;
+import io.aklivity.k3po.runtime.junit.rules.K3poRule;
+import io.aklivity.zilla.runtime.engine.test.EngineRule;
+import io.aklivity.zilla.runtime.engine.test.annotation.Configuration;
+import io.aklivity.zilla.runtime.engine.test.annotation.Configure;
+
+public class AlterConfigsSaslIT
+{
+ private final K3poRule k3po = new K3poRule()
+ .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/alter.configs")
+ .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1.sasl.handshake.v1");
+
+ private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS));
+
+ private final EngineRule engine = new EngineRule()
+ .directory("target/zilla-itests")
+ .countersBufferCapacity(8192)
+ .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config")
+ .external("net0")
+ .clean();
+
+ @Rule
+ public final TestRule chain = outerRule(engine).around(k3po).around(timeout);
+
+ @Test
+ @Configuration("client.options.sasl.plain.yaml")
+ @Specification({
+ "${app}/alter.topics.config/client",
+ "${net}/alter.topics.config.sasl.plain/server"})
+ public void shouldAlterTopicConfigsWithSaslPlain() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Test
+ @Configuration("client.options.sasl.scram.yaml")
+ @Specification({
+ "${app}/alter.topics.config/client",
+ "${net}/alter.topics.config.sasl.scram/server"})
+ @Configure(name = KAFKA_CLIENT_SASL_SCRAM_NONCE_NAME,
+ value = "io.aklivity.zilla.runtime.binding.kafka.internal.stream.AlterConfigsSaslIT::supplyNonce")
+ public void shouldAlterTopicConfigsWithSaslScram() throws Exception
+ {
+ k3po.finish();
+ }
+
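+ // fixed client nonce (the example nonce from RFC 5802) keeps the scripted
+ // SCRAM exchange deterministic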
+ public static String supplyNonce()
+ {
+ return "fyko+d2lbbFgONRv9qkxdawL";
+ }
+}
diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CreateTopicsSaslIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CreateTopicsSaslIT.java
new file mode 100644
index 0000000000..7ecfc258d0
--- /dev/null
+++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/CreateTopicsSaslIT.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.aklivity.zilla.runtime.binding.kafka.internal.stream;
+
+import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfigurationTest.KAFKA_CLIENT_SASL_SCRAM_NONCE_NAME;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.junit.rules.RuleChain.outerRule;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.DisableOnDebug;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+
+import io.aklivity.k3po.runtime.junit.annotation.Specification;
+import io.aklivity.k3po.runtime.junit.rules.K3poRule;
+import io.aklivity.zilla.runtime.engine.test.EngineRule;
+import io.aklivity.zilla.runtime.engine.test.annotation.Configuration;
+import io.aklivity.zilla.runtime.engine.test.annotation.Configure;
+
+public class CreateTopicsSaslIT
+{
+ private final K3poRule k3po = new K3poRule()
+ .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/create.topics")
+ .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/network/create.topics.v3.sasl.handshake.v1");
+
+ private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS));
+
+ private final EngineRule engine = new EngineRule()
+ .directory("target/zilla-itests")
+ .countersBufferCapacity(8192)
+ .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config")
+ .external("net0")
+ .clean();
+
+ @Rule
+ public final TestRule chain = outerRule(engine).around(k3po).around(timeout);
+
+ @Test
+ @Configuration("client.options.sasl.plain.yaml")
+ @Specification({
+ "${app}/create.topics/client",
+ "${net}/create.topics.sasl.plain/server"})
+ public void shouldCreateTopicsWithSaslPlain() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Test
+ @Configuration("client.options.sasl.scram.yaml")
+ @Specification({
+ "${app}/create.topics/client",
+ "${net}/create.topics.sasl.scram/server"})
+ @Configure(name = KAFKA_CLIENT_SASL_SCRAM_NONCE_NAME,
+ value = "io.aklivity.zilla.runtime.binding.kafka.internal.stream.CreateTopicsSaslIT::supplyNonce")
+ public void shouldCreateTopicsWithSaslScram() throws Exception
+ {
+ k3po.finish();
+ }
+
+ public static String supplyNonce()
+ {
+ return "fyko+d2lbbFgONRv9qkxdawL";
+ }
+}
diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/DeleteTopicsIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/DeleteTopicsIT.java
new file mode 100644
index 0000000000..e2ca8408a5
--- /dev/null
+++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/DeleteTopicsIT.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.aklivity.zilla.runtime.binding.kafka.internal.stream;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.junit.rules.RuleChain.outerRule;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.DisableOnDebug;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+
+import io.aklivity.k3po.runtime.junit.annotation.Specification;
+import io.aklivity.k3po.runtime.junit.rules.K3poRule;
+import io.aklivity.zilla.runtime.engine.test.EngineRule;
+import io.aklivity.zilla.runtime.engine.test.annotation.Configuration;
+
+public class DeleteTopicsIT
+{
+ private final K3poRule k3po = new K3poRule()
+ .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/delete.topics")
+ .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3");
+
+ private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS));
+
+ private final EngineRule engine = new EngineRule()
+ .directory("target/zilla-itests")
+ .countersBufferCapacity(8192)
+ .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config")
+ .external("net0")
+ .clean();
+
+ @Rule
+ public final TestRule chain = outerRule(engine).around(k3po).around(timeout);
+
+ @Test
+ @Configuration("client.yaml")
+ @Specification({
+ "${app}/delete.topics/client",
+ "${net}/delete.topics/server"})
+ public void shouldDeleteTopics() throws Exception
+ {
+ k3po.finish();
+ }
+}
diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/DeleteTopicsSaslIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/DeleteTopicsSaslIT.java
new file mode 100644
index 0000000000..6c472fcbdf
--- /dev/null
+++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/DeleteTopicsSaslIT.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.aklivity.zilla.runtime.binding.kafka.internal.stream;
+
+import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfigurationTest.KAFKA_CLIENT_SASL_SCRAM_NONCE_NAME;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.junit.rules.RuleChain.outerRule;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.DisableOnDebug;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+
+import io.aklivity.k3po.runtime.junit.annotation.Specification;
+import io.aklivity.k3po.runtime.junit.rules.K3poRule;
+import io.aklivity.zilla.runtime.engine.test.EngineRule;
+import io.aklivity.zilla.runtime.engine.test.annotation.Configuration;
+import io.aklivity.zilla.runtime.engine.test.annotation.Configure;
+
+public class DeleteTopicsSaslIT
+{
+ private final K3poRule k3po = new K3poRule()
+ .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/delete.topics")
+ .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3.sasl.handshake.v1");
+
+ private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS));
+
+ private final EngineRule engine = new EngineRule()
+ .directory("target/zilla-itests")
+ .countersBufferCapacity(8192)
+ .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config")
+ .external("net0")
+ .clean();
+
+ @Rule
+ public final TestRule chain = outerRule(engine).around(k3po).around(timeout);
+
+ @Test
+ @Configuration("client.options.sasl.plain.yaml")
+ @Specification({
+ "${app}/delete.topics/client",
+ "${net}/delete.topics.sasl.plain/server"})
+ public void shouldDeleteTopicsWithSaslPlain() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Test
+ @Configuration("client.options.sasl.scram.yaml")
+ @Specification({
+ "${app}/delete.topics/client",
+ "${net}/delete.topics.sasl.scram/server"})
+ @Configure(name = KAFKA_CLIENT_SASL_SCRAM_NONCE_NAME,
+ value = "io.aklivity.zilla.runtime.binding.kafka.internal.stream.DeleteTopicsSaslIT::supplyNonce")
+ public void shouldDeleteTopicsWithSaslScram() throws Exception
+ {
+ k3po.finish();
+ }
+
+ public static String supplyNonce()
+ {
+ return "fyko+d2lbbFgONRv9qkxdawL";
+ }
+}
diff --git a/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/DescribeClusterSaslIT.java b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/DescribeClusterSaslIT.java
new file mode 100644
index 0000000000..714e683b0f
--- /dev/null
+++ b/runtime/binding-kafka/src/test/java/io/aklivity/zilla/runtime/binding/kafka/internal/stream/DescribeClusterSaslIT.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.aklivity.zilla.runtime.binding.kafka.internal.stream;
+
+import static io.aklivity.zilla.runtime.binding.kafka.internal.KafkaConfigurationTest.KAFKA_CLIENT_SASL_SCRAM_NONCE_NAME;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.junit.rules.RuleChain.outerRule;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.DisableOnDebug;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+
+import io.aklivity.k3po.runtime.junit.annotation.Specification;
+import io.aklivity.k3po.runtime.junit.rules.K3poRule;
+import io.aklivity.zilla.runtime.engine.test.EngineRule;
+import io.aklivity.zilla.runtime.engine.test.annotation.Configuration;
+import io.aklivity.zilla.runtime.engine.test.annotation.Configure;
+
+public class DescribeClusterSaslIT
+{
+ private final K3poRule k3po = new K3poRule()
+ .addScriptRoot("app", "io/aklivity/zilla/specs/binding/kafka/streams/application/describe.cluster")
+ .addScriptRoot("net", "io/aklivity/zilla/specs/binding/kafka/streams/network/describe.cluster.v0.sasl.handshake.v1");
+
+ private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS));
+
+ private final EngineRule engine = new EngineRule()
+ .directory("target/zilla-itests")
+ .countersBufferCapacity(8192)
+ .configurationRoot("io/aklivity/zilla/specs/binding/kafka/config")
+ .external("net0")
+ .clean();
+
+ @Rule
+ public final TestRule chain = outerRule(engine).around(k3po).around(timeout);
+
+ @Test
+ @Configuration("client.options.sasl.plain.yaml")
+ @Specification({
+ "${app}/cluster.brokers.info/client",
+ "${net}/cluster.brokers.info.sasl.plain/server"})
+ public void shouldDescribeClusterBrokerInfoWithSaslPlain() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Test
+ @Configuration("client.options.sasl.scram.yaml")
+ @Specification({
+ "${app}/cluster.brokers.info/client",
+ "${net}/cluster.brokers.info.sasl.scram/server"})
+ @Configure(name = KAFKA_CLIENT_SASL_SCRAM_NONCE_NAME,
+ value = "io.aklivity.zilla.runtime.binding.kafka.internal.stream.DescribeClusterSaslIT::supplyNonce")
+ public void shouldDescribeClusterBrokerInfoWithSaslScram() throws Exception
+ {
+ k3po.finish();
+ }
+
+ public static String supplyNonce()
+ {
+ return "fyko+d2lbbFgONRv9qkxdawL";
+ }
+}
diff --git a/runtime/binding-mqtt-kafka/pom.xml b/runtime/binding-mqtt-kafka/pom.xml
index a626b86bef..e5049ce30d 100644
--- a/runtime/binding-mqtt-kafka/pom.xml
+++ b/runtime/binding-mqtt-kafka/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-mqtt/pom.xml b/runtime/binding-mqtt/pom.xml
index d41b11af78..2eb850add0 100644
--- a/runtime/binding-mqtt/pom.xml
+++ b/runtime/binding-mqtt/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-openapi-asyncapi/pom.xml b/runtime/binding-openapi-asyncapi/pom.xml
index 579494d496..5bf7591394 100644
--- a/runtime/binding-openapi-asyncapi/pom.xml
+++ b/runtime/binding-openapi-asyncapi/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-openapi/pom.xml b/runtime/binding-openapi/pom.xml
index 4ffdcf1fe9..d58ab3f09f 100644
--- a/runtime/binding-openapi/pom.xml
+++ b/runtime/binding-openapi/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-proxy/pom.xml b/runtime/binding-proxy/pom.xml
index 0c1b46a7db..c9bdb407c2 100644
--- a/runtime/binding-proxy/pom.xml
+++ b/runtime/binding-proxy/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-sse-kafka/pom.xml b/runtime/binding-sse-kafka/pom.xml
index 20857a7602..573a06895c 100644
--- a/runtime/binding-sse-kafka/pom.xml
+++ b/runtime/binding-sse-kafka/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-sse/pom.xml b/runtime/binding-sse/pom.xml
index 2eb68cbbf2..b11a798555 100644
--- a/runtime/binding-sse/pom.xml
+++ b/runtime/binding-sse/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-tcp/pom.xml b/runtime/binding-tcp/pom.xml
index 152fc985ad..f3d7e18e67 100644
--- a/runtime/binding-tcp/pom.xml
+++ b/runtime/binding-tcp/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-tls/pom.xml b/runtime/binding-tls/pom.xml
index 4c7080a322..9b9999e1f7 100644
--- a/runtime/binding-tls/pom.xml
+++ b/runtime/binding-tls/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/binding-ws/pom.xml b/runtime/binding-ws/pom.xml
index 9e4fd0f449..7923732e5e 100644
--- a/runtime/binding-ws/pom.xml
+++ b/runtime/binding-ws/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/catalog-apicurio/pom.xml b/runtime/catalog-apicurio/pom.xml
index 7aae6d0e38..92ca89f37e 100644
--- a/runtime/catalog-apicurio/pom.xml
+++ b/runtime/catalog-apicurio/pom.xml
@@ -6,7 +6,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/catalog-filesystem/pom.xml b/runtime/catalog-filesystem/pom.xml
index 34712c0c9c..bf16f084b1 100644
--- a/runtime/catalog-filesystem/pom.xml
+++ b/runtime/catalog-filesystem/pom.xml
@@ -6,7 +6,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/catalog-inline/pom.xml b/runtime/catalog-inline/pom.xml
index e417de93ab..8057461088 100644
--- a/runtime/catalog-inline/pom.xml
+++ b/runtime/catalog-inline/pom.xml
@@ -6,7 +6,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/catalog-karapace/pom.xml b/runtime/catalog-karapace/pom.xml
index 04ef74018e..1b7ce33c86 100644
--- a/runtime/catalog-karapace/pom.xml
+++ b/runtime/catalog-karapace/pom.xml
@@ -6,7 +6,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/catalog-schema-registry/pom.xml b/runtime/catalog-schema-registry/pom.xml
index 5f9eb1bd68..a9aa8fc2fc 100644
--- a/runtime/catalog-schema-registry/pom.xml
+++ b/runtime/catalog-schema-registry/pom.xml
@@ -6,7 +6,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/command-metrics/pom.xml b/runtime/command-metrics/pom.xml
index 2f3f8e5d5a..d19a28aeab 100644
--- a/runtime/command-metrics/pom.xml
+++ b/runtime/command-metrics/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/command-start/pom.xml b/runtime/command-start/pom.xml
index cd8aac9a8f..f7e4b07373 100644
--- a/runtime/command-start/pom.xml
+++ b/runtime/command-start/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/command-stop/pom.xml b/runtime/command-stop/pom.xml
index 493b60953f..03fbc63623 100644
--- a/runtime/command-stop/pom.xml
+++ b/runtime/command-stop/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/command-version/pom.xml b/runtime/command-version/pom.xml
index a5b18f22f2..bb123a7271 100644
--- a/runtime/command-version/pom.xml
+++ b/runtime/command-version/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/command/pom.xml b/runtime/command/pom.xml
index 1e0a3d94a5..f882c5f7a9 100644
--- a/runtime/command/pom.xml
+++ b/runtime/command/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/common/pom.xml b/runtime/common/pom.xml
index 5aa9ed5e9e..08bc8138a3 100644
--- a/runtime/common/pom.xml
+++ b/runtime/common/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/engine/pom.xml b/runtime/engine/pom.xml
index cc84069d6c..5bc7d4b1fc 100644
--- a/runtime/engine/pom.xml
+++ b/runtime/engine/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/exporter-otlp/pom.xml b/runtime/exporter-otlp/pom.xml
index 78a14292e8..54422d759f 100644
--- a/runtime/exporter-otlp/pom.xml
+++ b/runtime/exporter-otlp/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/exporter-prometheus/pom.xml b/runtime/exporter-prometheus/pom.xml
index 56551fbb15..5b663abc61 100644
--- a/runtime/exporter-prometheus/pom.xml
+++ b/runtime/exporter-prometheus/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/exporter-stdout/pom.xml b/runtime/exporter-stdout/pom.xml
index 059289676e..d8222d2444 100644
--- a/runtime/exporter-stdout/pom.xml
+++ b/runtime/exporter-stdout/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/filesystem-http/pom.xml b/runtime/filesystem-http/pom.xml
index 2df0385ad5..022f555971 100644
--- a/runtime/filesystem-http/pom.xml
+++ b/runtime/filesystem-http/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/filesystem-http/src/main/java/io/aklivity/zilla/runtime/filesystem/http/internal/HttpPath.java b/runtime/filesystem-http/src/main/java/io/aklivity/zilla/runtime/filesystem/http/internal/HttpPath.java
index bb6d54c838..99bc466f5a 100644
--- a/runtime/filesystem-http/src/main/java/io/aklivity/zilla/runtime/filesystem/http/internal/HttpPath.java
+++ b/runtime/filesystem-http/src/main/java/io/aklivity/zilla/runtime/filesystem/http/internal/HttpPath.java
@@ -308,9 +308,13 @@ void success(
}
break;
case HTTP_NOT_FOUND:
- body = EMPTY_BODY;
- etag = null;
- changeCount++;
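+ // transition to not-found only once; repeated 404 responses must not
+ // register as additional changes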
+ if (body != EMPTY_BODY ||
+ etag != null)
+ {
+ body = EMPTY_BODY;
+ etag = null;
+ changeCount++;
+ }
break;
case HTTP_NOT_MODIFIED:
break;
diff --git a/runtime/guard-jwt/pom.xml b/runtime/guard-jwt/pom.xml
index 5e421926d6..cc4dc08771 100644
--- a/runtime/guard-jwt/pom.xml
+++ b/runtime/guard-jwt/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/metrics-grpc/pom.xml b/runtime/metrics-grpc/pom.xml
index 6f33ecdcd4..8d13c7a77d 100644
--- a/runtime/metrics-grpc/pom.xml
+++ b/runtime/metrics-grpc/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/metrics-http/pom.xml b/runtime/metrics-http/pom.xml
index 97813990cc..fbfdeac377 100644
--- a/runtime/metrics-http/pom.xml
+++ b/runtime/metrics-http/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/metrics-stream/pom.xml b/runtime/metrics-stream/pom.xml
index ad8f3d8ac3..616b23b386 100644
--- a/runtime/metrics-stream/pom.xml
+++ b/runtime/metrics-stream/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/model-avro/pom.xml b/runtime/model-avro/pom.xml
index 8207d84794..d91dbc6375 100644
--- a/runtime/model-avro/pom.xml
+++ b/runtime/model-avro/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/model-core/pom.xml b/runtime/model-core/pom.xml
index ca585e29b0..ab48cb4cec 100644
--- a/runtime/model-core/pom.xml
+++ b/runtime/model-core/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/model-json/pom.xml b/runtime/model-json/pom.xml
index d1dd7f3b43..193aa18b92 100644
--- a/runtime/model-json/pom.xml
+++ b/runtime/model-json/pom.xml
@@ -6,7 +6,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/model-protobuf/pom.xml b/runtime/model-protobuf/pom.xml
index bca6fa95ed..d027181acb 100644
--- a/runtime/model-protobuf/pom.xml
+++ b/runtime/model-protobuf/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/pom.xml b/runtime/pom.xml
index 2d125b134b..f648ed44e9 100644
--- a/runtime/pom.xml
+++ b/runtime/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>zilla</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/resolver-env/pom.xml b/runtime/resolver-env/pom.xml
index f4dda5ed42..a56fdab514 100644
--- a/runtime/resolver-env/pom.xml
+++ b/runtime/resolver-env/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/runtime/vault-filesystem/pom.xml b/runtime/vault-filesystem/pom.xml
index 3682164374..4f94506026 100644
--- a/runtime/vault-filesystem/pom.xml
+++ b/runtime/vault-filesystem/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>runtime</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-asyncapi.spec/pom.xml b/specs/binding-asyncapi.spec/pom.xml
index 808c9c3e4e..279c25be60 100644
--- a/specs/binding-asyncapi.spec/pom.xml
+++ b/specs/binding-asyncapi.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-echo.spec/pom.xml b/specs/binding-echo.spec/pom.xml
index 3be13318a8..ee764f5f34 100644
--- a/specs/binding-echo.spec/pom.xml
+++ b/specs/binding-echo.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-fan.spec/pom.xml b/specs/binding-fan.spec/pom.xml
index 0791629ea1..b6889572a7 100644
--- a/specs/binding-fan.spec/pom.xml
+++ b/specs/binding-fan.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-filesystem.spec/pom.xml b/specs/binding-filesystem.spec/pom.xml
index c687ab9794..5e7f73026f 100644
--- a/specs/binding-filesystem.spec/pom.xml
+++ b/specs/binding-filesystem.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-grpc-kafka.spec/pom.xml b/specs/binding-grpc-kafka.spec/pom.xml
index 1aaf6e330d..16c7749599 100644
--- a/specs/binding-grpc-kafka.spec/pom.xml
+++ b/specs/binding-grpc-kafka.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-grpc.spec/pom.xml b/specs/binding-grpc.spec/pom.xml
index 521d86e96d..912eb08c15 100644
--- a/specs/binding-grpc.spec/pom.xml
+++ b/specs/binding-grpc.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-http-filesystem.spec/pom.xml b/specs/binding-http-filesystem.spec/pom.xml
index 32adc9d218..5e2a490211 100644
--- a/specs/binding-http-filesystem.spec/pom.xml
+++ b/specs/binding-http-filesystem.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-http-kafka.spec/pom.xml b/specs/binding-http-kafka.spec/pom.xml
index ca85666800..116c8e16ca 100644
--- a/specs/binding-http-kafka.spec/pom.xml
+++ b/specs/binding-http-kafka.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-http.spec/pom.xml b/specs/binding-http.spec/pom.xml
index 19b55ddc82..42dc068428 100644
--- a/specs/binding-http.spec/pom.xml
+++ b/specs/binding-http.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-kafka-grpc.spec/pom.xml b/specs/binding-kafka-grpc.spec/pom.xml
index e224ec8776..268491b29f 100644
--- a/specs/binding-kafka-grpc.spec/pom.xml
+++ b/specs/binding-kafka-grpc.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-kafka.spec/pom.xml b/specs/binding-kafka.spec/pom.xml
index f0260ece81..b6f12d3497 100644
--- a/specs/binding-kafka.spec/pom.xml
+++ b/specs/binding-kafka.spec/pom.xml
@@ -8,7 +8,7 @@
<groupId>io.aklivity.zilla</groupId>
<artifactId>specs</artifactId>
- <version>0.9.95</version>
+ <version>0.9.96</version>
<relativePath>../pom.xml</relativePath>
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json
index 338e4377bb..bbdb120cef 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/schema/kafka.schema.patch.json
@@ -378,7 +378,7 @@
"type": "object",
"patternProperties":
{
- "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$":
+ "^[a-zA-Z:]+[a-zA-Z0-9\\._\\-:]*$":
{
"type": "string",
"pattern": "^\\$\\{message\\.(key|value)\\.([A-Za-z_][A-Za-z0-9_]*)\\}$"
@@ -405,7 +405,7 @@
"type": "object",
"patternProperties":
{
- "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$":
+ "^[a-zA-Z:]+[a-zA-Z0-9\\._\\-:]*$":
{
"type": "string",
"pattern": "^\\$\\{message\\.(key|value)\\.([A-Za-z_][A-Za-z0-9_]*)\\}$"
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/alter.configs/alter.topics.config/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/alter.configs/alter.topics.config/client.rpt
index e017cacd08..ebde58102e 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/alter.configs/alter.topics.config/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/alter.configs/alter.topics.config/client.rpt
@@ -56,5 +56,5 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.build()
.build()}
-read closed
write close
+read closed
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/alter.configs/alter.topics.config/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/alter.configs/alter.topics.config/server.rpt
index d89304e822..526c3b63e5 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/alter.configs/alter.topics.config/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/alter.configs/alter.topics.config/server.rpt
@@ -59,6 +59,7 @@ write zilla:begin.ext ${kafka:beginEx()
.build()
.build()
.build()}
+write flush
-write close
read closed
+write close
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/delete.topics/delete.topics/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/delete.topics/delete.topics/client.rpt
index 81b7ebcbe4..5b4d5a1fcd 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/delete.topics/delete.topics/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/delete.topics/delete.topics/client.rpt
@@ -46,5 +46,5 @@ read zilla:begin.ext ${kafka:matchBeginEx()
.build()
.build()}
-read closed
write close
+read closed
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/delete.topics/delete.topics/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/delete.topics/delete.topics/server.rpt
index 4bc986a081..b083b6463b 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/delete.topics/delete.topics/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/application/delete.topics/delete.topics/server.rpt
@@ -50,6 +50,8 @@ write zilla:begin.ext ${kafka:beginEx()
.build()
.build()}
-write close
+write flush
+
read closed
+write close
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1.sasl.handshake.v1/alter.topics.config.sasl.plain/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1.sasl.handshake.v1/alter.topics.config.sasl.plain/client.rpt
new file mode 100644
index 0000000000..cf78832ad2
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1.sasl.handshake.v1/alter.topics.config.sasl.plain/client.rpt
@@ -0,0 +1,91 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkConnectWindow 8192
+
+property newRequestId ${kafka:newRequestId()}
+property fetchWaitMax 500
+property fetchBytesMax 65535
+property partitionBytesMax 8192
+
+connect "zilla://streams/net0"
+ option zilla:window ${networkConnectWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+connected
+
+write 22 # size
+ 17s # sasl.handshake
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 5s "PLAIN" # mechanism
+
+read 17 # size
+ ${newRequestId}
+ 0s # no error
+ 1 # mechanisms
+ 5s "PLAIN" # PLAIN
+
+write 37 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 18
+ [0x00] "username" # authentication bytes
+ [0x00] "password"
+
+read 20 # size
+ ${newRequestId}
+ 0s # no error
+ -1
+ -1s # authentication bytes
+ 0L # session lifetime
+
+write 98 # size
+ 33s # alter configs
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 2 # resource count
+ [0x02] # resource type
+ 6s "events" # name
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 6s "delete" # value
+ [0x02] # resource type
+ 9s "snapshots" # name
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 7s "compact" # value
+ [0x00] # validate only
+
+read 41 # size
+ (int:newRequestId)
+ 0 # throttle time ms
+ 2 # topics
+ 0s # error code
+ -1s # error message
+ [0x02] # resource type
+ 6s "events" # name
+ 0s # error code
+ -1s # error message
+ [0x02] # resource type
+ 9s "snapshots" # name
+
+
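For reference, the 18 bytes labeled `# authentication bytes` above are the RFC 4616 SASL/PLAIN message `authzid NUL authcid NUL passwd` with an empty authorization identity. A minimal sketch of that framing (class and method names are illustrative):

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public final class SaslPlainSketch
{
    // Builds the PLAIN payload the script spells out as:
    //   [0x00] "username" [0x00] "password"
    static ByteBuffer plainInitialResponse(String user, String pass)
    {
        byte[] u = user.getBytes(StandardCharsets.UTF_8);
        byte[] p = pass.getBytes(StandardCharsets.UTF_8);
        ByteBuffer buf = ByteBuffer.allocate(1 + u.length + 1 + p.length);
        buf.put((byte) 0x00);  // NUL after the (empty) authzid
        buf.put(u);
        buf.put((byte) 0x00);  // NUL between authcid and password
        buf.put(p);
        buf.flip();
        return buf;            // 18 bytes for "username" / "password"
    }
}
```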
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1.sasl.handshake.v1/alter.topics.config.sasl.plain/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1.sasl.handshake.v1/alter.topics.config.sasl.plain/server.rpt
new file mode 100644
index 0000000000..9bc3a1ddb2
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1.sasl.handshake.v1/alter.topics.config.sasl.plain/server.rpt
@@ -0,0 +1,87 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkAcceptWindow 8192
+
+accept "zilla://streams/net0"
+ option zilla:window ${networkAcceptWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+accepted
+
+connected
+
+read 22 # size
+ 17s # sasl.handshake
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 5s "PLAIN" # mechanism
+
+write 17 # size
+ ${requestId}
+ 0s # no error
+ 1 # mechanisms
+ 5s "PLAIN" # PLAIN
+
+read 37 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 18
+ [0x00] "username" # authentication bytes
+ [0x00] "password"
+
+write 20 # size
+ ${requestId}
+ 0s # no error
+ -1
+ -1s # authentication bytes
+ 0L # session lifetime
+
+read 98 # size
+ 33s # alter configs
+ 1s # v1
+ (int:newRequestId)
+ 5s "zilla" # client id
+ 2 # resource count
+ [0x02] # resource type
+ 6s "events" # name
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 6s "delete" # value
+ [0x02] # resource type
+ 9s "snapshots" # name
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 7s "compact" # value
+ [0x00] # validate only
+
+write 41 # size
+ ${newRequestId}
+ 0 # throttle time ms
+ 2 # topics
+ 0s # error code
+ -1s # error message
+ [0x02] # resource type
+ 6s "events" # name
+ 0s # error code
+ -1s # error message
+ [0x02] # resource type
+ 9s "snapshots" # name
+
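The `98 # size` on these AlterConfigs frames is easy to re-derive from the non-flexible Kafka encoding (INT16-length strings, INT32 array counts, one type byte per resource). A back-of-the-envelope check, nothing Zilla-specific:

```java
// Recomputes the AlterConfigs v1 request size used in the scripts above.
public final class AlterConfigsSizeSketch
{
    static int alterConfigsV1Size()
    {
        int size = 2 + 2 + 4;                      // api key 33, version, correlation id
        size += 2 + "zilla".length();              // client id, INT16-prefixed
        size += 4;                                 // resource count = 2
        size += 1 + 2 + "events".length();         // type TOPIC (0x02), name
        size += 4                                  // configs count = 1
            + 2 + "cleanup.policy".length()
            + 2 + "delete".length();
        size += 1 + 2 + "snapshots".length();
        size += 4
            + 2 + "cleanup.policy".length()
            + 2 + "compact".length();
        size += 1;                                 // validate_only boolean
        return size;                               // == 98
    }
}
```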
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1.sasl.handshake.v1/alter.topics.config.sasl.scram/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1.sasl.handshake.v1/alter.topics.config.sasl.scram/client.rpt
new file mode 100644
index 0000000000..50eebe3388
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1.sasl.handshake.v1/alter.topics.config.sasl.scram/client.rpt
@@ -0,0 +1,105 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkConnectWindow 8192
+
+property newRequestId ${kafka:newRequestId()}
+property fetchWaitMax 500
+property fetchBytesMax 65535
+property partitionBytesMax 8192
+
+connect "zilla://streams/net0"
+ option zilla:window ${networkConnectWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+connected
+
+write 28 # size
+ 17s # sasl.handshake
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 11s "SCRAM-SHA-1" # mechanism
+
+read 23 # size
+ ${newRequestId}
+ 0s # no error
+ 1 # mechanisms
+ 11s "SCRAM-SHA-1" # SCRAM
+
+write 55 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 36 # authentication bytes
+ "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL"
+
+read 92 # size
+ ${newRequestId}
+ 0s # no error
+ -1s
+ 70 "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096"
+ 0L # session lifetime
+
+write 101 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 82 # authentication bytes
+ "c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts="
+
+read 52 # size
+ ${newRequestId}
+ 0s # no error
+ -1s
+ 30 "v=rmF9pqV8S7suAoZWja4dJRkFsKQ="
+ 0L # session lifetime
+
+write 98 # size
+ 33s # alter configs
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 2 # resource count
+ [0x02] # resource type
+ 6s "events" # name
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 6s "delete" # value
+ [0x02] # resource type
+ 9s "snapshots" # name
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 7s "compact" # value
+ [0x00] # validate only
+
+read 41 # size
+ (int:newRequestId)
+ 0 # throttle time ms
+ 2 # topics
+ 0s # error code
+ -1s # error message
+ [0x02] # resource type
+ 6s "events" # name
+ 0s # error code
+ -1s # error message
+ [0x02] # resource type
+ 9s "snapshots" # name
+
+
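The SCRAM strings in these scripts are the worked SCRAM-SHA-1 example from RFC 5802, so each token can be checked independently. The `c=biws` attribute in the client-final message, for instance, is just the base64 of the GS2 header `n,,`:

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public final class Gs2HeaderSketch
{
    public static void main(String[] args)
    {
        String gs2 = "n,,"; // no channel binding, empty authzid
        String c = Base64.getEncoder()
            .encodeToString(gs2.getBytes(StandardCharsets.US_ASCII));
        System.out.println(c); // prints "biws", as embedded in the script
    }
}
```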
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1.sasl.handshake.v1/alter.topics.config.sasl.scram/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1.sasl.handshake.v1/alter.topics.config.sasl.scram/server.rpt
new file mode 100644
index 0000000000..9d026fc7f9
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1.sasl.handshake.v1/alter.topics.config.sasl.scram/server.rpt
@@ -0,0 +1,101 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkAcceptWindow 8192
+
+accept "zilla://streams/net0"
+ option zilla:window ${networkAcceptWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+accepted
+
+connected
+
+read 28 # size
+ 17s # sasl.handshake
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 11s "SCRAM-SHA-1" # mechanism
+
+write 23 # size
+ ${requestId}
+ 0s # no error
+ 1 # mechanisms
+ 11s "SCRAM-SHA-1" # SCRAM
+
+read 55 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 36 # authentication bytes
+ "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL"
+
+write 92 # size
+ ${requestId}
+ 0s # no error
+ -1s
+ 70 "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096" # authentication bytes
+ 0L # session lifetime
+
+read 101 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 82 # authentication bytes
+ "c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts="
+
+write 52 # size
+ ${requestId}
+ 0s # no error
+ -1s
+ 30 "v=rmF9pqV8S7suAoZWja4dJRkFsKQ=" # authentication bytes
+ 0L # session lifetime
+
+read 98 # size
+ 33s # alter configs
+ 1s # v1
+ (int:newRequestId)
+ 5s "zilla" # client id
+ 2 # resource count
+ [0x02] # resource type
+ 6s "events" # name
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 6s "delete" # value
+ [0x02] # resource type
+ 9s "snapshots" # name
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 7s "compact" # value
+ [0x00] # validate only
+
+write 41 # size
+ ${newRequestId}
+ 0 # throttle time ms
+ 2 # topics
+ 0s # error code
+ -1s # error message
+ [0x02] # resource type
+ 6s "events" # name
+ 0s # error code
+ -1s # error message
+ [0x02] # resource type
+ 9s "snapshots" # name
+
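The proof and verifier constants above come from the RFC 5802 test vector (password `pencil`, with the salt and iteration count carried in the server-first message). Should these values ever need regenerating, the client-side keys derive from standard JDK crypto along these lines (a sketch, not the Zilla implementation):

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Base64;
import javax.crypto.Mac;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;

public final class ScramProofSketch
{
    static String storedKeyBase64() throws Exception
    {
        // SaltedPassword = PBKDF2-HMAC-SHA1(password, salt, 4096 iterations)
        byte[] salt = Base64.getDecoder().decode("QSXCR+Q6sek8bf92");
        byte[] salted = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA1")
            .generateSecret(new PBEKeySpec("pencil".toCharArray(), salt, 4096, 160))
            .getEncoded();

        Mac hmac = Mac.getInstance("HmacSHA1");
        hmac.init(new SecretKeySpec(salted, "HmacSHA1"));
        byte[] clientKey = hmac.doFinal("Client Key".getBytes(StandardCharsets.US_ASCII));
        byte[] storedKey = MessageDigest.getInstance("SHA-1").digest(clientKey);
        // ClientProof = ClientKey XOR HMAC(StoredKey, AuthMessage); its base64
        // is the "p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=" token in the client-final message.
        return Base64.getEncoder().encodeToString(storedKey);
    }
}
```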
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1/alter.topics.config/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1/alter.topics.config/client.rpt
index c0d1c52c8f..51812bc021 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1/alter.topics.config/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1/alter.topics.config/client.rpt
@@ -28,21 +28,22 @@ connect "zilla://streams/net0"
connected
-write 130 # size
- 19s # create topics
+write 98 # size
+ 33s # alter configs
1s # v1
${newRequestId}
+ 5s "zilla" # client id
2 # resource count
[0x02] # resource type
6s "events" # name
1 # configs
14s "cleanup.policy" # name
6s "delete" # value
- 6s "snapshots" # name
+ [0x02] # resource type
+ 9s "snapshots" # name
1 # configs
14s "cleanup.policy" # name
- 6s "compact" # value
- 0 # timeout
+ 7s "compact" # value
[0x00] # validate only
read 41 # size
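This hunk corrects a copy/paste slip: the old script framed the request under api key 19 (CreateTopics) with a trailing CreateTopics-style timeout, although the scenario exercises AlterConfigs. The rewrite uses api key 33, repeats the resource-type byte for each entry, fixes the `snapshots` length prefix, and drops the timeout, which AlterConfigs does not carry. The constants involved, for orientation (values from the upstream Kafka protocol definition):

```java
// Kafka api keys and resource types referenced across these spec scripts.
public final class KafkaApiKeys
{
    static final short CREATE_TOPICS = 19;
    static final short DELETE_TOPICS = 20;
    static final short ALTER_CONFIGS = 33;
    static final short DESCRIBE_CLUSTER = 60;

    static final byte RESOURCE_TYPE_TOPIC = 0x02; // prefixed to each resource entry
}
```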
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1/alter.topics.config/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1/alter.topics.config/server.rpt
index dea77e8777..7e1fa535b1 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1/alter.topics.config/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1/alter.topics.config/server.rpt
@@ -25,21 +25,22 @@ accepted
connected
-read 130 # size
- 19s # create topics
+read 98 # size
+ 33s # alter configs
1s # v1
(int:newRequestId)
+ 5s "zilla" # client id
2 # resource count
[0x02] # resource type
6s "events" # name
1 # configs
14s "cleanup.policy" # name
6s "delete" # value
- 6s "snapshots" # name
+ [0x02] # resource type
+ 9s "snapshots" # name
1 # configs
14s "cleanup.policy" # name
- 6s "compact" # value
- 0 # timeout
+ 7s "compact" # value
[0x00] # validate only
write 41 # size
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/create.topics.v3.sasl.handshake.v1/create.topics.sasl.plain/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/create.topics.v3.sasl.handshake.v1/create.topics.sasl.plain/client.rpt
new file mode 100644
index 0000000000..897955b02b
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/create.topics.v3.sasl.handshake.v1/create.topics.sasl.plain/client.rpt
@@ -0,0 +1,98 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkConnectWindow 8192
+
+property newRequestId ${kafka:newRequestId()}
+property fetchWaitMax 500
+property fetchBytesMax 65535
+property partitionBytesMax 8192
+
+connect "zilla://streams/net0"
+ option zilla:window ${networkConnectWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+connected
+
+write 22 # size
+ 17s # sasl.handshake
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 5s "PLAIN" # mechanism
+
+read 17 # size
+ ${newRequestId}
+ 0s # no error
+ 1 # mechanisms
+ 5s "PLAIN" # PLAIN
+
+write 37 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 18
+ [0x00] "username" # authentication bytes
+ [0x00] "password"
+
+read 20 # size
+ ${newRequestId}
+ 0s # no error
+ -1
+ -1s # authentication bytes
+ 0L # session lifetime
+
+write 144 # size
+ 19s # create topics
+ 3s # v3
+ ${newRequestId}
+ 5s "zilla" # client id
+ 2 # topic count
+ 6s "events" # name
+ 1 # number of partitions
+ 1s # replication factor
+ 1 # assignments
+ 0 # partition index
+ 1 # broker ids
+ 0 # broker id
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 6s "delete" # value
+ 9s "snapshots" # name
+ 1 # number of partitions
+ 1s # replication factor
+ 1 # assignments
+ 0 # partition index
+ 1 # broker ids
+ 0 # broker id
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 7s "compact" # value
+                                  0 # timeout
+                                  [0x00] # validate only
+
+read 39 # size
+ (int:newRequestId)
+ 0 # throttle time ms
+ 2 # topics
+ 6s "events" # name
+ 0s # error code
+ -1s # error message
+ 9s "snapshots" # name
+ 0s # error code
+ -1s # error message
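The `144 # size` here decomposes as 19 bytes of header plus topic count, 58 bytes for the `events` entry, 62 for `snapshots`, the 4-byte timeout, and the final validate-only flag that CreateTopics carries from v1 onward. The same style of size check as before:

```java
// Recomputes the CreateTopics v3 request size (non-flexible encoding).
public final class CreateTopicsSizeSketch
{
    static int createTopicsV3Size()
    {
        int header = 2 + 2 + 4                  // api key 19, v3, correlation id
            + 2 + "zilla".length()              // client id
            + 4;                                // topic count = 2
        int events = 2 + "events".length()      // name
            + 4 + 2                             // partitions, replication factor
            + 4 + 4 + 4 + 4                     // assignments: count, index, broker count, broker id
            + 4 + 2 + "cleanup.policy".length() // configs count, name
            + 2 + "delete".length();            // value
        int snapshots = 2 + "snapshots".length()
            + 4 + 2
            + 4 + 4 + 4 + 4
            + 4 + 2 + "cleanup.policy".length()
            + 2 + "compact".length();
        return header + events + snapshots
            + 4 + 1;                            // timeout + validate_only == 144
    }
}
```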
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/create.topics.v3.sasl.handshake.v1/create.topics.sasl.plain/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/create.topics.v3.sasl.handshake.v1/create.topics.sasl.plain/server.rpt
new file mode 100644
index 0000000000..3cd750e67c
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/create.topics.v3.sasl.handshake.v1/create.topics.sasl.plain/server.rpt
@@ -0,0 +1,95 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkAcceptWindow 8192
+
+accept "zilla://streams/net0"
+ option zilla:window ${networkAcceptWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+accepted
+
+connected
+
+read 22 # size
+ 17s # sasl.handshake
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 5s "PLAIN" # mechanism
+
+write 17 # size
+ ${requestId}
+ 0s # no error
+ 1 # mechanisms
+ 5s "PLAIN" # PLAIN
+
+read 37 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 18
+ [0x00] "username" # authentication bytes
+ [0x00] "password"
+
+write 20 # size
+ ${requestId}
+ 0s # no error
+ -1
+ -1s # authentication bytes
+ 0L # session lifetime
+
+read 144 # size
+ 19s # create topics
+ 3s # v3
+ (int:newRequestId)
+ 5s "zilla" # client id
+ 2 # topic count
+ 6s "events" # name
+ 1 # number of partitions
+ 1s # replication factor
+ 1 # assignments
+ 0 # partition index
+ 1 # broker ids
+ 0 # broker id
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 6s "delete" # value
+ 9s "snapshots" # name
+ 1 # number of partitions
+ 1s # replication factor
+ 1 # assignments
+ 0 # partition index
+ 1 # broker ids
+ 0 # broker id
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 7s "compact" # value
+                                  0 # timeout
+                                  [0x00] # validate only
+
+write 39 # size
+ ${newRequestId}
+ 0 # throttle time ms
+ 2 # topics
+ 6s "events" # name
+ 0s # error code
+ -1s # error message
+ 9s "snapshots" # name
+ 0s # error code
+ -1s # error message
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/create.topics.v3.sasl.handshake.v1/create.topics.sasl.scram/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/create.topics.v3.sasl.handshake.v1/create.topics.sasl.scram/client.rpt
new file mode 100644
index 0000000000..46d37728d3
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/create.topics.v3.sasl.handshake.v1/create.topics.sasl.scram/client.rpt
@@ -0,0 +1,112 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkConnectWindow 8192
+
+property newRequestId ${kafka:newRequestId()}
+property fetchWaitMax 500
+property fetchBytesMax 65535
+property partitionBytesMax 8192
+
+connect "zilla://streams/net0"
+ option zilla:window ${networkConnectWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+connected
+
+write 28 # size
+ 17s # sasl.handshake
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 11s "SCRAM-SHA-1" # mechanism
+
+read 23 # size
+ ${newRequestId}
+ 0s # no error
+ 1 # mechanisms
+ 11s "SCRAM-SHA-1" # SCRAM
+
+write 55 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 36 # authentication bytes
+ "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL"
+
+read 92 # size
+ ${newRequestId}
+ 0s # no error
+ -1s
+ 70 "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096"
+ 0L # session lifetime
+
+write 101 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 82 # authentication bytes
+ "c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts="
+
+read 52 # size
+ ${newRequestId}
+ 0s # no error
+ -1s
+ 30 "v=rmF9pqV8S7suAoZWja4dJRkFsKQ="
+ 0L # session lifetime
+
+write 144 # size
+ 19s # create topics
+ 3s # v3
+ ${newRequestId}
+ 5s "zilla" # client id
+ 2 # topic count
+ 6s "events" # name
+ 1 # number of partitions
+ 1s # replication factor
+ 1 # assignments
+ 0 # partition index
+ 1 # broker ids
+ 0 # broker id
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 6s "delete" # value
+ 9s "snapshots" # name
+ 1 # number of partitions
+ 1s # replication factor
+ 1 # assignments
+ 0 # partition index
+ 1 # broker ids
+ 0 # broker id
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 7s "compact" # value
+                                  0 # timeout
+                                  [0x00] # validate only
+
+read 39 # size
+ (int:newRequestId)
+ 0 # throttle time ms
+ 2 # topics
+ 6s "events" # name
+ 0s # error code
+ -1s # error message
+ 9s "snapshots" # name
+ 0s # error code
+ -1s # error message
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/create.topics.v3.sasl.handshake.v1/create.topics.sasl.scram/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/create.topics.v3.sasl.handshake.v1/create.topics.sasl.scram/server.rpt
new file mode 100644
index 0000000000..b655c2a9b1
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/create.topics.v3.sasl.handshake.v1/create.topics.sasl.scram/server.rpt
@@ -0,0 +1,109 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkAcceptWindow 8192
+
+accept "zilla://streams/net0"
+ option zilla:window ${networkAcceptWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+accepted
+
+connected
+
+read 28 # size
+ 17s # sasl.handshake
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 11s "SCRAM-SHA-1" # mechanism
+
+write 23 # size
+ ${requestId}
+ 0s # no error
+ 1 # mechanisms
+ 11s "SCRAM-SHA-1" # SCRAM
+
+read 55 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 36 # authentication bytes
+ "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL"
+
+write 92 # size
+ ${requestId}
+ 0s # no error
+ -1s
+ 70 "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096" # authentication bytes
+ 0L # session lifetime
+
+read 101 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 82 # authentication bytes
+ "c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts="
+
+write 52 # size
+ ${requestId}
+ 0s # no error
+ -1s
+ 30 "v=rmF9pqV8S7suAoZWja4dJRkFsKQ=" # authentication bytes
+ 0L # session lifetime
+
+read 144 # size
+ 19s # create topics
+ 3s # v3
+ (int:newRequestId)
+ 5s "zilla" # client id
+ 2 # topic count
+ 6s "events" # name
+ 1 # number of partitions
+ 1s # replication factor
+ 1 # assignments
+ 0 # partition index
+ 1 # broker ids
+ 0 # broker id
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 6s "delete" # value
+ 9s "snapshots" # name
+ 1 # number of partitions
+ 1s # replication factor
+ 1 # assignments
+ 0 # partition index
+ 1 # broker ids
+ 0 # broker id
+ 1 # configs
+ 14s "cleanup.policy" # name
+ 7s "compact" # value
+                                  0 # timeout
+                                  [0x00] # validate only
+
+write 39 # size
+ ${newRequestId}
+ 0 # throttle time ms
+ 2 # topics
+ 6s "events" # name
+ 0s # error code
+ -1s # error message
+ 9s "snapshots" # name
+ 0s # error code
+ -1s # error message
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3.sasl.handshake.v1/delete.topics.sasl.plain/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3.sasl.handshake.v1/delete.topics.sasl.plain/client.rpt
new file mode 100644
index 0000000000..77d2adcc59
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3.sasl.handshake.v1/delete.topics.sasl.plain/client.rpt
@@ -0,0 +1,77 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkConnectWindow 8192
+
+property newRequestId ${kafka:newRequestId()}
+property fetchWaitMax 500
+property fetchBytesMax 65535
+property partitionBytesMax 8192
+
+connect "zilla://streams/net0"
+ option zilla:window ${networkConnectWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+connected
+
+write 22 # size
+ 17s # sasl.handshake
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 5s "PLAIN" # mechanism
+
+read 17 # size
+ ${newRequestId}
+ 0s # no error
+ 1 # mechanisms
+ 5s "PLAIN" # PLAIN
+
+write 37 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 18
+ [0x00] "username" # authentication bytes
+ [0x00] "password"
+
+read 20 # size
+ ${newRequestId}
+ 0s # no error
+ -1
+ -1s # authentication bytes
+ 0L # session lifetime
+
+write 42 # size
+ 20s # delete topics
+ 3s # v3
+ ${newRequestId}
+ 5s "zilla" # client id
+ 2 # topic count
+ 6s "events" # name
+ 9s "snapshots" # name
+ 0 # timeout
+
+read 35 # size
+ (int:newRequestId)
+ 0 # throttle time ms
+ 2 # topics
+ 6s "events" # name
+ 0s # error code
+ 9s "snapshots" # name
+ 0s # error code
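DeleteTopics v3 is the leanest of these request bodies: an array of topic-name strings plus a timeout, hence the 42-byte frame. The arithmetic:

```java
// Recomputes the DeleteTopics v3 request size used above.
public final class DeleteTopicsSizeSketch
{
    static int deleteTopicsV3Size()
    {
        return 2 + 2 + 4               // api key 20, v3, correlation id
            + 2 + "zilla".length()     // client id
            + 4                        // topic count = 2
            + 2 + "events".length()
            + 2 + "snapshots".length()
            + 4;                       // timeout -> 42 total
    }
}
```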
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3.sasl.handshake.v1/delete.topics.sasl.plain/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3.sasl.handshake.v1/delete.topics.sasl.plain/server.rpt
new file mode 100644
index 0000000000..a34f717c05
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3.sasl.handshake.v1/delete.topics.sasl.plain/server.rpt
@@ -0,0 +1,74 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkAcceptWindow 8192
+
+accept "zilla://streams/net0"
+ option zilla:window ${networkAcceptWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+accepted
+
+connected
+
+read 22 # size
+ 17s # sasl.handshake
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 5s "PLAIN" # mechanism
+
+write 17 # size
+ ${requestId}
+ 0s # no error
+ 1 # mechanisms
+ 5s "PLAIN" # PLAIN
+
+read 37 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 18
+ [0x00] "username" # authentication bytes
+ [0x00] "password"
+
+write 20 # size
+ ${requestId}
+ 0s # no error
+ -1
+ -1s # authentication bytes
+ 0L # session lifetime
+
+read 42 # size
+ 20s # delete topics
+ 3s # v3
+ (int:newRequestId)
+ 5s "zilla" # client id
+ 2 # topic count
+ 6s "events" # name
+ 9s "snapshots" # name
+ 0 # timeout
+
+write 35 # size
+ ${newRequestId}
+ 0 # throttle time ms
+ 2 # topics
+ 6s "events" # name
+ 0s # error code
+ 9s "snapshots" # name
+ 0s # error code
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3.sasl.handshake.v1/delete.topics.sasl.scram/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3.sasl.handshake.v1/delete.topics.sasl.scram/client.rpt
new file mode 100644
index 0000000000..d232f5ef88
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3.sasl.handshake.v1/delete.topics.sasl.scram/client.rpt
@@ -0,0 +1,91 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkConnectWindow 8192
+
+property newRequestId ${kafka:newRequestId()}
+property fetchWaitMax 500
+property fetchBytesMax 65535
+property partitionBytesMax 8192
+
+connect "zilla://streams/net0"
+ option zilla:window ${networkConnectWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+connected
+
+write 28 # size
+ 17s # sasl.handshake
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 11s "SCRAM-SHA-1" # mechanism
+
+read 23 # size
+ ${newRequestId}
+ 0s # no error
+ 1 # mechanisms
+ 11s "SCRAM-SHA-1" # SCRAM
+
+write 55 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 36 # authentication bytes
+ "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL"
+
+read 92 # size
+ ${newRequestId}
+ 0s # no error
+ -1s
+ 70 "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096"
+ 0L # session lifetime
+
+write 101 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 82 # authentication bytes
+ "c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts="
+
+read 52 # size
+ ${newRequestId}
+ 0s # no error
+ -1s
+ 30 "v=rmF9pqV8S7suAoZWja4dJRkFsKQ="
+ 0L # session lifetime
+
+write 42 # size
+ 20s # delete topics
+ 3s # v3
+ ${newRequestId}
+ 5s "zilla" # client id
+ 2 # topic count
+ 6s "events" # name
+ 9s "snapshots" # name
+ 0 # timeout
+
+read 35 # size
+ (int:newRequestId)
+ 0 # throttle time ms
+ 2 # topics
+ 6s "events" # name
+ 0s # error code
+ 9s "snapshots" # name
+ 0s # error code
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3.sasl.handshake.v1/delete.topics.sasl.scram/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3.sasl.handshake.v1/delete.topics.sasl.scram/server.rpt
new file mode 100644
index 0000000000..bfdeb1b149
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3.sasl.handshake.v1/delete.topics.sasl.scram/server.rpt
@@ -0,0 +1,88 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkAcceptWindow 8192
+
+accept "zilla://streams/net0"
+ option zilla:window ${networkAcceptWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+accepted
+
+connected
+
+read 28 # size
+ 17s # sasl.handshake
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 11s "SCRAM-SHA-1" # mechanism
+
+write 23 # size
+ ${requestId}
+ 0s # no error
+ 1 # mechanisms
+ 11s "SCRAM-SHA-1" # SCRAM
+
+read 55 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 36 # authentication bytes
+ "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL"
+
+write 92 # size
+ ${requestId}
+ 0s # no error
+ -1s
+ 70 "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096" # authentication bytes
+ 0L # session lifetime
+
+read 101 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 82 # authentication bytes
+ "c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts="
+
+write 52 # size
+ ${requestId}
+ 0s # no error
+ -1s
+ 30 "v=rmF9pqV8S7suAoZWja4dJRkFsKQ=" # authentication bytes
+ 0L # session lifetime
+
+read 42 # size
+ 20s # delete topics
+ 3s # v3
+ (int:newRequestId)
+ 5s "zilla" # client id
+ 2 # topic count
+ 6s "events" # name
+ 9s "snapshots" # name
+ 0 # timeout
+
+write 35 # size
+ ${newRequestId}
+ 0 # throttle time ms
+ 2 # topics
+ 6s "events" # name
+ 0s # error code
+ 9s "snapshots" # name
+ 0s # error code
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3/delete.topics/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3/delete.topics/client.rpt
index 7fcea14d9f..29fe443624 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3/delete.topics/client.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3/delete.topics/client.rpt
@@ -28,10 +28,11 @@ connect "zilla://streams/net0"
connected
-write 39 # size
+write 42 # size
20s # delete topics
3s # v3
${newRequestId}
+ 5s "zilla" # client id
2 # topic count
6s "events" # name
9s "snapshots" # name
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3/delete.topics/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3/delete.topics/server.rpt
index 81ad2ed361..1ca5aae5e4 100644
--- a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3/delete.topics/server.rpt
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3/delete.topics/server.rpt
@@ -25,10 +25,11 @@ accepted
connected
-read 39 # size
+read 42 # size
20s # delete topics
3s # v3
(int:newRequestId)
+ 5s "zilla" # client id
2 # topic count
6s "events" # name
9s "snapshots" # name
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/describe.cluster.v0.sasl.handshake.v1/cluster.brokers.info.sasl.plain/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/describe.cluster.v0.sasl.handshake.v1/cluster.brokers.info.sasl.plain/client.rpt
new file mode 100644
index 0000000000..672f2813e4
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/describe.cluster.v0.sasl.handshake.v1/cluster.brokers.info.sasl.plain/client.rpt
@@ -0,0 +1,89 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkConnectWindow 8192
+
+property newRequestId ${kafka:newRequestId()}
+property fetchWaitMax 500
+property fetchBytesMax 65535
+property partitionBytesMax 8192
+
+connect "zilla://streams/net0"
+ option zilla:window ${networkConnectWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+connected
+
+write 22 # size
+ 17s # sasl.handshake
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 5s "PLAIN" # mechanism
+
+read 17 # size
+ ${newRequestId}
+ 0s # no error
+ 1 # mechanisms
+ 5s "PLAIN" # PLAIN
+
+write 37 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 18
+ [0x00] "username" # authentication bytes
+ [0x00] "password"
+
+read 20 # size
+ ${newRequestId}
+ 0s # no error
+ -1
+ -1s # authentication bytes
+ 0L # session lifetime
+
+write 18 # size
+ 60s # describe cluster
+ 0s # v0
+ ${newRequestId}
+ 5s "zilla" # client id
+ [0x00] # tagged fields
+ [0x00] # include cluster authorized ops
+ [0x00] # tagged fields
+
+read 92 # size
+ (int:newRequestId)
+ [0x00] # tagged fields
+ 0 # throttle time ms
+ 0s # error code
+ [0x00] # error message
+ [0x0a] "cluster-0" # cluster id
+ 0 # controller id
+ [0x03] # brokers
+ 1 # broker id
+ [0x14] "broker1.example.com" # host
+ 9092 # port
+ [0x00] # rack
+ [0x00] # tagged fields
+ 2 # broker id
+ [0x14] "broker2.example.com" # host
+ 9092 # port
+ [0x00] # rack
+ [0x00] # tagged fields
+ 0 # cluster authorized operations
+ [0x00] # tagged fields
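DescribeCluster v0 is a flexible-version API, so unlike the other scripts here its body strings and arrays use the compact encoding: lengths are written as unsigned varints of N + 1, and every structure ends with a tagged-fields count. That is why `"cluster-0"` (9 chars) is prefixed with `[0x0a]`, the 19-char hostnames with `[0x14]`, the 2-element broker array with `[0x03]`, and `[0x00]` stands in for null or empty. The request header's `client_id` stays a classic INT16-length string even in flexible versions, which is why `5s "zilla"` still appears. A sketch of the length rule:

```java
// Compact (flexible-version) length prefixes, assuming values small enough
// that the unsigned varint fits in one byte, as everywhere in these scripts.
public final class CompactEncodingSketch
{
    static byte compactStringPrefix(String s)     // "cluster-0" -> 0x0a
    {
        return (byte) (s.length() + 1);           // 0x00 alone would mean null
    }

    static byte compactArrayPrefix(int entries)   // 2 brokers -> 0x03
    {
        return (byte) (entries + 1);
    }
}
```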
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/describe.cluster.v0.sasl.handshake.v1/cluster.brokers.info.sasl.plain/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/describe.cluster.v0.sasl.handshake.v1/cluster.brokers.info.sasl.plain/server.rpt
new file mode 100644
index 0000000000..abd3c04d4e
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/describe.cluster.v0.sasl.handshake.v1/cluster.brokers.info.sasl.plain/server.rpt
@@ -0,0 +1,86 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkAcceptWindow 8192
+
+accept "zilla://streams/net0"
+ option zilla:window ${networkAcceptWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+accepted
+
+connected
+
+read 22 # size
+ 17s # sasl.handshake
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 5s "PLAIN" # mechanism
+
+write 17 # size
+ ${requestId}
+ 0s # no error
+ 1 # mechanisms
+ 5s "PLAIN" # PLAIN
+
+read 37 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 18
+ [0x00] "username" # authentication bytes
+ [0x00] "password"
+
+write 20 # size
+ ${requestId}
+ 0s # no error
+ -1
+ -1s # authentication bytes
+ 0L # session lifetime
+
+read 18 # size
+ 60s # describe cluster
+ 0s # v0
+ (int:newRequestId)
+ 5s "zilla" # client id
+ [0x00] # tagged fields
+ [0x00] # include cluster authorized ops
+ [0x00] # tagged fields
+
+write 92 # size
+ ${newRequestId}
+ [0x00] # tagged fields
+ 0 # throttle time ms
+ 0s # error code
+ [0x00] # error message
+ [0x0a] "cluster-0" # cluster id
+ 0 # controller id
+ [0x03] # brokers
+ 1 # broker id
+ [0x14] "broker1.example.com" # host
+ 9092 # port
+ [0x00] # rack
+ [0x00] # tagged fields
+ 2 # broker id
+ [0x14] "broker2.example.com" # host
+ 9092 # port
+ [0x00] # rack
+ [0x00] # tagged fields
+ 0 # cluster authorized operations
+ [0x00] # tagged fields
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/describe.cluster.v0.sasl.handshake.v1/cluster.brokers.info.sasl.scram/client.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/describe.cluster.v0.sasl.handshake.v1/cluster.brokers.info.sasl.scram/client.rpt
new file mode 100644
index 0000000000..071925c476
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/describe.cluster.v0.sasl.handshake.v1/cluster.brokers.info.sasl.scram/client.rpt
@@ -0,0 +1,103 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkConnectWindow 8192
+
+property newRequestId ${kafka:newRequestId()}
+property fetchWaitMax 500
+property fetchBytesMax 65535
+property partitionBytesMax 8192
+
+connect "zilla://streams/net0"
+ option zilla:window ${networkConnectWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+connected
+
+write 28 # size
+ 17s # sasl.handshake
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 11s "SCRAM-SHA-1" # mechanism
+
+read 23 # size
+ ${newRequestId}
+ 0s # no error
+ 1 # mechanisms
+ 11s "SCRAM-SHA-1" # SCRAM
+
+write 55 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 36 # authentication bytes
+ "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL"
+
+read 92 # size
+ ${newRequestId}
+ 0s # no error
+ -1s
+ 70 "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096"
+ 0L # session lifetime
+
+write 101 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ ${newRequestId}
+ 5s "zilla" # client id
+ 82 # authentication bytes
+ "c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts="
+
+read 52 # size
+ ${newRequestId}
+ 0s # no error
+ -1s
+ 30 "v=rmF9pqV8S7suAoZWja4dJRkFsKQ="
+ 0L # session lifetime
+
+write 18 # size
+ 60s # describe cluster
+ 0s # v0
+ ${newRequestId}
+ 5s "zilla" # client id
+ [0x00] # tagged fields
+ [0x00] # include cluster authorized ops
+ [0x00] # tagged fields
+
+read 92 # size
+ (int:newRequestId)
+ [0x00] # tagged fields
+ 0 # throttle time ms
+ 0s # error code
+ [0x00] # error message
+ [0x0a] "cluster-0" # cluster id
+ 0 # controller id
+ [0x03] # brokers
+ 1 # broker id
+ [0x14] "broker1.example.com" # host
+ 9092 # port
+ [0x00] # rack
+ [0x00] # tagged fields
+ 2 # broker id
+ [0x14] "broker2.example.com" # host
+ 9092 # port
+ [0x00] # rack
+ [0x00] # tagged fields
+ 0 # cluster authorized operations
+ [0x00] # tagged fields
diff --git a/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/describe.cluster.v0.sasl.handshake.v1/cluster.brokers.info.sasl.scram/server.rpt b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/describe.cluster.v0.sasl.handshake.v1/cluster.brokers.info.sasl.scram/server.rpt
new file mode 100644
index 0000000000..4ca492e889
--- /dev/null
+++ b/specs/binding-kafka.spec/src/main/scripts/io/aklivity/zilla/specs/binding/kafka/streams/network/describe.cluster.v0.sasl.handshake.v1/cluster.brokers.info.sasl.scram/server.rpt
@@ -0,0 +1,100 @@
+#
+# Copyright 2021-2023 Aklivity Inc.
+#
+# Aklivity licenses this file to you under the Apache License,
+# version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+property networkAcceptWindow 8192
+
+accept "zilla://streams/net0"
+ option zilla:window ${networkAcceptWindow}
+ option zilla:transmission "duplex"
+ option zilla:byteorder "network"
+
+accepted
+
+connected
+
+read 28 # size
+ 17s # sasl.handshake
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 11s "SCRAM-SHA-1" # mechanism
+
+write 23 # size
+ ${requestId}
+ 0s # no error
+ 1 # mechanisms
+ 11s "SCRAM-SHA-1" # SCRAM
+
+read 55 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 36 # authentication bytes
+ "n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL"
+
+write 92 # size
+ ${requestId}
+ 0s # no error
+ -1s
+ 70 "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096" # authentication bytes
+ 0L # session lifetime
+
+read 101 # size
+ 36s # sasl.authenticate
+ 1s # v1
+ (int:requestId)
+ 5s "zilla" # client id
+ 82 # authentication bytes
+ "c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts="
+
+write 52 # size
+ ${requestId}
+ 0s # no error
+ -1s
+ 30 "v=rmF9pqV8S7suAoZWja4dJRkFsKQ=" # authentication bytes
+ 0L # session lifetime
+
+read 18 # size
+ 60s # describe cluster
+ 0s # v0
+ (int:newRequestId)
+ 5s "zilla" # client id
+ [0x00] # tagged fields
+ [0x00] # include cluster authorized ops
+ [0x00] # tagged fields
+
+write 92 # size
+ ${newRequestId}
+ [0x00] # tagged fields
+ 0 # throttle time ms
+ 0s # error code
+ [0x00] # error message
+ [0x0a] "cluster-0" # cluster id
+ 0 # controller id
+ [0x03] # brokers
+ 1 # broker id
+ [0x14] "broker1.example.com" # host
+ 9092 # port
+ [0x00] # rack
+ [0x00] # tagged fields
+ 2 # broker id
+ [0x14] "broker2.example.com" # host
+ 9092 # port
+ [0x00] # rack
+ [0x00] # tagged fields
+ 0 # cluster authorized operations
+ [0x00] # tagged fields
diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/AlterConfigsSaslIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/AlterConfigsSaslIT.java
new file mode 100644
index 0000000000..977230d2b0
--- /dev/null
+++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/AlterConfigsSaslIT.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.aklivity.zilla.specs.binding.kafka.streams.network;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.junit.rules.RuleChain.outerRule;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.DisableOnDebug;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+
+import io.aklivity.k3po.runtime.junit.annotation.Specification;
+import io.aklivity.k3po.runtime.junit.rules.K3poRule;
+
+public class AlterConfigsSaslIT
+{
+ private final K3poRule k3po = new K3poRule()
+ .addScriptRoot("net",
+ "io/aklivity/zilla/specs/binding/kafka/streams/network/alter.configs.v1.sasl.handshake.v1");
+
+ private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS));
+
+ @Rule
+ public final TestRule chain = outerRule(k3po).around(timeout);
+
+ @Test
+ @Specification({
+ "${net}/alter.topics.config.sasl.plain/client",
+ "${net}/alter.topics.config.sasl.plain/server"})
+ public void shouldAlterTopicsConfigWithSaslPlain() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Test
+ @Specification({
+ "${net}/alter.topics.config.sasl.scram/client",
+ "${net}/alter.topics.config.sasl.scram/server"})
+ public void shouldAlterTopicsConfigWithSaslScram() throws Exception
+ {
+ k3po.finish();
+ }
+}
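These new IT classes all follow the same spec-level pattern: the K3poRule points at a script root, each @Specification pairs a client script with its matching server script, and k3po.finish() drives both robots to completion against each other (no Zilla engine is involved at this layer). Adding coverage for a future scenario is mechanical; a hypothetical extra case, assuming matching client/server scripts existed, would look like:

```java
// Hypothetical additional scenario for AlterConfigsSaslIT; the script
// directory name below is an assumption, not a file in this changeset.
@Test
@Specification({
    "${net}/alter.topics.config.sasl.plain.error/client",
    "${net}/alter.topics.config.sasl.plain.error/server"})
public void shouldRejectAlterTopicsConfigWithSaslPlainError() throws Exception
{
    k3po.finish();
}
```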
diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/CreateTopicsSaslIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/CreateTopicsSaslIT.java
new file mode 100644
index 0000000000..681a63ddd4
--- /dev/null
+++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/CreateTopicsSaslIT.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.aklivity.zilla.specs.binding.kafka.streams.network;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.junit.rules.RuleChain.outerRule;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.DisableOnDebug;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+
+import io.aklivity.k3po.runtime.junit.annotation.Specification;
+import io.aklivity.k3po.runtime.junit.rules.K3poRule;
+
+public class CreateTopicsSaslIT
+{
+ private final K3poRule k3po = new K3poRule()
+ .addScriptRoot("net",
+ "io/aklivity/zilla/specs/binding/kafka/streams/network/create.topics.v3.sasl.handshake.v1");
+
+ private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS));
+
+ @Rule
+ public final TestRule chain = outerRule(k3po).around(timeout);
+
+ @Test
+ @Specification({
+ "${net}/create.topics.sasl.plain/client",
+ "${net}/create.topics.sasl.plain/server"})
+ public void shouldCreateTopicsWithSaslPlain() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Test
+ @Specification({
+ "${net}/create.topics.sasl.scram/client",
+ "${net}/create.topics.sasl.scram/server"})
+ public void shouldCreateTopicsWithSaslScram() throws Exception
+ {
+ k3po.finish();
+ }
+}
diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/DeleteTopicsSaslIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/DeleteTopicsSaslIT.java
new file mode 100644
index 0000000000..5dd4e8d9ce
--- /dev/null
+++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/DeleteTopicsSaslIT.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.aklivity.zilla.specs.binding.kafka.streams.network;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.junit.rules.RuleChain.outerRule;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.DisableOnDebug;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+
+import io.aklivity.k3po.runtime.junit.annotation.Specification;
+import io.aklivity.k3po.runtime.junit.rules.K3poRule;
+
+public class DeleteTopicsSaslIT
+{
+ private final K3poRule k3po = new K3poRule()
+ .addScriptRoot("net",
+ "io/aklivity/zilla/specs/binding/kafka/streams/network/delete.topics.v3.sasl.handshake.v1");
+
+ private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS));
+
+ @Rule
+ public final TestRule chain = outerRule(k3po).around(timeout);
+
+ @Test
+ @Specification({
+ "${net}/delete.topics.sasl.plain/client",
+ "${net}/delete.topics.sasl.plain/server"})
+ public void shouldDeleteTopicsWithSaslPlain() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Test
+ @Specification({
+ "${net}/delete.topics.sasl.scram/client",
+ "${net}/delete.topics.sasl.scram/server"})
+ public void shouldDeleteTopicsWithSaslScram() throws Exception
+ {
+ k3po.finish();
+ }
+}
diff --git a/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/DescribeClusterSaslIT.java b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/DescribeClusterSaslIT.java
new file mode 100644
index 0000000000..e333c72c24
--- /dev/null
+++ b/specs/binding-kafka.spec/src/test/java/io/aklivity/zilla/specs/binding/kafka/streams/network/DescribeClusterSaslIT.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2021-2023 Aklivity Inc.
+ *
+ * Aklivity licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.aklivity.zilla.specs.binding.kafka.streams.network;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.junit.rules.RuleChain.outerRule;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.DisableOnDebug;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+
+import io.aklivity.k3po.runtime.junit.annotation.Specification;
+import io.aklivity.k3po.runtime.junit.rules.K3poRule;
+
+public class DescribeClusterSaslIT
+{
+ private final K3poRule k3po = new K3poRule()
+ .addScriptRoot("net",
+ "io/aklivity/zilla/specs/binding/kafka/streams/network/describe.cluster.v0.sasl.handshake.v1");
+
+ private final TestRule timeout = new DisableOnDebug(new Timeout(5, SECONDS));
+
+ @Rule
+ public final TestRule chain = outerRule(k3po).around(timeout);
+
+ @Test
+ @Specification({
+ "${net}/cluster.brokers.info.sasl.plain/client",
+ "${net}/cluster.brokers.info.sasl.plain/server"})
+ public void shouldDescribeClusterBrokerInfoWithSaslPlain() throws Exception
+ {
+ k3po.finish();
+ }
+
+ @Test
+ @Specification({
+ "${net}/cluster.brokers.info.sasl.scram/client",
+ "${net}/cluster.brokers.info.sasl.scram/server"})
+ public void shouldDescribeClusterBrokerInfoWithSaslScram() throws Exception
+ {
+ k3po.finish();
+ }
+}
diff --git a/specs/binding-mqtt-kafka.spec/pom.xml b/specs/binding-mqtt-kafka.spec/pom.xml
index 926407bc3f..9a761dbe32 100644
--- a/specs/binding-mqtt-kafka.spec/pom.xml
+++ b/specs/binding-mqtt-kafka.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/binding-mqtt.spec/pom.xml b/specs/binding-mqtt.spec/pom.xml
index c20eac342d..93754aa66e 100644
--- a/specs/binding-mqtt.spec/pom.xml
+++ b/specs/binding-mqtt.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/binding-openapi-asyncapi.spec/pom.xml b/specs/binding-openapi-asyncapi.spec/pom.xml
index 693fe25160..ff2f59f36e 100644
--- a/specs/binding-openapi-asyncapi.spec/pom.xml
+++ b/specs/binding-openapi-asyncapi.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/binding-openapi.spec/pom.xml b/specs/binding-openapi.spec/pom.xml
index 75742a8b8d..69c54a0721 100644
--- a/specs/binding-openapi.spec/pom.xml
+++ b/specs/binding-openapi.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/binding-proxy.spec/pom.xml b/specs/binding-proxy.spec/pom.xml
index 105e34c325..9f1f424bb7 100644
--- a/specs/binding-proxy.spec/pom.xml
+++ b/specs/binding-proxy.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/binding-sse-kafka.spec/pom.xml b/specs/binding-sse-kafka.spec/pom.xml
index efec187ade..d239d8bc7e 100644
--- a/specs/binding-sse-kafka.spec/pom.xml
+++ b/specs/binding-sse-kafka.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/binding-sse.spec/pom.xml b/specs/binding-sse.spec/pom.xml
index e57e98c193..08f93a2695 100644
--- a/specs/binding-sse.spec/pom.xml
+++ b/specs/binding-sse.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/binding-tcp.spec/pom.xml b/specs/binding-tcp.spec/pom.xml
index b2623ecdaf..5391af661c 100644
--- a/specs/binding-tcp.spec/pom.xml
+++ b/specs/binding-tcp.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/binding-tls.spec/pom.xml b/specs/binding-tls.spec/pom.xml
index 76142853f0..9e6e183ffe 100644
--- a/specs/binding-tls.spec/pom.xml
+++ b/specs/binding-tls.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/binding-ws.spec/pom.xml b/specs/binding-ws.spec/pom.xml
index b49ca66b9f..dfc4d053a0 100644
--- a/specs/binding-ws.spec/pom.xml
+++ b/specs/binding-ws.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/catalog-apicurio.spec/pom.xml b/specs/catalog-apicurio.spec/pom.xml
index c4ebd7c265..c7758fcfa0 100644
--- a/specs/catalog-apicurio.spec/pom.xml
+++ b/specs/catalog-apicurio.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/catalog-filesystem.spec/pom.xml b/specs/catalog-filesystem.spec/pom.xml
index dc57384b1a..6e348598b2 100644
--- a/specs/catalog-filesystem.spec/pom.xml
+++ b/specs/catalog-filesystem.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/catalog-inline.spec/pom.xml b/specs/catalog-inline.spec/pom.xml
index 0b63a46de2..09fe911b7e 100644
--- a/specs/catalog-inline.spec/pom.xml
+++ b/specs/catalog-inline.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/catalog-karapace.spec/pom.xml b/specs/catalog-karapace.spec/pom.xml
index 3489a503ed..73901d083f 100644
--- a/specs/catalog-karapace.spec/pom.xml
+++ b/specs/catalog-karapace.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/catalog-schema-registry.spec/pom.xml b/specs/catalog-schema-registry.spec/pom.xml
index 8cfea113d9..c4069b3eb2 100644
--- a/specs/catalog-schema-registry.spec/pom.xml
+++ b/specs/catalog-schema-registry.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/engine.spec/pom.xml b/specs/engine.spec/pom.xml
index 9a2a606d87..f57168b289 100644
--- a/specs/engine.spec/pom.xml
+++ b/specs/engine.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/exporter-otlp.spec/pom.xml b/specs/exporter-otlp.spec/pom.xml
index b58fdaaedc..4aae1ca78b 100644
--- a/specs/exporter-otlp.spec/pom.xml
+++ b/specs/exporter-otlp.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/exporter-prometheus.spec/pom.xml b/specs/exporter-prometheus.spec/pom.xml
index 9ed6e4cd5c..2cf1d7212a 100644
--- a/specs/exporter-prometheus.spec/pom.xml
+++ b/specs/exporter-prometheus.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/exporter-stdout.spec/pom.xml b/specs/exporter-stdout.spec/pom.xml
index bc250c91f5..1086bd5ecc 100644
--- a/specs/exporter-stdout.spec/pom.xml
+++ b/specs/exporter-stdout.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/filesystem-http.spec/pom.xml b/specs/filesystem-http.spec/pom.xml
index b823726e39..ce328f4b11 100644
--- a/specs/filesystem-http.spec/pom.xml
+++ b/specs/filesystem-http.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/guard-jwt.spec/pom.xml b/specs/guard-jwt.spec/pom.xml
index 3f1f634628..7964825128 100644
--- a/specs/guard-jwt.spec/pom.xml
+++ b/specs/guard-jwt.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/metrics-grpc.spec/pom.xml b/specs/metrics-grpc.spec/pom.xml
index 832a441a99..a5919639d3 100644
--- a/specs/metrics-grpc.spec/pom.xml
+++ b/specs/metrics-grpc.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/metrics-http.spec/pom.xml b/specs/metrics-http.spec/pom.xml
index 12a00a1330..7eae34dcc8 100644
--- a/specs/metrics-http.spec/pom.xml
+++ b/specs/metrics-http.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/metrics-stream.spec/pom.xml b/specs/metrics-stream.spec/pom.xml
index 6eb1a1c70c..25aa3f6a28 100644
--- a/specs/metrics-stream.spec/pom.xml
+++ b/specs/metrics-stream.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/model-avro.spec/pom.xml b/specs/model-avro.spec/pom.xml
index f6936de111..c0a1f14818 100644
--- a/specs/model-avro.spec/pom.xml
+++ b/specs/model-avro.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/model-core.spec/pom.xml b/specs/model-core.spec/pom.xml
index 268b6a1d13..2098be2739 100644
--- a/specs/model-core.spec/pom.xml
+++ b/specs/model-core.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/model-json.spec/pom.xml b/specs/model-json.spec/pom.xml
index 403e308156..181821985f 100644
--- a/specs/model-json.spec/pom.xml
+++ b/specs/model-json.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/model-protobuf.spec/pom.xml b/specs/model-protobuf.spec/pom.xml
index 7cae2c3dd4..f550f7f5ea 100644
--- a/specs/model-protobuf.spec/pom.xml
+++ b/specs/model-protobuf.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/pom.xml b/specs/pom.xml
index 24445d9819..04b95ef3b1 100644
--- a/specs/pom.xml
+++ b/specs/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>zilla</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/specs/vault-filesystem.spec/pom.xml b/specs/vault-filesystem.spec/pom.xml
index c3603f09c0..a2d2fe8dee 100644
--- a/specs/vault-filesystem.spec/pom.xml
+++ b/specs/vault-filesystem.spec/pom.xml
@@ -8,7 +8,7 @@
     <parent>
         <groupId>io.aklivity.zilla</groupId>
         <artifactId>specs</artifactId>
-        <version>0.9.95</version>
+        <version>0.9.96</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 