diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..29071a8 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,9 @@ +version: 2 +updates: + - package-ecosystem: "gradle" + directory: "/" + schedule: + interval: "daily" + allow: + - dependency-name: "org.apache.kafka:*" + - dependency-name: "com.solacesystems:*" diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 58c2905..a074f81 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -4,24 +4,73 @@ name: build on: pull_request: - + push: jobs: + dupe_check: + name: Check for Duplicate Workflow Run + runs-on: ubuntu-latest + outputs: + should_skip: ${{ steps.skip_check.outputs.should_skip }} + steps: + - id: skip_check + uses: fkirc/skip-duplicate-actions@v3.4.0 + with: + concurrent_skipping: same_content + do_not_skip: '["pull_request", "workflow_dispatch", "schedule"]' + build: + needs: + - dupe_check + if: needs.dupe_check.outputs.should_skip != 'true' || (github.event_name == 'push' && github.ref == 'refs/heads/master' && github.repository_owner == 'SolaceProducts') runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - - name: Setup JDK 1.8 - uses: actions/setup-java@v1 with: - java-version: 1.8 + submodules: recursive + + - name: Cache Gradle + uses: actions/cache@v2 + with: + path: | + ~/.gradle/caches + ~/.gradle/wrapper + key: ${{ runner.os }}-gradle-build-test-${{ hashFiles('**/*.gradle*', '**/gradle-wrapper.properties') }} + restore-keys: | + ${{ runner.os }}-gradle-build-test- + + - name: Setup JDK 8 + uses: actions/setup-java@v2 + with: + distribution: zulu + java-version: 8 + + - name: Validate Gradle wrapper + uses: gradle/wrapper-validation-action@v1 + + - name: Install Test Support + working-directory: solace-integration-test-support + run: ./mvnw clean install -DskipTests + - name: Build and test with Gradle - run: ./gradlew clean integrationTest --tests com.solace.connector.kafka.connect.sink.it.SinkConnectorIT + run: ./gradlew clean test integrationTest jacocoFullReport --info + + - name: Upload Test Artifacts + if: always() + uses: actions/upload-artifact@v2 + with: + name: Test Results + path: | + **/build/jacoco/*.exec + **/build/reports/ + **/build/test-results/**/*.xml + - name: Publish artifacts - if: github.event_name == 'push' + # Security Measure: Do not publish artifacts from dependabot builds + if: github.event_name == 'push' && (github.actor != 'dependabot[bot]' || !contains(github.ref, 'dependabot')) run: | if [ ${{ github.ref }} == 'refs/heads/master' ] && [ ${{ github.repository_owner }} == 'SolaceProducts' ] ; then echo "Using master on SolaceProducts" @@ -57,4 +106,30 @@ jobs: git remote add origin-pages https://${{ secrets.GH_TOKEN }}@github.com/${{ github.repository }}.git > /dev/null 2>&1; git push --quiet --set-upstream origin-pages gh-pages; echo "Updated and pushed GH pages!"; - fi + fi + + - name: Cleanup Gradle Cache + # Remove some files from the Gradle cache, so they aren't cached by GitHub Actions. + # Restoring these files from a GitHub Actions cache might cause problems for future builds. 
+ run: | + rm -f ~/.gradle/caches/modules-2/modules-2.lock + rm -f ~/.gradle/caches/modules-2/gc.properties + + - name: Publish Unit Test Results + if: github.actor != 'dependabot[bot]' || (github.event_name == 'push' && !contains(github.ref, 'dependabot')) + uses: EnricoMi/publish-unit-test-result-action@v1 + continue-on-error: true + with: + check_name: Unit Test Results + comment_mode: create new + fail_on: nothing + hide_comments: orphaned commits + files: | + **/build/test-results/**/*.xml + + - name: Publish Test Coverage Results + if: github.event_name == 'pull_request' && github.actor != 'dependabot[bot]' && github.event.pull_request.head.repo.full_name == github.repository + uses: madrapps/jacoco-report@v1.2 + with: + paths: build/reports/jacoco/jacocoFullReport/jacocoFullReport.xml + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 0000000..dc2420a --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,96 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "Code Analysis (CodeQL)" + +on: + push: + pull_request: + schedule: + - cron: '38 15 * * 0' + workflow_dispatch: + +jobs: + dupe_check: + name: Check for Duplicate Workflow Run + runs-on: ubuntu-latest + outputs: + should_skip: ${{ steps.skip_check.outputs.should_skip }} + steps: + - id: skip_check + uses: fkirc/skip-duplicate-actions@v3.4.0 + with: + concurrent_skipping: same_content + do_not_skip: '["pull_request", "workflow_dispatch", "schedule"]' + + analyze: + name: Analyze + needs: + - dupe_check + if: needs.dupe_check.outputs.should_skip != 'true' + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Cache Gradle + uses: actions/cache@v2 + with: + path: | + ~/.gradle/caches + ~/.gradle/wrapper + key: ${{ runner.os }}-gradle-codeql-${{ hashFiles('**/*.gradle*', '**/gradle-wrapper.properties') }} + restore-keys: | + ${{ runner.os }}-gradle-codeql- + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: java + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + - name: Validate Gradle wrapper + uses: gradle/wrapper-validation-action@v1 + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v1 + + # ℹī¸ Command-line programs to run using the OS shell. 
+ # 📚 https://git.io/JvXDl + + # ✏ī¸ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 + + - name: Cleanup Gradle Cache + # Remove some files from the Gradle cache, so they aren't cached by GitHub Actions. + # Restoring these files from a GitHub Actions cache might cause problems for future builds. + run: | + rm -f ~/.gradle/caches/modules-2/modules-2.lock + rm -f ~/.gradle/caches/modules-2/gc.properties diff --git a/.github/workflows/pmd-analysis.yml b/.github/workflows/pmd-analysis.yml new file mode 100644 index 0000000..84ecd6e --- /dev/null +++ b/.github/workflows/pmd-analysis.yml @@ -0,0 +1,71 @@ +name: Code Analysis (PMD) + +on: + pull_request: + push: + workflow_dispatch: + +jobs: + dupe_check: + name: Check for Duplicate Workflow Run + runs-on: ubuntu-latest + outputs: + should_skip: ${{ steps.skip_check.outputs.should_skip }} + steps: + - id: skip_check + uses: fkirc/skip-duplicate-actions@v3.4.0 + with: + concurrent_skipping: same_content + do_not_skip: '["pull_request", "workflow_dispatch", "schedule"]' + + run-analysis: + name: Run PMD Static Code Analysis + needs: + - dupe_check + if: needs.dupe_check.outputs.should_skip != 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Cache Gradle + uses: actions/cache@v2 + with: + path: | + ~/.gradle/caches + ~/.gradle/wrapper + key: ${{ runner.os }}-gradle-pmd-${{ hashFiles('**/*.gradle*', '**/gradle-wrapper.properties') }} + restore-keys: | + ${{ runner.os }}-gradle-pmd- + + - name: Setup JDK 8 + uses: actions/setup-java@v2 + with: + distribution: zulu + java-version: 8 + + - name: Validate Gradle wrapper + uses: gradle/wrapper-validation-action@v1 + + - name: Run static code analysis + run: ./gradlew clean pmdMainSarif --info + + - name: Upload Test Artifacts + if: always() + uses: actions/upload-artifact@v2 + with: + name: Static Code Analysis Results (PMD) + path: | + **/build/reports/ + + - name: Upload SARIF file + if: success() || failure() + uses: github/codeql-action/upload-sarif@v1 + with: + sarif_file: build/reports/pmd/main.sarif + + - name: Cleanup Gradle Cache + # Remove some files from the Gradle cache, so they aren't cached by GitHub Actions. + # Restoring these files from a GitHub Actions cache might cause problems for future builds. 
+ run: | + rm -f ~/.gradle/caches/modules-2/modules-2.lock + rm -f ~/.gradle/caches/modules-2/gc.properties \ No newline at end of file diff --git a/.github/workflows/spotbugs-analysis.yml b/.github/workflows/spotbugs-analysis.yml new file mode 100644 index 0000000..f1cd934 --- /dev/null +++ b/.github/workflows/spotbugs-analysis.yml @@ -0,0 +1,71 @@ +name: Code Analysis (SpotBugs) + +on: + pull_request: + push: + workflow_dispatch: + +jobs: + dupe_check: + name: Check for Duplicate Workflow Run + runs-on: ubuntu-latest + outputs: + should_skip: ${{ steps.skip_check.outputs.should_skip }} + steps: + - id: skip_check + uses: fkirc/skip-duplicate-actions@v3.4.0 + with: + concurrent_skipping: same_content + do_not_skip: '["pull_request", "workflow_dispatch", "schedule"]' + + run-analysis: + name: Run SpotBugs Static Code Analysis + needs: + - dupe_check + if: needs.dupe_check.outputs.should_skip != 'true' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Cache Gradle + uses: actions/cache@v2 + with: + path: | + ~/.gradle/caches + ~/.gradle/wrapper + key: ${{ runner.os }}-gradle-spotbugs-${{ hashFiles('**/*.gradle*', '**/gradle-wrapper.properties') }} + restore-keys: | + ${{ runner.os }}-gradle-spotbugs- + + - name: Setup JDK 8 + uses: actions/setup-java@v2 + with: + distribution: zulu + java-version: 8 + + - name: Validate Gradle wrapper + uses: gradle/wrapper-validation-action@v1 + + - name: Run static code analysis + run: ./gradlew clean spotbugsMain --info + + - name: Upload Test Artifacts + if: always() + uses: actions/upload-artifact@v2 + with: + name: Static Code Analysis Results (SpotBugs) + path: | + **/build/reports/ + + - name: Upload SARIF file + if: success() || failure() + uses: github/codeql-action/upload-sarif@v1 + with: + sarif_file: build/reports/spotbugs/main.sarif + + - name: Cleanup Gradle Cache + # Remove some files from the Gradle cache, so they aren't cached by GitHub Actions. + # Restoring these files from a GitHub Actions cache might cause problems for future builds. 
+ run: | + rm -f ~/.gradle/caches/modules-2/modules-2.lock + rm -f ~/.gradle/caches/modules-2/gc.properties \ No newline at end of file diff --git a/.gitignore b/.gitignore index 2f4dd92..d2e5889 100644 --- a/.gitignore +++ b/.gitignore @@ -37,5 +37,12 @@ local.properties *.launch /build/ + +### IntelliJ +.idea +*.iws +*.iml +*.ipr + # Unzipped test connector src/integrationTest/resources/pubsubplus-connector-kafka*/ \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..846409f --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "solace-integration-test-support"] + path = solace-integration-test-support + url = ../../SolaceDev/solace-integration-test-support.git diff --git a/README.md b/README.md index 4cf75cd..3732bce 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,7 @@ [![Actions Status](https://github.com/SolaceProducts/pubsubplus-connector-kafka-sink/workflows/build/badge.svg?branch=master)](https://github.com/SolaceProducts/pubsubplus-connector-kafka-sink/actions?query=workflow%3Abuild+branch%3Amaster) +[![Code Analysis (CodeQL)](https://github.com/SolaceProducts/pubsubplus-connector-kafka-sink/actions/workflows/codeql-analysis.yml/badge.svg?branch=master)](https://github.com/SolaceProducts/pubsubplus-connector-kafka-sink/actions/workflows/codeql-analysis.yml) +[![Code Analysis (PMD)](https://github.com/SolaceProducts/pubsubplus-connector-kafka-sink/actions/workflows/pmd-analysis.yml/badge.svg?branch=master)](https://github.com/SolaceProducts/pubsubplus-connector-kafka-sink/actions/workflows/pmd-analysis.yml) +[![Code Analysis (SpotBugs)](https://github.com/SolaceProducts/pubsubplus-connector-kafka-sink/actions/workflows/spotbugs-analysis.yml/badge.svg?branch=master)](https://github.com/SolaceProducts/pubsubplus-connector-kafka-sink/actions/workflows/spotbugs-analysis.yml) # Solace PubSub+ Connector for Kafka: Sink @@ -154,7 +157,7 @@ In this case the IP address is one of the nodes running the distributed mode wor { "class": "com.solace.connector.kafka.connect.sink.SolaceSinkConnector", "type": "sink", - "version": "2.0.0" + "version": "2.1.0" }, ``` @@ -267,10 +270,14 @@ Note that generally one connector can send to only one queue. ##### Recovery from Kafka Connect API or Kafka Broker Failure +Operators are expected to monitor their connector for failures since errors will cause it to stop. If any are found and the connector was stopped, the operator must explicitly restart it again once the error condition has been resolved. + The Kafka Connect API automatically keeps track of the offset that the Sink Connector has read and processed. If the connector stops or is restarted, the Connect API starts passing records to the connector based on the last saved offset. The time interval to save the last offset can be tuned via the `offset.flush.interval.ms` parameter (default 60,000 ms) in the worker's `connect-distributed.properties` configuration file. +Multiple retries can also be configured using the `errors.retry.timeout` parameter (default 0 ms) in the PubSub+ Sink Connector `solace_sink.properties` configuration file. Please refer to the [Kafka documentation](https://kafka.apache.org/documentation/#connect_errorreporting) for more info on retry configuration options. + Recovery may result in duplicate PubSub+ events published to the Event Mesh. 
As described [above](#record-processors), the Solace message header "User Property Map" contains all the Kafka unique record information which enables identifying and filtering duplicates. #### Multiple Workers @@ -318,27 +325,38 @@ Kerberos has some very specific requirements to operate correctly. Some addition ## Developers Guide -### Build and Test the Project +### Build the Project JDK 8 or higher is required for this project. First, clone this GitHub repo: -``` +```shell git clone https://github.com/SolaceProducts/pubsubplus-connector-kafka-sink.git cd pubsubplus-connector-kafka-sink ``` Then run the build script: -``` -gradlew clean build +```shell +./gradlew clean build ``` This script creates artifacts in the `build` directory, including the deployable packaged PubSub+ Sink Connector archives under `build\distributions`. +### Test the Project + An integration test suite is also included, which spins up a Docker-based deployment environment that includes a PubSub+ event broker, Zookeeper, Kafka broker, Kafka Connect. It deploys the connector to Kafka Connect and runs end-to-end tests. -``` -gradlew clean integrationTest --tests com.solace.connector.kafka.connect.sink.it.SinkConnectorIT -``` + +1. Install the test support module: + ```shell + git submodule update --init --recursive + cd solace-integration-test-support + ./mvnw clean install -DskipTests + cd .. + ``` +2. Run the tests: + ```shell + ./gradlew clean test integrationTest + ``` ### Build a New Record Processor diff --git a/build.gradle b/build.gradle index f36753e..db66b72 100644 --- a/build.gradle +++ b/build.gradle @@ -1,10 +1,17 @@ -apply plugin: 'java' -apply plugin: 'distribution' -apply plugin: 'org.unbroken-dome.test-sets' +import com.github.spotbugs.snom.SpotBugsTask + +plugins { + id 'java' + id 'distribution' + id 'jacoco' + id 'pmd' + id 'com.github.spotbugs' version '4.7.6' + id 'org.unbroken-dome.test-sets' version '2.2.1' +} ext { - kafkaVersion = '2.4.1' - solaceJavaAPIVersion = '10.6.0' + kafkaVersion = '2.8.1' + solaceJavaAPIVersion = '10.12.0' } repositories { @@ -12,16 +19,12 @@ repositories { mavenCentral() } -buildscript { - repositories { - maven { - url "https://plugins.gradle.org/m2/" +sourceSets { + main { + java { + srcDir "${buildDir}/generated/java" // add generated sources as additional source directory } } - dependencies { - classpath "com.github.spotbugs:spotbugs-gradle-plugin:3.0.0" - classpath "org.unbroken-dome.test-sets:org.unbroken-dome.test-sets.gradle.plugin:2.2.1" - } } testSets { @@ -29,37 +32,63 @@ testSets { } dependencies { - integrationTestImplementation 'junit:junit:4.12' - integrationTestImplementation 'org.junit.jupiter:junit-jupiter-api:5.7.1' - integrationTestImplementation 'org.junit.jupiter:junit-jupiter-engine:5.7.1' - integrationTestImplementation 'org.junit.jupiter:junit-jupiter-params:5.7.1' - integrationTestImplementation 'org.junit.platform:junit-platform-engine:1.7.1' - integrationTestImplementation 'org.mockito:mockito-core:3.7.7' - integrationTestImplementation 'org.mockito:mockito-junit-jupiter:3.7.7' - integrationTestImplementation 'org.testcontainers:testcontainers:1.15.1' - integrationTestImplementation 'org.testcontainers:junit-jupiter:1.15.1' - integrationTestImplementation 'org.slf4j:slf4j-api:1.7.28' - integrationTestImplementation 'org.slf4j:slf4j-simple:1.7.28' + integrationTestImplementation 'org.junit.jupiter:junit-jupiter:5.8.1' + integrationTestImplementation 'org.junit-pioneer:junit-pioneer:1.4.2' + integrationTestImplementation 
'org.mockito:mockito-junit-jupiter:3.12.4' + integrationTestImplementation 'org.testcontainers:testcontainers:1.16.0' + integrationTestImplementation 'org.testcontainers:junit-jupiter:1.16.0' + integrationTestImplementation 'org.testcontainers:kafka:1.16.0' + integrationTestImplementation 'com.solace.test.integration:pubsubplus-junit-jupiter:0.5.0' + integrationTestImplementation 'org.slf4j:slf4j-api:1.7.32' + integrationTestImplementation 'org.apache.logging.log4j:log4j-slf4j-impl:2.14.1' integrationTestImplementation 'org.apache.commons:commons-configuration2:2.6' integrationTestImplementation 'commons-beanutils:commons-beanutils:1.9.4' integrationTestImplementation 'com.google.code.gson:gson:2.3.1' integrationTestImplementation 'commons-io:commons-io:2.4' integrationTestImplementation 'com.squareup.okhttp3:okhttp:4.9.1' - integrationTestImplementation 'org.apache.kafka:kafka-clients:$kafkaVersion' + integrationTestImplementation "org.apache.kafka:kafka-clients:$kafkaVersion" + testImplementation 'org.junit.jupiter:junit-jupiter:5.8.1' + testImplementation 'org.mockito:mockito-junit-jupiter:3.12.4' + testImplementation 'org.hamcrest:hamcrest-all:1.3' + testImplementation 'org.apache.logging.log4j:log4j-slf4j-impl:2.14.1' compile "org.apache.kafka:connect-api:$kafkaVersion" compile "com.solacesystems:sol-jcsmp:$solaceJavaAPIVersion" } +pmd { + consoleOutput = true + rulesMinimumPriority = 2 + toolVersion = '6.38.0' +} + +spotbugs { + effort 'max' + reportLevel 'high' // Decrease to medium once medium errors are fixed +} + +task('jacocoFullReport', type: JacocoReport) { + description 'Generates code coverage report for all tests.' + executionData tasks.withType(Test) + sourceSets sourceSets.main + reports { + xml.required = true + } +} + task('prepDistForIntegrationTesting') { dependsOn assembleDist doLast { - copy { - from zipTree(file('build/distributions').listFiles().findAll {it.name.endsWith('.zip')}[0]) - into (file('src/integrationTest/resources')) + copy { + from zipTree(file(distsDirectory).listFiles().findAll { + it.name.endsWith("-${project.version}.zip") + }[0]) + into sourceSets.integrationTest.resources.srcDirs[0] } copy { - from zipTree(file('build/distributions').listFiles().findAll {it.name.endsWith('.zip')}[0]) - into (file('build/resources/integrationTest')) + from zipTree(file(distsDirectory).listFiles().findAll { + it.name.endsWith("-${project.version}.zip") + }[0]) + into sourceSets.integrationTest.output.resourcesDir } } } @@ -77,6 +106,81 @@ project.integrationTest { } } +project.test { + useJUnitPlatform() +} + +tasks.withType(SpotBugsTask) { + reports { + sarif { + enabled = true + } + } +} + +// Workaround to generate Sarif report +// Based off https://github.com/gradle/gradle/blob/v6.9.1/subprojects/code-quality/src/main/groovy/org/gradle/api/plugins/quality/internal/PmdInvoker.groovy +task('pmdMainSarif') { + PmdExtension extension = project.extensions.getByType(PmdExtension) + dependsOn classes + outputs.dir extension.getReportsDir() + doLast { + ant.taskdef(name: 'pmd', + classname: 'net.sourceforge.pmd.ant.PMDTask', + classpath: project.configurations.pmd.asPath) + ant.pmd(failOnRuleViolation: false, + failuresPropertyName: "pmdFailureCount", + minimumPriority: extension.rulesMinimumPriority.get()) { + sourceSets.main.allJava.srcDirs.each { + fileset(dir: it) + } + + extension.ruleSets.each { + ruleset(it) + } + + extension.ruleSetFiles.each { + ruleset(it) + } + + if (extension.ruleSetConfig != null) { + ruleset(extension.ruleSetConfig.asFile()) + } + + 
Provider reportsDir = project.getLayout() + .file(project.getProviders().provider({a -> extension.getReportsDir()}) as Provider) + formatter(type: 'sarif', toFile: new File(reportsDir.get().getAsFile(), 'main.sarif')) + formatter(type: 'html', toFile: new File(reportsDir.get().getAsFile(), 'main.html')) + + if (extension.consoleOutput) { + formatter(type: 'textcolor', toConsole: true) + } + } + + def failureCount = ant.project.properties["pmdFailureCount"] + if (failureCount) { + def message = "$failureCount PMD rule violations were found." + if (extension.ignoreFailures || ((failureCount as Integer) <= extension.maxFailures.get())) { + logger.warn(message) + } else { + throw new GradleException(message) + } + } + } +} + +task('generateJava', type: Copy) { + def templateContext = [version: project.version] + inputs.properties templateContext // Register context as input so that task doesn't skip when props are updated + from 'src/template/java' + into "${buildDir}/generated/java" + expand templateContext +} + +project.compileJava { + dependsOn generateJava +} + distributions { main { contents { diff --git a/etc/solace_sink.properties b/etc/solace_sink.properties index d57d775..ae26863 100644 --- a/etc/solace_sink.properties +++ b/etc/solace_sink.properties @@ -10,7 +10,7 @@ tasks.max=1 value.converter=org.apache.kafka.connect.converters.ByteArrayConverter key.converter=org.apache.kafka.connect.storage.StringConverter -# If tasks.max>1 related tasks will share the same group.id. +# If tasks.max>1 related tasks will share the same group.id. group.id=solSinkConnectorGroup # Kafka topics to read from @@ -34,6 +34,9 @@ sol.topics=sinktest # Refer to https://github.com/SolaceProducts/pubsubplus-connector-kafka-sink sol.record_processor_class=com.solace.connector.kafka.connect.sink.recordprocessor.SolSimpleRecordProcessor +# If enabled, records that throw record processor errors will be discarded. +#sol.record_processor.error.ignore=false + # When using SolSimpleKeyedRecordProcessor, defines how to convert a Kafka record key # to part of which part of a PubSub+ message # Allowable values include: NONE, DESTINATION, CORRELATION_ID, CORRELATION_ID_AS_BYTES @@ -42,6 +45,10 @@ sol.record_processor_class=com.solace.connector.kafka.connect.sink.recordprocess # Set to true only if using SolDynamicDestinationRecordProcessor and dynamic destinations #sol.dynamic_destination=false +# If true, Kafka headers are automatically copied to Solace messages as user properties. +# If the Kafka record contains multiple values for the same header key, the value of the one last-added will be retained. +#sol.emit.kafka.record.headers.enabled=false + # Whether to use transacted session and transactions to publish messages to PubSub+ queue #sol.use_transactions_for_queue=true @@ -50,7 +57,7 @@ sol.record_processor_class=com.solace.connector.kafka.connect.sink.recordprocess #sol.autoflush.size=200 # Starting offset to publish records to PubSub+. If not specified then will only publish new messages. 
-# If specified it applies to all partitions: set to the desired position or 0 to publish all records from the beginning +# If specified it applies to all partitions: set to the desired position or 0 to publish all records from the beginning #sol.kafka_replay_offset= # Connector TLS session to PubSub+ message broker properties diff --git a/gradle.properties b/gradle.properties index fb7cb53..16cc23c 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1 +1 @@ -version=2.0.2 \ No newline at end of file +version=2.1.0 \ No newline at end of file diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index cc4fdc2..e708b1c 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 1b16c34..3ab0b72 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,5 +1,5 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-6.1.1-bin.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-6.9.1-bin.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew index 2fe81a7..4f906e0 100755 --- a/gradlew +++ b/gradlew @@ -82,6 +82,7 @@ esac CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + # Determine the Java command to use to start the JVM. if [ -n "$JAVA_HOME" ] ; then if [ -x "$JAVA_HOME/jre/sh/java" ] ; then @@ -129,6 +130,7 @@ fi if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then APP_HOME=`cygpath --path --mixed "$APP_HOME"` CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` # We build the pattern for arguments to be converted via cygpath diff --git a/gradlew.bat b/gradlew.bat index 24467a1..ac1b06f 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -29,6 +29,9 @@ if "%DIRNAME%" == "" set DIRNAME=. set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" @@ -37,7 +40,7 @@ if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init +if "%ERRORLEVEL%" == "0" goto execute echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. @@ -51,7 +54,7 @@ goto fail set JAVA_HOME=%JAVA_HOME:"=% set JAVA_EXE=%JAVA_HOME%/bin/java.exe -if exist "%JAVA_EXE%" goto init +if exist "%JAVA_EXE%" goto execute echo. echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% @@ -61,28 +64,14 @@ echo location of your Java installation. goto fail -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. 
-set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - :execute @rem Setup the command line set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + @rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* :end @rem End local scope for the variables with windows NT shell diff --git a/solace-integration-test-support b/solace-integration-test-support new file mode 160000 index 0000000..c411ac2 --- /dev/null +++ b/solace-integration-test-support @@ -0,0 +1 @@ +Subproject commit c411ac2e0f82af25ece2994691352cb0d6235142 diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/DockerizedPlatformSetupApache.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/DockerizedPlatformSetupApache.java deleted file mode 100644 index b305e7e..0000000 --- a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/DockerizedPlatformSetupApache.java +++ /dev/null @@ -1,63 +0,0 @@ -package com.solace.connector.kafka.connect.sink.it; - -import static org.junit.jupiter.api.Assertions.assertNotNull; - -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.DisplayName; -import org.junit.jupiter.api.Nested; -import org.junit.jupiter.api.Test; -import org.testcontainers.containers.BindMode; -import org.testcontainers.containers.FixedHostPortGenericContainer; -import org.testcontainers.containers.GenericContainer; -import org.testcontainers.containers.wait.strategy.Wait; -import org.testcontainers.junit.jupiter.Container; - -public class DockerizedPlatformSetupApache implements MessagingServiceFullLocalSetupApache { - - @Container - public final static GenericContainer KAFKA_CONNECT_REST = new FixedHostPortGenericContainer<>("bitnami/kafka:2") - .withEnv("KAFKA_CFG_ZOOKEEPER_CONNECT", dockerIpAddress + ":2181") - .withEnv("ALLOW_PLAINTEXT_LISTENER", "yes") - .withCommand("/bin/sh", "-c", //"sleep 10000") - "sed -i 's/bootstrap.servers=.*/bootstrap.servers=" + dockerIpAddress - + ":39092/g' /opt/bitnami/kafka/config/connect-distributed.properties; " - + "echo 'plugin.path=/opt/bitnami/kafka/jars' >> /opt/bitnami/kafka/config/connect-distributed.properties; " - + "echo 'rest.port=28083' >> /opt/bitnami/kafka/config/connect-distributed.properties; " - + "/opt/bitnami/kafka/bin/connect-distributed.sh /opt/bitnami/kafka/config/connect-distributed.properties") - .withFixedExposedPort(28083,28083) - .withExposedPorts(28083) -//// -// // Enable remote debug session at default port 5005 -// .withEnv("KAFKA_DEBUG", "y") -// .withEnv("DEBUG_SUSPEND_FLAG", "y") -//// - .withClasspathResourceMapping(Tools.getUnzippedConnectorDirName() + "/lib", - "/opt/bitnami/kafka/jars/pubsubplus-connector-kafka", BindMode.READ_ONLY) -// .withStartupTimeout(Duration.ofSeconds(120)) - .waitingFor( Wait.forLogMessage(".*Finished starting connectors and tasks.*", 1) ) - ; - - @BeforeAll - static void setUp() { - assert(KAFKA_CONNECT_REST != null); // Required to instantiate - } - - @DisplayName("Local MessagingService connection tests") - @Nested - class MessagingServiceConnectionTests { - @DisplayName("Setup the dockerized platform") - @Test - @Disabled - void 
setupDockerizedPlatformTest() { - String host = COMPOSE_CONTAINER_PUBSUBPLUS.getServiceHost("solbroker_1", 8080); - assertNotNull(host); - try { - Thread.sleep(36000000l); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - } - } -} diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/DockerizedPlatformSetupConfluent.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/DockerizedPlatformSetupConfluent.java deleted file mode 100644 index d7ead46..0000000 --- a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/DockerizedPlatformSetupConfluent.java +++ /dev/null @@ -1,72 +0,0 @@ -package com.solace.connector.kafka.connect.sink.it; - -import static org.junit.jupiter.api.Assertions.assertNotNull; -import org.junit.jupiter.api.DisplayName; -import org.junit.jupiter.api.Nested; -import org.junit.jupiter.api.Test; -import org.testcontainers.containers.BindMode; -import org.testcontainers.containers.FixedHostPortGenericContainer; -import org.testcontainers.containers.GenericContainer; -import org.testcontainers.containers.wait.strategy.Wait; -import org.testcontainers.junit.jupiter.Container; - -public class DockerizedPlatformSetupConfluent implements MessagingServiceFullLocalSetupConfluent { - - @DisplayName("Local MessagingService connection tests") - @Nested - class MessagingServiceConnectionTests { - - @Container - public final GenericContainer connector = new FixedHostPortGenericContainer<>("confluentinc/cp-kafka-connect-base:5.4.0") - .withEnv("CONNECT_BOOTSTRAP_SERVERS", - COMPOSE_CONTAINER_KAFKA.getServiceHost("kafka_1", 39092) + ":39092") - .withFixedExposedPort(28083,28083) - .withFixedExposedPort(5005,5005) - .withExposedPorts(28083,5005) - .withEnv("CONNECT_REST_PORT", "28083") -// -// // Enable remote debug session at default port 5005 -// .withEnv("KAFKA_DEBUG", "y") -// .withEnv("DEBUG_SUSPEND_FLAG", "y") -// - .withEnv("CONNECT_GROUP_ID", "quickstart-avro") - .withEnv("CONNECT_CONFIG_STORAGE_TOPIC", "quickstart-avro-config") - .withEnv("CONNECT_OFFSET_STORAGE_TOPIC", "quickstart-avro-offsets") - .withEnv("CONNECT_STATUS_STORAGE_TOPIC", "quickstart-avro-status") - .withEnv("CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR", "1") - .withEnv("CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR", "1") - .withEnv("CONNECT_STATUS_STORAGE_REPLICATION_FACTOR", "1") - .withEnv("CONNECT_KEY_CONVERTER", "io.confluent.connect.avro.AvroConverter") - .withEnv("CONNECT_VALUE_CONVERTER", "io.confluent.connect.avro.AvroConverter") - .withEnv("CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL", - "http://" + COMPOSE_CONTAINER_KAFKA.getServiceHost("schema-registry_1", 8081) - + ":8081") - .withEnv("CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL", - "http://" + COMPOSE_CONTAINER_KAFKA.getServiceHost("schema-registry_1", 8081) - + ":8081") - .withEnv("CONNECT_INTERNAL_KEY_CONVERTER", "org.apache.kafka.connect.json.JsonConverter") - .withEnv("CONNECT_INTERNAL_VALUE_CONVERTER", "org.apache.kafka.connect.json.JsonConverter") -// - .withEnv("CONNECT_REST_ADVERTISED_HOST_NAME", "localhost") - .withEnv("CONNECT_LOG4J_ROOT_LOGLEVEL", "INFO") - .withEnv("CONNECT_PLUGIN_PATH", "/usr/share/java,/etc/kafka-connect/jars") - .withClasspathResourceMapping("pubsubplus-connector-kafka-sink/lib", - "/etc/kafka-connect/jars/pubsubplus-connector-kafka", BindMode.READ_ONLY) -// .waitingFor( Wait.forHealthcheck() ); - .waitingFor( Wait.forLogMessage(".*Kafka Connect started.*", 1) ); - - @DisplayName("Setup the dockerized platform") - @Test - void 
setupDockerizedPlatformTest() { - String host = COMPOSE_CONTAINER_PUBSUBPLUS.getServiceHost("solbroker_1", 8080); - assertNotNull(host); - try { - Thread.sleep(36000000l); - } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - - } - } -} diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/MessagingServiceFullLocalSetupApache.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/MessagingServiceFullLocalSetupApache.java deleted file mode 100644 index 88c11c7..0000000 --- a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/MessagingServiceFullLocalSetupApache.java +++ /dev/null @@ -1,46 +0,0 @@ -package com.solace.connector.kafka.connect.sink.it; - -import static org.junit.jupiter.api.Assertions.assertNotNull; - -import java.io.File; -import org.junit.jupiter.api.BeforeAll; -import org.testcontainers.containers.DockerComposeContainer; -import org.testcontainers.junit.jupiter.Container; -import org.testcontainers.junit.jupiter.Testcontainers; -import org.testcontainers.containers.wait.strategy.Wait; - -@Testcontainers -public interface MessagingServiceFullLocalSetupApache extends TestConstants { - - @Container - public static final DockerComposeContainer COMPOSE_CONTAINER_PUBSUBPLUS = - new DockerComposeContainer( - new File(FULL_DOCKER_COMPOSE_FILE_PATH + "docker-compose-solace.yml")) - .withEnv("PUBSUB_NETWORK_NAME", PUBSUB_NETWORK_NAME) - .withEnv("PUBSUB_HOSTNAME", PUBSUB_HOSTNAME) - .withEnv("PUBSUB_TAG", PUBSUB_TAG) - .withServices(SERVICES) - .withLocalCompose(true) - .withPull(false) - .waitingFor("solbroker_1", - Wait.forLogMessage(".*System startup complete.*", 1) ); - - public static final String dockerReportedAddress = COMPOSE_CONTAINER_PUBSUBPLUS.getServiceHost("solbroker_1", 8080); - public static final String dockerIpAddress = (dockerReportedAddress == "localhost" || dockerReportedAddress == "127.0.0.1" ? 
- Tools.getIpAddress() : dockerReportedAddress); - - @Container - public static final DockerComposeContainer COMPOSE_CONTAINER_KAFKA = - new DockerComposeContainer( - new File(FULL_DOCKER_COMPOSE_FILE_PATH + "docker-compose-kafka-apache.yml")) - .withEnv("KAFKA_TOPIC", KAFKA_SINK_TOPIC) - .withEnv("KAFKA_HOST", dockerIpAddress) - .withLocalCompose(true); - - @BeforeAll - static void checkContainer() { - String host = COMPOSE_CONTAINER_PUBSUBPLUS.getServiceHost("solbroker_1", 8080); - assertNotNull(host); - } -} - diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/MessagingServiceFullLocalSetupConfluent.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/MessagingServiceFullLocalSetupConfluent.java deleted file mode 100644 index 130a75a..0000000 --- a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/MessagingServiceFullLocalSetupConfluent.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.solace.connector.kafka.connect.sink.it; - -import static org.junit.jupiter.api.Assertions.assertNotNull; - -import com.google.gson.Gson; -import com.google.gson.JsonElement; -import com.google.gson.JsonObject; -import com.google.gson.JsonParser; - -import java.io.File; -import java.io.IOException; -import java.time.Duration; - -import org.apache.commons.configuration2.Configuration; -import org.apache.commons.configuration2.FileBasedConfiguration; -import org.apache.commons.configuration2.PropertiesConfiguration; -import org.apache.commons.configuration2.builder.FileBasedConfigurationBuilder; -import org.apache.commons.configuration2.builder.fluent.Parameters; -import org.apache.commons.configuration2.ex.ConfigurationException; -import org.apache.commons.io.FileUtils; -import org.junit.jupiter.api.BeforeAll; -import org.testcontainers.containers.DockerComposeContainer; -import org.testcontainers.junit.jupiter.Container; -import org.testcontainers.junit.jupiter.Testcontainers; -import org.testcontainers.containers.wait.strategy.Wait; - -@Testcontainers -public interface MessagingServiceFullLocalSetupConfluent extends TestConstants { - - @Container - public static final DockerComposeContainer COMPOSE_CONTAINER_PUBSUBPLUS = - new DockerComposeContainer( - new File(FULL_DOCKER_COMPOSE_FILE_PATH + "docker-compose-solace.yml")) - .withEnv("PUBSUB_NETWORK_NAME", PUBSUB_NETWORK_NAME) - .withEnv("PUBSUB_HOSTNAME", PUBSUB_HOSTNAME) - .withEnv("PUBSUB_TAG", PUBSUB_TAG) - .withServices(SERVICES) - .withLocalCompose(true) - .withPull(false) - .waitingFor("solbroker_1", - Wait.forLogMessage(".*System startup complete.*", 1) ); - - @Container - public static final DockerComposeContainer COMPOSE_CONTAINER_KAFKA = - new DockerComposeContainer( - new File(FULL_DOCKER_COMPOSE_FILE_PATH + "docker-compose-kafka-confluent.yml")) - .withEnv("KAFKA_TOPIC", KAFKA_SINK_TOPIC) - .withEnv("KAFKA_HOST", COMPOSE_CONTAINER_PUBSUBPLUS.getServiceHost("solbroker_1", 8080)) - .withLocalCompose(true) - .waitingFor("schema-registry_1", - Wait.forHttp("/subjects").forStatusCode(200)); - - @BeforeAll - static void checkContainer() { - String host = COMPOSE_CONTAINER_PUBSUBPLUS.getServiceHost("solbroker_1", 8080); - assertNotNull(host); - } -} - diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/SinkConnectorIT.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/SinkConnectorIT.java index 2b869ad..b439d30 100644 --- a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/SinkConnectorIT.java +++ 
b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/SinkConnectorIT.java @@ -1,231 +1,282 @@ package com.solace.connector.kafka.connect.sink.it; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonObject; +import com.solace.connector.kafka.connect.sink.SolaceSinkConstants; +import com.solace.connector.kafka.connect.sink.it.util.extensions.KafkaArgumentsProvider; +import com.solace.connector.kafka.connect.sink.it.util.extensions.KafkaArgumentsProvider.KafkaArgumentSource; +import com.solace.connector.kafka.connect.sink.it.util.extensions.KafkaArgumentsProvider.KafkaContext; +import com.solace.connector.kafka.connect.sink.it.util.extensions.NetworkPubSubPlusExtension; +import com.solace.test.integration.junit.jupiter.extension.ExecutorServiceExtension; +import com.solace.test.integration.junit.jupiter.extension.ExecutorServiceExtension.ExecSvc; +import com.solace.test.integration.semp.v2.SempV2Api; +import com.solace.test.integration.semp.v2.config.model.ConfigMsgVpnQueue; +import com.solacesystems.jcsmp.BytesXMLMessage; +import com.solacesystems.jcsmp.EndpointProperties; +import com.solacesystems.jcsmp.JCSMPException; +import com.solacesystems.jcsmp.JCSMPFactory; +import com.solacesystems.jcsmp.JCSMPProperties; +import com.solacesystems.jcsmp.JCSMPSession; +import com.solacesystems.jcsmp.Queue; +import com.solacesystems.jcsmp.SDTException; +import com.solacesystems.jcsmp.SDTMap; import org.apache.kafka.clients.producer.RecordMetadata; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Nested; -import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.TestInstance.Lifecycle; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ArgumentsSource; +import org.junitpioneer.jupiter.CartesianProductTest; +import org.junitpioneer.jupiter.CartesianValueSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.testcontainers.containers.output.WaitingConsumer; import org.testcontainers.shaded.com.google.common.collect.ImmutableMap; -import com.solacesystems.jcsmp.BytesXMLMessage; -import com.solacesystems.jcsmp.JCSMPException; -import com.solacesystems.jcsmp.SDTException; -import com.solacesystems.jcsmp.SDTMap; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.fail; - +import java.time.Duration; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTimeoutPreemptively; +import static 
org.junit.jupiter.api.Assertions.fail; -public class SinkConnectorIT extends DockerizedPlatformSetupApache implements TestConstants { +@ExtendWith(ExecutorServiceExtension.class) +@ExtendWith(KafkaArgumentsProvider.AutoDeleteSolaceConnectorDeploymentAfterEach.class) +public class SinkConnectorIT implements TestConstants { - static Logger logger = LoggerFactory.getLogger(SinkConnectorIT.class.getName()); - // Connectordeployment creates a Kafka topic "kafkaTestTopic", which is used next - static SolaceConnectorDeployment connectorDeployment = new SolaceConnectorDeployment(); - static TestKafkaProducer kafkaProducer = new TestKafkaProducer(connectorDeployment.kafkaTestTopic); - static TestSolaceConsumer solaceConsumer = new TestSolaceConsumer(); + static Logger logger = LoggerFactory.getLogger(SinkConnectorIT.class); + static TestSolaceQueueConsumer solaceQueueConsumer; + static TestSolaceTopicConsumer solaceTopicConsumer; // Used to request additional verification types - static enum AdditionalCheck { ATTACHMENTBYTEBUFFER, CORRELATIONID } - + enum AdditionalCheck { ATTACHMENTBYTEBUFFER, CORRELATIONID } + + private Properties connectorProps; + + @RegisterExtension + public static final NetworkPubSubPlusExtension PUB_SUB_PLUS_EXTENSION = new NetworkPubSubPlusExtension(); + //////////////////////////////////////////////////// // Main setup/teardown @BeforeAll - static void setUp() { - try { - connectorDeployment.waitForConnectorRestIFUp(); - connectorDeployment.provisionKafkaTestTopic(); - // Start consumer - // Ensure test queue exists on PubSub+ - solaceConsumer.initialize("tcp://" + MessagingServiceFullLocalSetupConfluent.COMPOSE_CONTAINER_PUBSUBPLUS - .getServiceHost("solbroker_1", 55555) + ":55555", "default", "default", "default"); - solaceConsumer.provisionQueue(SOL_QUEUE); - solaceConsumer.start(); - kafkaProducer.start(); - Thread.sleep(1000l); - } catch (JCSMPException | InterruptedException e1) { - e1.printStackTrace(); - } + static void setUp(JCSMPSession jcsmpSession) throws JCSMPException { + // Start consumer + // Ensure test queue exists on PubSub+ + solaceTopicConsumer = new TestSolaceTopicConsumer(jcsmpSession); + solaceTopicConsumer.start(); + solaceQueueConsumer = new TestSolaceQueueConsumer(jcsmpSession); + solaceQueueConsumer.provisionQueue(SOL_QUEUE); + solaceQueueConsumer.start(); + } + + @BeforeEach + public void beforeEach(JCSMPProperties jcsmpProperties) { + connectorProps = new Properties(); + connectorProps.setProperty(SolaceSinkConstants.SOL_HOST, String.format("tcp://%s:55555", PUB_SUB_PLUS_EXTENSION.getNetworkAlias())); + connectorProps.setProperty(SolaceSinkConstants.SOL_USERNAME, jcsmpProperties.getStringProperty(JCSMPProperties.USERNAME)); + connectorProps.setProperty(SolaceSinkConstants.SOL_PASSWORD, jcsmpProperties.getStringProperty(JCSMPProperties.PASSWORD)); + connectorProps.setProperty(SolaceSinkConstants.SOL_VPN_NAME, jcsmpProperties.getStringProperty(JCSMPProperties.VPN_NAME)); } @AfterAll static void cleanUp() { - kafkaProducer.close(); - solaceConsumer.stop(); + solaceTopicConsumer.close(); + solaceQueueConsumer.close(); } - - + + //////////////////////////////////////////////////// // Test types - - void messageToKafkaTest(String expectedSolaceQueue, String[] expectedSolaceTopics, String kafkaKey, String kafkaValue, + + void messageToKafkaTest(TestKafkaProducer producer, String expectedSolaceQueue, String[] expectedSolaceTopics, String kafkaKey, String kafkaValue, Map additionalChecks) { try { - // Clean catch queues first - // TODO: fix possible 
concurrency issue with cleaning/wring the queue later - TestSolaceConsumer.solaceReceivedQueueMessages.clear(); - TestSolaceConsumer.solaceReceivedTopicMessages.clear(); - - // Received messages - List receivedMessages = new ArrayList<>(); - - // Send Kafka message - RecordMetadata metadata = kafkaProducer.sendMessageToKafka(kafkaKey, kafkaValue); - assertNotNull(metadata); - - // Wait for PubSub+ to report messages - populate queue and topics if provided - if (expectedSolaceQueue != null) { - BytesXMLMessage queueMessage = TestSolaceConsumer.solaceReceivedQueueMessages.poll(5,TimeUnit.SECONDS); - assertNotNull(queueMessage); - receivedMessages.add(queueMessage); - } else { - assert(TestSolaceConsumer.solaceReceivedQueueMessages.size() == 0); - } - for(String s : expectedSolaceTopics) { - BytesXMLMessage newTopicMessage = TestSolaceConsumer.solaceReceivedTopicMessages.poll(5,TimeUnit.SECONDS); - assertNotNull(newTopicMessage); - receivedMessages.add(newTopicMessage); - } - - // Evaluate messages - // ensure each solacetopic got a respective message - for(String topicname : expectedSolaceTopics) { - boolean topicFound = false; - for (BytesXMLMessage message : receivedMessages) { - if (message.getDestination().getName().equals(topicname)) { - topicFound = true; - break; - } + clearReceivedMessages(); + + RecordMetadata metadata = sendMessagetoKafka(producer, kafkaKey, kafkaValue); + assertMessageReceived(expectedSolaceQueue, expectedSolaceTopics, metadata, additionalChecks); + } catch (InterruptedException e) { + e.printStackTrace(); + } catch (SDTException e) { + e.printStackTrace(); + } + } + + void clearReceivedMessages() { + // Clean catch queues first + // TODO: fix possible concurrency issue with cleaning/wring the queue later + TestSolaceQueueConsumer.solaceReceivedQueueMessages.clear(); + TestSolaceTopicConsumer.solaceReceivedTopicMessages.clear(); + } + + RecordMetadata sendMessagetoKafka(TestKafkaProducer producer, String kafkaKey, String kafkaValue) { + // Send Kafka message + RecordMetadata metadata = producer.sendMessageToKafka(kafkaKey, kafkaValue); + assertNotNull(metadata); + return metadata; + } + + void assertMessageReceived(String expectedSolaceQueue, String[] expectedSolaceTopics, RecordMetadata metadata, + Map additionalChecks) throws SDTException, InterruptedException { + List receivedMessages = new ArrayList<>(); + + // Wait for PubSub+ to report messages - populate queue and topics if provided + if (expectedSolaceQueue != null) { + BytesXMLMessage queueMessage = TestSolaceQueueConsumer.solaceReceivedQueueMessages.poll(5,TimeUnit.MINUTES); + assertNotNull(queueMessage); + receivedMessages.add(queueMessage); + } else { + assert(TestSolaceQueueConsumer.solaceReceivedQueueMessages.size() == 0); + } + for(String s : expectedSolaceTopics) { + BytesXMLMessage newTopicMessage = TestSolaceTopicConsumer.solaceReceivedTopicMessages.poll(5,TimeUnit.SECONDS); + assertNotNull(newTopicMessage); + receivedMessages.add(newTopicMessage); + } + + // Evaluate messages + // ensure each solacetopic got a respective message + for(String topicname : expectedSolaceTopics) { + boolean topicFound = false; + for (BytesXMLMessage message : receivedMessages) { + if (message.getDestination().getName().equals(topicname)) { + topicFound = true; + break; } - if (!topicFound) fail("Nothing was delivered to topic " + topicname); } - // check message contents - for (BytesXMLMessage message : receivedMessages) { - SDTMap userHeader = message.getProperties(); - 
assert(userHeader.getString("k_topic").contentEquals(metadata.topic())); - assert(userHeader.getString("k_partition").contentEquals(Long.toString(metadata.partition()))); - assert(userHeader.getString("k_offset").contentEquals(Long.toString(metadata.offset()))); - assert(message.getApplicationMessageType().contains(metadata.topic())); - // additional checks as requested - if (additionalChecks != null) { - for (Map.Entry check : additionalChecks.entrySet()) { - if (check.getKey() == AdditionalCheck.ATTACHMENTBYTEBUFFER) { - // Verify contents of the message AttachmentByteBuffer - assert(Arrays.equals((byte[])message.getAttachmentByteBuffer().array(),check.getValue().getBytes())); - } - if (check.getKey() == AdditionalCheck.CORRELATIONID) { - // Verify contents of the message correlationId - assert(message.getCorrelationId().contentEquals(check.getValue())); - } + if (!topicFound) fail("Nothing was delivered to topic " + topicname); + } + // check message contents + for (BytesXMLMessage message : receivedMessages) { + SDTMap userHeader = message.getProperties(); + assertEquals(metadata.topic(), userHeader.getString("k_topic")); + assertEquals(Long.toString(metadata.partition()), userHeader.getString("k_partition")); + assertEquals(Long.toString(metadata.offset()), userHeader.getString("k_offset")); + assertThat(message.getApplicationMessageType(), containsString(metadata.topic())); + // additional checks as requested + if (additionalChecks != null) { + for (Map.Entry check : additionalChecks.entrySet()) { + if (check.getKey() == AdditionalCheck.ATTACHMENTBYTEBUFFER) { + // Verify contents of the message AttachmentByteBuffer + assertArrayEquals(check.getValue().getBytes(), message.getAttachmentByteBuffer().array()); + } + if (check.getKey() == AdditionalCheck.CORRELATIONID) { + // Verify contents of the message correlationId + assertEquals(check.getValue(), message.getCorrelationId()); } } } - - } catch (InterruptedException e) { - e.printStackTrace(); - } catch (SDTException e) { - e.printStackTrace(); } } - + //////////////////////////////////////////////////// // Scenarios - + @DisplayName("Sink SimpleMessageProcessor tests") @Nested @TestInstance(Lifecycle.PER_CLASS) class SinkConnectorSimpleMessageProcessorTests { - - String topics[] = {SOL_ROOT_TOPIC+"/TestTopic1/SubTopic", SOL_ROOT_TOPIC+"/TestTopic2/SubTopic"}; - - @BeforeAll + + String[] topics = {SOL_ROOT_TOPIC+"/TestTopic1/SubTopic", SOL_ROOT_TOPIC+"/TestTopic2/SubTopic"}; + + @BeforeEach void setUp() { - Properties prop = new Properties(); - prop.setProperty("sol.record_processor_class", "com.solace.connector.kafka.connect.sink.recordprocessor.SolSimpleRecordProcessor"); - prop.setProperty("sol.dynamic_destination", "false"); - prop.setProperty("sol.topics", String.join(", ", topics)); - prop.setProperty("sol.queue", SOL_QUEUE); - connectorDeployment.startConnector(prop); + connectorProps.setProperty("sol.record_processor_class", "com.solace.connector.kafka.connect.sink.recordprocessor.SolSimpleRecordProcessor"); + connectorProps.setProperty("sol.dynamic_destination", "false"); + connectorProps.setProperty("sol.topics", String.join(", ", topics)); + connectorProps.setProperty("sol.queue", SOL_QUEUE); } @DisplayName("TextMessage-QueueAndTopics-SolSampleSimpleMessageProcessor") - @Test - void kafkaConsumerTextMessageToTopicTest() { - messageToKafkaTest(SOL_QUEUE, topics, + @ParameterizedTest + @ArgumentsSource(KafkaArgumentsProvider.class) + void kafkaConsumerTextMessageToTopicTest(KafkaContext kafkaContext) { + 
kafkaContext.getSolaceConnectorDeployment().startConnector(connectorProps); + messageToKafkaTest(kafkaContext.getProducer(), SOL_QUEUE, topics, // kafka key and value "Key", "Hello TextMessageToTopicTest world!", // additional checks ImmutableMap.of(AdditionalCheck.ATTACHMENTBYTEBUFFER, "Hello TextMessageToTopicTest world!")); } } - - + + @DisplayName("Sink KeyedMessageProcessor-NONE tests") @Nested @TestInstance(Lifecycle.PER_CLASS) class SinkConnectorNoneKeyedMessageProcessorTests { - - String topics[] = {SOL_ROOT_TOPIC+"/TestTopic1/SubTopic", SOL_ROOT_TOPIC+"/TestTopic2/SubTopic"}; - - @BeforeAll + + String[] topics = {SOL_ROOT_TOPIC+"/TestTopic1/SubTopic", SOL_ROOT_TOPIC+"/TestTopic2/SubTopic"}; + + @BeforeEach void setUp() { - Properties prop = new Properties(); - prop.setProperty("sol.record_processor_class", "com.solace.connector.kafka.connect.sink.recordprocessor.SolSimpleKeyedRecordProcessor"); - prop.setProperty("sol.dynamic_destination", "false"); - prop.setProperty("sol.topics", String.join(", ", topics)); - prop.setProperty("sol.kafka_message_key", "NONE"); - prop.setProperty("sol.queue", SOL_QUEUE); - connectorDeployment.startConnector(prop); + connectorProps.setProperty("sol.record_processor_class", "com.solace.connector.kafka.connect.sink.recordprocessor.SolSimpleKeyedRecordProcessor"); + connectorProps.setProperty("sol.dynamic_destination", "false"); + connectorProps.setProperty("sol.topics", String.join(", ", topics)); + connectorProps.setProperty("sol.kafka_message_key", "NONE"); + connectorProps.setProperty("sol.queue", SOL_QUEUE); } @DisplayName("TextMessage-QueueAndTopics-KeyedMessageProcessor-NONE") - @Test - void kafkaConsumerTextMessageToTopicTest() { - messageToKafkaTest(SOL_QUEUE, topics, + @ParameterizedTest + @ArgumentsSource(KafkaArgumentsProvider.class) + void kafkaConsumerTextMessageToTopicTest(KafkaContext kafkaContext) { + kafkaContext.getSolaceConnectorDeployment().startConnector(connectorProps); + messageToKafkaTest(kafkaContext.getProducer(), SOL_QUEUE, topics, // kafka key and value "Key", "Hello TextMessageToTopicTest world!", // additional checks ImmutableMap.of(AdditionalCheck.ATTACHMENTBYTEBUFFER, "Hello TextMessageToTopicTest world!")); } } - + @DisplayName("Sink KeyedMessageProcessor-DESTINATION tests") @Nested @TestInstance(Lifecycle.PER_CLASS) class SinkConnectorDestinationKeyedMessageProcessorTests { - - String topics[] = {SOL_ROOT_TOPIC+"/TestTopic1/SubTopic", SOL_ROOT_TOPIC+"/TestTopic2/SubTopic"}; - - @BeforeAll + + String[] topics = {SOL_ROOT_TOPIC+"/TestTopic1/SubTopic", SOL_ROOT_TOPIC+"/TestTopic2/SubTopic"}; + + @BeforeEach void setUp() { - Properties prop = new Properties(); - prop.setProperty("sol.record_processor_class", "com.solace.connector.kafka.connect.sink.recordprocessor.SolSimpleKeyedRecordProcessor"); - prop.setProperty("sol.dynamic_destination", "false"); - prop.setProperty("sol.topics", String.join(", ", topics)); - prop.setProperty("sol.kafka_message_key", "DESTINATION"); - prop.setProperty("sol.queue", SOL_QUEUE); - connectorDeployment.startConnector(prop); + connectorProps.setProperty("sol.record_processor_class", "com.solace.connector.kafka.connect.sink.recordprocessor.SolSimpleKeyedRecordProcessor"); + connectorProps.setProperty("sol.dynamic_destination", "false"); + connectorProps.setProperty("sol.topics", String.join(", ", topics)); + connectorProps.setProperty("sol.kafka_message_key", "DESTINATION"); + connectorProps.setProperty("sol.queue", SOL_QUEUE); } 
@DisplayName("TextMessage-QueueAndTopics-KeyedMessageProcessor-DESTINATION") - @Test - void kafkaConsumerTextMessageToTopicTest() { - messageToKafkaTest(SOL_QUEUE, topics, + @ParameterizedTest + @ArgumentsSource(KafkaArgumentsProvider.class) + void kafkaConsumerTextMessageToTopicTest(KafkaContext kafkaContext) { + kafkaContext.getSolaceConnectorDeployment().startConnector(connectorProps); + messageToKafkaTest(kafkaContext.getProducer(), SOL_QUEUE, topics, // kafka key and value "Destination", "Hello TextMessageToTopicTest world!", // additional checks with expected values @@ -233,31 +284,31 @@ void kafkaConsumerTextMessageToTopicTest() { AdditionalCheck.CORRELATIONID, "Destination")); } } - + @DisplayName("Sink KeyedMessageProcessor-CORRELATION_ID tests") @Nested @TestInstance(Lifecycle.PER_CLASS) class SinkConnectorCorrelationIdKeyedMessageProcessorTests { - - String topics[] = {SOL_ROOT_TOPIC+"/TestTopic1/SubTopic", SOL_ROOT_TOPIC+"/TestTopic2/SubTopic"}; - - @BeforeAll + + String[] topics = {SOL_ROOT_TOPIC+"/TestTopic1/SubTopic", SOL_ROOT_TOPIC+"/TestTopic2/SubTopic"}; + + @BeforeEach void setUp() { - Properties prop = new Properties(); - prop.setProperty("sol.record_processor_class", "com.solace.connector.kafka.connect.sink.recordprocessor.SolSimpleKeyedRecordProcessor"); - prop.setProperty("sol.dynamic_destination", "false"); - prop.setProperty("sol.topics", String.join(", ", topics)); - prop.setProperty("sol.kafka_message_key", "CORRELATION_ID"); - prop.setProperty("sol.queue", SOL_QUEUE); - connectorDeployment.startConnector(prop); + connectorProps.setProperty("sol.record_processor_class", "com.solace.connector.kafka.connect.sink.recordprocessor.SolSimpleKeyedRecordProcessor"); + connectorProps.setProperty("sol.dynamic_destination", "false"); + connectorProps.setProperty("sol.topics", String.join(", ", topics)); + connectorProps.setProperty("sol.kafka_message_key", "CORRELATION_ID"); + connectorProps.setProperty("sol.queue", SOL_QUEUE); } @DisplayName("TextMessage-QueueAndTopics-KeyedMessageProcessor-CORRELATION_ID") - @Test - void kafkaConsumerTextMessageToTopicTest() { - messageToKafkaTest(SOL_QUEUE, topics, + @ParameterizedTest + @ArgumentsSource(KafkaArgumentsProvider.class) + void kafkaConsumerTextMessageToTopicTest(KafkaContext kafkaContext) { + kafkaContext.getSolaceConnectorDeployment().startConnector(connectorProps); + messageToKafkaTest(kafkaContext.getProducer(), SOL_QUEUE, topics, // kafka key and value "TestCorrelationId", "Hello TextMessageToTopicTest world!", // additional checks with expected values @@ -265,31 +316,31 @@ void kafkaConsumerTextMessageToTopicTest() { AdditionalCheck.CORRELATIONID, "TestCorrelationId")); } } - + @DisplayName("Sink KeyedMessageProcessor-CORRELATION_ID_AS_BYTES tests") @Nested @TestInstance(Lifecycle.PER_CLASS) class SinkConnectorCorrelationIdAsBytesKeyedMessageProcessorTests { - - String topics[] = {SOL_ROOT_TOPIC+"/TestTopic1/SubTopic", SOL_ROOT_TOPIC+"/TestTopic2/SubTopic"}; - - @BeforeAll + + String[] topics = {SOL_ROOT_TOPIC+"/TestTopic1/SubTopic", SOL_ROOT_TOPIC+"/TestTopic2/SubTopic"}; + + @BeforeEach void setUp() { - Properties prop = new Properties(); - prop.setProperty("sol.record_processor_class", "com.solace.connector.kafka.connect.sink.recordprocessor.SolSimpleKeyedRecordProcessor"); - prop.setProperty("sol.dynamic_destination", "false"); - prop.setProperty("sol.topics", String.join(", ", topics)); - prop.setProperty("sol.kafka_message_key", "CORRELATION_ID_AS_BYTES"); - prop.setProperty("sol.queue", SOL_QUEUE); - 
connectorDeployment.startConnector(prop); + connectorProps.setProperty("sol.record_processor_class", "com.solace.connector.kafka.connect.sink.recordprocessor.SolSimpleKeyedRecordProcessor"); + connectorProps.setProperty("sol.dynamic_destination", "false"); + connectorProps.setProperty("sol.topics", String.join(", ", topics)); + connectorProps.setProperty("sol.kafka_message_key", "CORRELATION_ID_AS_BYTES"); + connectorProps.setProperty("sol.queue", SOL_QUEUE); } @DisplayName("TextMessage-QueueAndTopics-KeyedMessageProcessor-CORRELATION_ID_AS_BYTES") - @Test - void kafkaConsumerTextMessageToTopicTest() { - messageToKafkaTest(SOL_QUEUE, topics, + @ParameterizedTest + @ArgumentsSource(KafkaArgumentsProvider.class) + void kafkaConsumerTextMessageToTopicTest(KafkaContext kafkaContext) { + kafkaContext.getSolaceConnectorDeployment().startConnector(connectorProps); + messageToKafkaTest(kafkaContext.getProducer(), SOL_QUEUE, topics, // kafka key and value "TestCorrelationId", "Hello TextMessageToTopicTest world!", // additional checks with expected values @@ -297,30 +348,30 @@ void kafkaConsumerTextMessageToTopicTest() { AdditionalCheck.CORRELATIONID, "TestCorrelationId")); } } - + @DisplayName("Sink DynamicDestinationMessageProcessor tests") @Nested @TestInstance(Lifecycle.PER_CLASS) class SinkDynamicDestinationMessageProcessorMessageProcessorTests { - - String topics[] = {SOL_ROOT_TOPIC+"/TestTopic1/SubTopic", SOL_ROOT_TOPIC+"/TestTopic2/SubTopic"}; - - @BeforeAll + + String[] topics = {SOL_ROOT_TOPIC+"/TestTopic1/SubTopic", SOL_ROOT_TOPIC+"/TestTopic2/SubTopic"}; + + @BeforeEach void setUp() { - Properties prop = new Properties(); - prop.setProperty("sol.record_processor_class", "com.solace.connector.kafka.connect.sink.recordprocessor.SolDynamicDestinationRecordProcessor"); - prop.setProperty("sol.dynamic_destination", "true"); - prop.setProperty("sol.topics", String.join(", ", topics)); - prop.setProperty("sol.queue", SOL_QUEUE); - connectorDeployment.startConnector(prop); + connectorProps.setProperty("sol.record_processor_class", "com.solace.connector.kafka.connect.sink.recordprocessor.SolDynamicDestinationRecordProcessor"); + connectorProps.setProperty("sol.dynamic_destination", "true"); + connectorProps.setProperty("sol.topics", String.join(", ", topics)); + connectorProps.setProperty("sol.queue", SOL_QUEUE); } @DisplayName("TextMessage-DynamicDestinationMessageProcessor-start") - @Test - void kafkaConsumerTextMessageToTopicTest() { - messageToKafkaTest( + @ParameterizedTest + @ArgumentsSource(KafkaArgumentsProvider.class) + void kafkaConsumerTextMessageToTopicTest(KafkaContext kafkaContext) { + kafkaContext.getSolaceConnectorDeployment().startConnector(connectorProps); + messageToKafkaTest(kafkaContext.getProducer(), // expected list of delivery queue and topics null, new String[] {"ctrl/bus/1234/start"}, // kafka key and value @@ -330,9 +381,11 @@ void kafkaConsumerTextMessageToTopicTest() { } @DisplayName("TextMessage-DynamicDestinationMessageProcessor-stop") - @Test - void kafkaConsumerTextMessageToTopicTest2() { - messageToKafkaTest( + @ParameterizedTest + @ArgumentsSource(KafkaArgumentsProvider.class) + void kafkaConsumerTextMessageToTopicTest2(KafkaContext kafkaContext) { + kafkaContext.getSolaceConnectorDeployment().startConnector(connectorProps); + messageToKafkaTest(kafkaContext.getProducer(), // expected list of delivery queue and topics null, new String[] {"ctrl/bus/1234/stop"}, // kafka key and value @@ -342,9 +395,11 @@ void kafkaConsumerTextMessageToTopicTest2() { } 
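/*
 * Illustrative sketch (not part of this change set): in the dynamic-destination scenarios the topic is
 * not taken from "sol.topics" but computed per record and handed to the sender through the
 * "dynamicDestination" user property -- the same SDTMap key that BadSolDynamicDestinationRecordProcessor
 * later in this patch deliberately overrides with a plain String to trigger
 * "No conversion from String to Destination". Assuming the "<busId> <command>" payload layout used by
 * these tests, the mapping to ctrl/bus/1234/start, ctrl/bus/1234/stop and comms/bus/1234 could be
 * sketched as follows (the method name is illustrative only):
 */
BytesXMLMessage buildDynamicDestinationMessage(SinkRecord record) throws SDTException {
  BytesXMLMessage msg = JCSMPFactory.onlyInstance().createMessage(BytesXMLMessage.class);
  String[] payload = new String((byte[]) record.value(), StandardCharsets.UTF_8).split(" ", 2);
  String busId = payload[0];
  String command = payload.length > 1 ? payload[1] : "";
  // start/stop commands are routed to a control topic, everything else to the comms topic
  Topic topic = JCSMPFactory.onlyInstance().createTopic(
      command.equals("start") || command.equals("stop")
          ? "ctrl/bus/" + busId + "/" + command
          : "comms/bus/" + busId);
  SDTMap userMap = JCSMPFactory.onlyInstance().createMap();
  userMap.putDestination("dynamicDestination", topic); // must be a Destination, not a String
  msg.setProperties(userMap);
  return msg;
}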
@DisplayName("TextMessage-DynamicDestinationMessageProcessor-other") - @Test - void kafkaConsumerTextMessageToTopicTest3() { - messageToKafkaTest( + @ParameterizedTest + @ArgumentsSource(KafkaArgumentsProvider.class) + void kafkaConsumerTextMessageToTopicTest3(KafkaContext kafkaContext) { + kafkaContext.getSolaceConnectorDeployment().startConnector(connectorProps); + messageToKafkaTest(kafkaContext.getProducer(), // expected list of delivery queue and topics null, new String[] {"comms/bus/1234"}, // kafka key and value @@ -354,4 +409,80 @@ void kafkaConsumerTextMessageToTopicTest3() { } } + @DisplayName("Solace connector lifecycle tests") + @Nested + @TestInstance(Lifecycle.PER_CLASS) + class SolaceConnectorLifecycleTests { + private final Gson GSON = new GsonBuilder().setPrettyPrinting().create(); + + @ParameterizedTest + @ArgumentsSource(KafkaArgumentsProvider.class) + void testFailPubSubConnection(KafkaContext kafkaContext) { + connectorProps.setProperty("sol.vpn_name", randomAlphanumeric(10)); + kafkaContext.getSolaceConnectorDeployment().startConnector(connectorProps, true); + AtomicReference connectorStatus = new AtomicReference<>(new JsonObject()); + assertTimeoutPreemptively(Duration.ofMinutes(1), () -> { + JsonObject taskStatus; + do { + JsonObject status = kafkaContext.getSolaceConnectorDeployment().getConnectorStatus(); + connectorStatus.set(status); + taskStatus = status.getAsJsonArray("tasks").get(0).getAsJsonObject(); + } while (!taskStatus.get("state").getAsString().equals("FAILED")); + assertThat(taskStatus.get("trace").getAsString(), containsString("Message VPN Not Allowed")); + }, () -> "Timed out waiting for connector to fail: " + GSON.toJson(connectorStatus.get())); + } + + @CartesianProductTest(name = "[{index}] autoFlush={0}, kafka={1}") + @CartesianValueSource(booleans = { true, false }) + @KafkaArgumentSource + void testCommitRollback(boolean autoFlush, KafkaContext kafkaContext, + JCSMPSession jcsmpSession, SempV2Api sempV2Api, + @ExecSvc(poolSize = 2, scheduled = true) ScheduledExecutorService executorService) + throws Exception { + Queue queue = JCSMPFactory.onlyInstance().createQueue(randomAlphanumeric(100)); + + try (TestSolaceQueueConsumer solaceConsumer1 = new TestSolaceQueueConsumer(jcsmpSession)) { + EndpointProperties endpointProperties = new EndpointProperties(); + endpointProperties.setMaxMsgSize(1); + solaceConsumer1.provisionQueue(queue.getName(), endpointProperties); + solaceConsumer1.start(); + + connectorProps.setProperty(SolaceSinkConstants.SOl_QUEUE, queue.getName()); + connectorProps.setProperty(SolaceSinkConstants.SOL_QUEUE_MESSAGES_AUTOFLUSH_SIZE, Integer.toString(autoFlush ? 
1 : 100)); + connectorProps.setProperty(SolaceSinkConstants.SOl_USE_TRANSACTIONS_FOR_QUEUE, Boolean.toString(true)); + connectorProps.setProperty("errors.retry.timeout", Long.toString(-1)); + kafkaContext.getSolaceConnectorDeployment().startConnector(connectorProps); + + clearReceivedMessages(); + String recordValue = randomAlphanumeric(100); + Future recordMetadata = executorService.schedule(() -> + sendMessagetoKafka(kafkaContext.getProducer(), randomAlphanumeric(100), recordValue), + 5, TimeUnit.SECONDS); + + WaitingConsumer logConsumer = new WaitingConsumer(); + kafkaContext.getConnection().getConnectContainer().followOutput(logConsumer); + logConsumer.waitUntil(frame -> frame.getUtf8String() + .contains("Document Is Too Large"), 30, TimeUnit.SECONDS); + if (autoFlush) { + logConsumer.waitUntil(frame -> frame.getUtf8String() + .contains("RetriableException from SinkTask"), 30, TimeUnit.SECONDS); + } else { + logConsumer.waitUntil(frame -> frame.getUtf8String() + .contains("Offset commit failed, rewinding to last committed offsets"), 1, TimeUnit.MINUTES); + } + Thread.sleep(5000); + + Assertions.assertEquals("RUNNING", + kafkaContext.getSolaceConnectorDeployment().getConnectorStatus() + .getAsJsonArray("tasks").get(0).getAsJsonObject().get("state").getAsString()); + + sempV2Api.config().updateMsgVpnQueue(SOL_VPN, queue.getName(), new ConfigMsgVpnQueue().maxMsgSize(10000000), null); + assertMessageReceived(queue.getName(), new String[0], recordMetadata.get(30, TimeUnit.SECONDS), + ImmutableMap.of(AdditionalCheck.ATTACHMENTBYTEBUFFER, recordValue)); + } finally { + jcsmpSession.deprovision(queue, JCSMPSession.FLAG_IGNORE_DOES_NOT_EXIST); + } + } + } + } diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/SolaceConnectorDeployment.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/SolaceConnectorDeployment.java index d4df7be..156b6b2 100644 --- a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/SolaceConnectorDeployment.java +++ b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/SolaceConnectorDeployment.java @@ -1,78 +1,106 @@ package com.solace.connector.kafka.connect.sink.it; -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.Properties; -import org.apache.commons.configuration2.Configuration; -import org.apache.commons.configuration2.FileBasedConfiguration; -import org.apache.commons.configuration2.PropertiesConfiguration; -import org.apache.commons.configuration2.builder.FileBasedConfigurationBuilder; -import org.apache.commons.configuration2.builder.fluent.Parameters; -import org.apache.commons.configuration2.ex.ConfigurationException; -import org.apache.commons.io.FileUtils; -import org.apache.kafka.clients.admin.AdminClient; -import org.apache.kafka.clients.admin.NewTopic; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonArray; import com.google.gson.JsonElement; import com.google.gson.JsonObject; import com.google.gson.JsonParser; - +import com.solace.connector.kafka.connect.sink.SolaceSinkConnector; +import com.solace.connector.kafka.connect.sink.VersionUtil; +import com.solace.connector.kafka.connect.sink.it.util.KafkaConnection; import okhttp3.MediaType; import 
okhttp3.OkHttpClient; import okhttp3.Request; import okhttp3.RequestBody; import okhttp3.Response; +import okhttp3.ResponseBody; +import org.apache.commons.io.FileUtils; +import org.apache.kafka.clients.admin.AdminClient; +import org.apache.kafka.clients.admin.DeleteTopicsResult; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.common.KafkaFuture; +import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Properties; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTimeoutPreemptively; +import static org.junit.jupiter.api.Assertions.assertTrue; public class SolaceConnectorDeployment implements TestConstants { static Logger logger = LoggerFactory.getLogger(SolaceConnectorDeployment.class.getName()); - static String kafkaTestTopic = KAFKA_SINK_TOPIC + "-" + Instant.now().getEpochSecond(); + public static String kafkaTestTopic = KAFKA_SINK_TOPIC + "-" + Instant.now().getEpochSecond(); OkHttpClient client = new OkHttpClient(); - String connectorAddress = new TestConfigProperties().getProperty("kafka.connect_rest_url"); + private final AdminClient adminClient; + private final KafkaConnection kafkaConnection; + + public SolaceConnectorDeployment(KafkaConnection kafkaConnection, AdminClient adminClient) { + this.kafkaConnection = kafkaConnection; + this.adminClient = adminClient; + } public void waitForConnectorRestIFUp() { - Request request = new Request.Builder().url("http://" + connectorAddress + "/connector-plugins").build(); - Response response = null; + Request request = new Request.Builder().url(kafkaConnection.getConnectUrl() + "/connector-plugins").build(); + boolean success = false; do { try { - Thread.sleep(1000l); - response = client.newCall(request).execute(); + Thread.sleep(1000L); + try (Response response = client.newCall(request).execute()) { + success = response.isSuccessful(); + } } catch (IOException | InterruptedException e) { // Continue looping } - } while (response == null || !response.isSuccessful()); + } while (!success); } public void provisionKafkaTestTopic() { // Create a new kafka test topic to use - String bootstrapServers = MessagingServiceFullLocalSetupConfluent.COMPOSE_CONTAINER_KAFKA.getServiceHost("kafka_1", - 39092) + ":39092"; - Properties properties = new Properties(); - properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); - AdminClient adminClient = AdminClient.create(properties); NewTopic newTopic = new NewTopic(kafkaTestTopic, 1, (short) 1); // new NewTopic(topicName, numPartitions, // replicationFactor) List newTopics = new ArrayList(); newTopics.add(newTopic); adminClient.createTopics(newTopics); - adminClient.close(); } - void startConnector() { - startConnector(null); // Defaults only, no override + public void deleteKafkaTestTopic() throws ExecutionException, InterruptedException, TimeoutException { + DeleteTopicsResult result = 
adminClient.deleteTopics(Collections.singleton(kafkaTestTopic)); + for (Map.Entry<String, KafkaFuture<Void>> entry : result.values().entrySet()) { + try { + entry.getValue().get(1, TimeUnit.MINUTES); + } catch (ExecutionException e) { + if (!(e.getCause() instanceof UnknownTopicOrPartitionException)) { + throw e; + } + } + } } void startConnector(Properties props) { + startConnector(props, false); + } + + void startConnector(Properties props, boolean expectStartFail) { + Gson gson = new GsonBuilder().setPrettyPrinting().create(); String configJson = null; // Prep config files try { @@ -84,10 +112,6 @@ void startConnector(Properties props) { JsonElement jconfig = jtree.getAsJsonObject().get("config"); JsonObject jobject = jconfig.getAsJsonObject(); // Set properties defaults - jobject.addProperty("sol.host", "tcp://" + new TestConfigProperties().getProperty("sol.host") + ":55555"); - jobject.addProperty("sol.username", SOL_ADMINUSER_NAME); - jobject.addProperty("sol.password", SOL_ADMINUSER_PW); - jobject.addProperty("sol.vpn_name", SOL_VPN); jobject.addProperty("topics", kafkaTestTopic); jobject.addProperty("sol.topics", SOL_TOPICS); jobject.addProperty("sol.autoflush.size", "1"); @@ -96,12 +120,7 @@ void startConnector(Properties props) { jobject.addProperty("value.converter", "org.apache.kafka.connect.converters.ByteArrayConverter"); jobject.addProperty("key.converter", "org.apache.kafka.connect.storage.StringConverter"); // Override properties if provided - if (props != null) { - props.forEach((key, value) -> { - jobject.addProperty((String) key, (String) value); - }); - } - Gson gson = new Gson(); + props.forEach((key, value) -> jobject.addProperty((String) key, (String) value)); configJson = gson.toJson(jtree); } catch (IOException e) { e.printStackTrace(); @@ -111,30 +130,51 @@ void startConnector(Properties props) { try { // check presence of Solace plugin: curl // http://18.218.82.209:8083/connector-plugins | jq - Request request = new Request.Builder().url("http://" + connectorAddress + "/connector-plugins").build(); - Response response; - response = client.newCall(request).execute(); - assert (response.isSuccessful()); - String results = response.body().string(); - logger.info("Available connector plugins: " + results); - assert (results.contains("solace")); + Request request = new Request.Builder().url(kafkaConnection.getConnectUrl() + "/connector-plugins").build(); + try (Response response = client.newCall(request).execute()) { + assertTrue(response.isSuccessful()); + JsonArray results = responseBodyToJson(response.body()).getAsJsonArray(); + logger.info("Available connector plugins: " + gson.toJson(results)); + boolean hasConnector = false; + for (Iterator<JsonElement> resultsIter = results.iterator(); !hasConnector && resultsIter.hasNext();) { + JsonObject connectorPlugin = resultsIter.next().getAsJsonObject(); + if (connectorPlugin.get("class").getAsString().equals(SolaceSinkConnector.class.getName())) { + hasConnector = true; + assertEquals("sink", connectorPlugin.get("type").getAsString()); + assertEquals(VersionUtil.getVersion(), connectorPlugin.get("version").getAsString()); + } + } + assertTrue(hasConnector, String.format("Could not find connector %s : %s", + SolaceSinkConnector.class.getName(), gson.toJson(results))); + } // Delete a running connector, if any - Request deleterequest = new Request.Builder() - .url("http://" + connectorAddress + "/connectors/solaceSinkConnector").delete().build(); - Response deleteresponse = client.newCall(deleterequest).execute(); - logger.info("Delete response: " +
deleteresponse); + deleteConnector(); // configure plugin: curl -X POST -H "Content-Type: application/json" -d // @solace_source_properties.json http://18.218.82.209:8083/connectors - Request configrequest = new Request.Builder().url("http://" + connectorAddress + "/connectors") + Request configrequest = new Request.Builder().url(kafkaConnection.getConnectUrl() + "/connectors") .post(RequestBody.create(configJson, MediaType.parse("application/json"))).build(); - Response configresponse = client.newCall(configrequest).execute(); - // if (!configresponse.isSuccessful()) throw new IOException("Unexpected code " - // + configresponse); - String configresults = configresponse.body().string(); - logger.info("Connector config results: " + configresults); + try (Response configresponse = client.newCall(configrequest).execute()) { + // if (!configresponse.isSuccessful()) throw new IOException("Unexpected code " + // + configresponse); + logger.info("Connector config results: " + gson.toJson(responseBodyToJson(configresponse.body()))); + } // check success + AtomicReference statusResponse = new AtomicReference<>(new JsonObject()); + assertTimeoutPreemptively(Duration.ofSeconds(10), () -> { + JsonObject connectorStatus; + do { + connectorStatus = getConnectorStatus(); + statusResponse.set(connectorStatus); + } while (!(expectStartFail ? "FAILED" : "RUNNING").equals(Optional.ofNullable(connectorStatus) + .map(a -> a.getAsJsonArray("tasks")) + .map(a -> a.size() > 0 ? a.get(0) : null) + .map(JsonElement::getAsJsonObject) + .map(a -> a.get("state")) + .map(JsonElement::getAsString) + .orElse(""))); + }, () -> "Timed out while waiting for connector to start: " + gson.toJson(statusResponse.get())); Thread.sleep(5000); // Give some time to start } catch (IOException e) { e.printStackTrace(); @@ -144,4 +184,35 @@ void startConnector(Properties props) { } } + public void deleteConnector() throws IOException { + Request request = new Request.Builder() + .url(kafkaConnection.getConnectUrl() + "/connectors/solaceSinkConnector").delete().build(); + try (Response response = client.newCall(request).execute()) { + logger.info("Delete response: " + response); + } + } + + public JsonObject getConnectorStatus() { + Request request = new Request.Builder() + .url(kafkaConnection.getConnectUrl() + "/connectors/solaceSinkConnector/status").build(); + return assertTimeoutPreemptively(Duration.ofSeconds(30), () -> { + while (true) { + try (Response response = client.newCall(request).execute()) { + if (!response.isSuccessful()) { + continue; + } + + return responseBodyToJson(response.body()).getAsJsonObject(); + } + } + }); + } + + private JsonElement responseBodyToJson(ResponseBody responseBody) { + return Optional.ofNullable(responseBody) + .map(ResponseBody::charStream) + .map(s -> new JsonParser().parse(s)) + .orElseGet(JsonObject::new); + } + } diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/SolaceSinkTaskIT.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/SolaceSinkTaskIT.java new file mode 100644 index 0000000..c38b12a --- /dev/null +++ b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/SolaceSinkTaskIT.java @@ -0,0 +1,367 @@ +package com.solace.connector.kafka.connect.sink.it; + +import com.solace.connector.kafka.connect.sink.SolRecordProcessorIF; +import com.solace.connector.kafka.connect.sink.SolaceSinkConstants; +import com.solace.connector.kafka.connect.sink.SolaceSinkSender; +import 
com.solace.connector.kafka.connect.sink.SolaceSinkTask; +import com.solace.connector.kafka.connect.sink.it.util.extensions.NetworkPubSubPlusExtension; +import com.solace.connector.kafka.connect.sink.recordprocessor.SolDynamicDestinationRecordProcessor; +import com.solace.test.integration.junit.jupiter.extension.ExecutorServiceExtension; +import com.solace.test.integration.junit.jupiter.extension.ExecutorServiceExtension.ExecSvc; +import com.solace.test.integration.junit.jupiter.extension.LogCaptorExtension; +import com.solace.test.integration.junit.jupiter.extension.LogCaptorExtension.LogCaptor; +import com.solace.test.integration.semp.v2.SempV2Api; +import com.solace.test.integration.semp.v2.config.model.ConfigMsgVpnClientProfile; +import com.solace.test.integration.semp.v2.config.model.ConfigMsgVpnClientUsername; +import com.solace.test.integration.semp.v2.config.model.ConfigMsgVpnQueue; +import com.solacesystems.jcsmp.BytesXMLMessage; +import com.solacesystems.jcsmp.ClosedFacilityException; +import com.solacesystems.jcsmp.JCSMPException; +import com.solacesystems.jcsmp.JCSMPProperties; +import com.solacesystems.jcsmp.Queue; +import com.solacesystems.jcsmp.SDTException; +import com.solacesystems.jcsmp.transaction.RollbackException; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.RandomUtils; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.errors.RetriableException; +import org.apache.kafka.connect.sink.SinkRecord; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedReader; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTimeoutPreemptively; + +@ExtendWith(ExecutorServiceExtension.class) +@ExtendWith(LogCaptorExtension.class) +@ExtendWith(NetworkPubSubPlusExtension.class) +public class SolaceSinkTaskIT { + private SolaceSinkTask solaceSinkTask; + private Map connectorProperties; + private String clientProfileName; + private String clientUsernameName; + + private static final Logger logger = LoggerFactory.getLogger(SolaceSinkTask.class); + + @BeforeEach + void setUp(JCSMPProperties jcsmpProperties, SempV2Api sempV2Api) throws Exception { + solaceSinkTask = new SolaceSinkTask(); + String msgVpnName = jcsmpProperties.getStringProperty(JCSMPProperties.VPN_NAME); + + clientProfileName = sempV2Api.config().createMsgVpnClientProfile(msgVpnName, new ConfigMsgVpnClientProfile() + 
.allowGuaranteedMsgSendEnabled(true) + .allowGuaranteedMsgReceiveEnabled(true) + .allowTransactedSessionsEnabled(true) + .clientProfileName(RandomStringUtils.randomAlphanumeric(30)), null) + .getData() + .getClientProfileName(); + logger.info("Created client profile {}", clientProfileName); + + ConfigMsgVpnClientUsername clientUsername = sempV2Api.config().createMsgVpnClientUsername(msgVpnName, + new ConfigMsgVpnClientUsername() + .clientUsername(RandomStringUtils.randomAlphanumeric(30)) + .clientProfileName(clientProfileName) + .enabled(true), null) + .getData(); + clientUsernameName = clientUsername.getClientUsername(); + logger.info("Created client username {}", clientUsernameName); + + connectorProperties = new HashMap<>(); + connectorProperties.put(SolaceSinkConstants.SOL_RECORD_PROCESSOR, TestConstants.CONN_MSGPROC_CLASS); + connectorProperties.put(SolaceSinkConstants.SOL_HOST, jcsmpProperties.getStringProperty(JCSMPProperties.HOST)); + connectorProperties.put(SolaceSinkConstants.SOL_VPN_NAME, msgVpnName); + connectorProperties.put(SolaceSinkConstants.SOL_USERNAME, clientUsername.getClientUsername()); + Optional.ofNullable(clientUsername.getPassword()) + .ifPresent(p -> connectorProperties.put(SolaceSinkConstants.SOL_PASSWORD, p)); + } + + @AfterEach + void tearDown(JCSMPProperties jcsmpProperties, SempV2Api sempV2Api) throws Exception { + String msgVpnName = jcsmpProperties.getStringProperty(JCSMPProperties.VPN_NAME); + + solaceSinkTask.stop(); + + if (clientUsernameName != null) { + logger.info("Deleting client username {}", clientUsernameName); + sempV2Api.config().deleteMsgVpnClientUsername(msgVpnName, clientUsernameName); + } + + if (clientProfileName != null) { + logger.info("Deleting client profile {}", clientProfileName); + sempV2Api.config().deleteMsgVpnClientProfile(msgVpnName, clientProfileName); + } + } + + @Test + public void testNoProvidedMessageProcessor() { + connectorProperties.remove(SolaceSinkConstants.SOL_RECORD_PROCESSOR); + ConnectException thrown = assertThrows(ConnectException.class, () -> solaceSinkTask.start(connectorProperties)); + assertThat(thrown.getMessage(), containsString("Failed to setup sender to PubSub+")); + assertThat(thrown.getCause(), instanceOf(KafkaException.class)); + assertThat(thrown.getCause().getMessage(), containsString( + "Could not find a public no-argument constructor for " + SolRecordProcessorIF.class.getName())); + } + + @ParameterizedTest(name = "[{index}] transacted={0}") + @ValueSource(booleans = { true, false }) + public void testFailCreateQueueProducer(boolean transacted, SempV2Api sempV2Api, Queue queue) throws Exception { + connectorProperties.put(SolaceSinkConstants.SOl_QUEUE, queue.getName()); + connectorProperties.put(SolaceSinkConstants.SOl_USE_TRANSACTIONS_FOR_QUEUE, Boolean.toString(transacted)); + + sempV2Api.config().updateMsgVpnClientProfile(connectorProperties.get(SolaceSinkConstants.SOL_VPN_NAME), + clientProfileName, + new ConfigMsgVpnClientProfile().allowGuaranteedMsgSendEnabled(false), null); + + ConnectException thrown = assertThrows(ConnectException.class, () -> solaceSinkTask.start(connectorProperties)); + assertThat(thrown.getMessage(), containsString("Failed to setup sender to PubSub+")); + assertThat(thrown.getCause(), instanceOf(JCSMPException.class)); + assertThat(thrown.getCause().getMessage(), containsString("Router does not support guaranteed publisher flows")); + } + + @Test + public void testFailTransactedSessionCreation(SempV2Api sempV2Api, Queue queue) throws Exception { + 
connectorProperties.put(SolaceSinkConstants.SOl_QUEUE, queue.getName()); + connectorProperties.put(SolaceSinkConstants.SOl_USE_TRANSACTIONS_FOR_QUEUE, "true"); + + sempV2Api.config().updateMsgVpnClientProfile(connectorProperties.get(SolaceSinkConstants.SOL_VPN_NAME), + clientProfileName, + new ConfigMsgVpnClientProfile().allowTransactedSessionsEnabled(false), null); + + ConnectException thrown = assertThrows(ConnectException.class, () -> solaceSinkTask.start(connectorProperties)); + assertThat(thrown.getMessage(), containsString("Failed to create Transacted Session")); + assertThat(thrown.getCause(), instanceOf(JCSMPException.class)); + assertThat(thrown.getCause().getMessage(), containsString("Router does not support transacted sessions")); + } + + @Test + public void testSendToTopicThrowsJCSMPException() { + connectorProperties.put(SolaceSinkConstants.SOL_TOPICS, RandomStringUtils.randomAlphanumeric(100)); + solaceSinkTask.start(connectorProperties); + + SinkRecord sinkRecord = new SinkRecord(RandomStringUtils.randomAlphanumeric(100), 0, + Schema.STRING_SCHEMA, RandomStringUtils.randomAlphanumeric(100), + Schema.BYTES_SCHEMA, RandomUtils.nextBytes(10), 0); + + solaceSinkTask.stop(); + ConnectException thrown = assertThrows(ConnectException.class, () -> solaceSinkTask.put( + Collections.singleton(sinkRecord))); + assertThat(thrown, instanceOf(RetriableException.class)); + assertThat(thrown.getMessage(), containsString("Received exception while sending message to topic")); + assertThat(thrown.getCause(), instanceOf(ClosedFacilityException.class)); + } + + @Test + public void testSendToQueueThrowsJCSMPException(Queue queue) { + connectorProperties.put(SolaceSinkConstants.SOl_QUEUE, queue.getName()); + solaceSinkTask.start(connectorProperties); + + SinkRecord sinkRecord = new SinkRecord(RandomStringUtils.randomAlphanumeric(100), 0, + Schema.STRING_SCHEMA, RandomStringUtils.randomAlphanumeric(100), + Schema.BYTES_SCHEMA, RandomUtils.nextBytes(10), 0); + + solaceSinkTask.stop(); + ConnectException thrown = assertThrows(ConnectException.class, () -> solaceSinkTask.put( + Collections.singleton(sinkRecord))); + assertThat(thrown, instanceOf(RetriableException.class)); + assertThat(thrown.getMessage(), containsString("Received exception while sending message to queue")); + assertThat(thrown.getCause(), instanceOf(ClosedFacilityException.class)); + } + + @Test + public void testSendToDynamicTopicThrowsJCSMPException() { + connectorProperties.put(SolaceSinkConstants.SOL_DYNAMIC_DESTINATION, "true"); + connectorProperties.put(SolaceSinkConstants.SOL_RECORD_PROCESSOR, SolDynamicDestinationRecordProcessor.class.getName()); + solaceSinkTask.start(connectorProperties); + + SinkRecord sinkRecord = new SinkRecord(RandomStringUtils.randomAlphanumeric(100), 0, + Schema.STRING_SCHEMA, RandomStringUtils.randomAlphanumeric(100), + Schema.BYTES_SCHEMA, String.format("%s %s", RandomStringUtils.randomAlphanumeric(4), + RandomStringUtils.randomAlphanumeric(100)).getBytes(StandardCharsets.UTF_8), 0); + + solaceSinkTask.stop(); + ConnectException thrown = assertThrows(ConnectException.class, () -> solaceSinkTask.put( + Collections.singleton(sinkRecord))); + assertThat(thrown, instanceOf(RetriableException.class)); + assertThat(thrown.getMessage(), containsString("Received exception while sending message to topic")); + assertThat(thrown.getCause(), instanceOf(ClosedFacilityException.class)); + } + + @ParameterizedTest(name = "[{index}] ignoreRecordProcessorError={0}") + @ValueSource(booleans = { true, false }) + 
public void testInvalidDynamicDestination(boolean ignoreRecordProcessorError, + @ExecSvc ExecutorService executorService, + @LogCaptor(SolaceSinkSender.class) BufferedReader logReader) throws Exception { + connectorProperties.put(SolaceSinkConstants.SOL_RECORD_PROCESSOR, BadSolDynamicDestinationRecordProcessor.class.getName()); + connectorProperties.put(SolaceSinkConstants.SOL_RECORD_PROCESSOR_IGNORE_ERROR, Boolean.toString(ignoreRecordProcessorError)); + connectorProperties.put(SolaceSinkConstants.SOL_DYNAMIC_DESTINATION, Boolean.toString(true)); + solaceSinkTask.start(connectorProperties); + + Set<SinkRecord> records = Collections.singleton(new SinkRecord(RandomStringUtils.randomAlphanumeric(100), 0, + Schema.STRING_SCHEMA, RandomStringUtils.randomAlphanumeric(100), + Schema.BYTES_SCHEMA, String.format("%s %s", RandomStringUtils.randomAlphanumeric(4), + RandomStringUtils.randomAlphanumeric(100)).getBytes(StandardCharsets.UTF_8), 0)); + + if (ignoreRecordProcessorError) { + Future<?> future = executorService.submit(() -> { + String logLine; + do { + try { + logLine = logReader.readLine(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } while (!logLine.contains("Received exception retrieving Dynamic Destination")); + }); + solaceSinkTask.put(records); + future.get(30, TimeUnit.SECONDS); + } else { + ConnectException thrown = assertThrows(ConnectException.class, () -> solaceSinkTask.put(records)); + assertThat(thrown.getMessage(), containsString("Received exception retrieving Dynamic Destination")); + assertThat(thrown.getCause(), instanceOf(SDTException.class)); + assertThat(thrown.getCause().getMessage(), containsString("No conversion from String to Destination")); + } + } + + @ParameterizedTest(name = "[{index}] ignoreRecordProcessorError={0}") + @ValueSource(booleans = { true, false }) + public void testRecordProcessorError(boolean ignoreRecordProcessorError, + @ExecSvc ExecutorService executorService, + @LogCaptor(SolaceSinkSender.class) BufferedReader logReader) throws Exception { + connectorProperties.put(SolaceSinkConstants.SOL_RECORD_PROCESSOR, BadRecordProcessor.class.getName()); + connectorProperties.put(SolaceSinkConstants.SOL_RECORD_PROCESSOR_IGNORE_ERROR, Boolean.toString(ignoreRecordProcessorError)); + solaceSinkTask.start(connectorProperties); + + Set<SinkRecord> records = Collections.singleton(new SinkRecord(RandomStringUtils.randomAlphanumeric(100), 0, + Schema.STRING_SCHEMA, RandomStringUtils.randomAlphanumeric(100), + Schema.BYTES_SCHEMA, RandomUtils.nextBytes(10), 0)); + + if (ignoreRecordProcessorError) { + Future<?> future = executorService.submit(() -> { + String logLine; + do { + try { + logLine = logReader.readLine(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } while (!logLine.contains("Encountered exception in record processing")); + }); + solaceSinkTask.put(records); + future.get(30, TimeUnit.SECONDS); + } else { + ConnectException thrown = assertThrows(ConnectException.class, () -> solaceSinkTask.put(records)); + assertThat(thrown.getMessage(), containsString("Encountered exception in record processing")); + assertEquals(BadRecordProcessor.TEST_EXCEPTION, thrown.getCause()); + } + } + + @Test + public void testCommitRollback(SempV2Api sempV2Api, Queue queue) throws Exception { + connectorProperties.put(SolaceSinkConstants.SOl_QUEUE, queue.getName()); + connectorProperties.put(SolaceSinkConstants.SOl_USE_TRANSACTIONS_FOR_QUEUE, Boolean.toString(true)); + + String vpnName = connectorProperties.get(SolaceSinkConstants.SOL_VPN_NAME); +
sempV2Api.config().updateMsgVpnQueue(vpnName, queue.getName(), new ConfigMsgVpnQueue().maxMsgSize(1), null); + + assertTimeoutPreemptively(Duration.ofSeconds(20), () -> { + while (sempV2Api.monitor().getMsgVpnQueue(vpnName, queue.getName(), null).getData() + .getMaxMsgSize() != 1) { + logger.info("Waiting for queue {} to have max message size of 1", queue.getName()); + Thread.sleep(100); + } + }); + + solaceSinkTask.start(connectorProperties); + + SinkRecord sinkRecord = new SinkRecord(RandomStringUtils.randomAlphanumeric(100), 0, + Schema.STRING_SCHEMA, RandomStringUtils.randomAlphanumeric(100), + Schema.BYTES_SCHEMA, RandomUtils.nextBytes(10), 0); + Map currentOffsets = Collections.singletonMap( + new TopicPartition(sinkRecord.topic(), sinkRecord.kafkaPartition()), + new OffsetAndMetadata(sinkRecord.kafkaOffset())); + + solaceSinkTask.put(Collections.singleton(sinkRecord)); + ConnectException thrown = assertThrows(ConnectException.class, () -> solaceSinkTask.flush(currentOffsets)); + assertThat(thrown.getMessage(), containsString("Error in committing transaction")); + assertThat(thrown.getCause(), instanceOf(RollbackException.class)); + assertThat(thrown.getCause().getMessage(), containsString("Document Is Too Large")); + } + + @Test + public void testAutoFlushCommitRollback(SempV2Api sempV2Api, Queue queue) throws Exception { + connectorProperties.put(SolaceSinkConstants.SOl_QUEUE, queue.getName()); + connectorProperties.put(SolaceSinkConstants.SOl_USE_TRANSACTIONS_FOR_QUEUE, Boolean.toString(true)); + connectorProperties.put(SolaceSinkConstants.SOL_QUEUE_MESSAGES_AUTOFLUSH_SIZE, Integer.toString(1)); + + String vpnName = connectorProperties.get(SolaceSinkConstants.SOL_VPN_NAME); + sempV2Api.config().updateMsgVpnQueue(vpnName, queue.getName(), new ConfigMsgVpnQueue().maxMsgSize(1), null); + + assertTimeoutPreemptively(Duration.ofSeconds(20), () -> { + while (sempV2Api.monitor().getMsgVpnQueue(vpnName, queue.getName(), null).getData() + .getMaxMsgSize() != 1) { + logger.info("Waiting for queue {} to have max message size of 1", queue.getName()); + Thread.sleep(100); + } + }); + + solaceSinkTask.start(connectorProperties); + + SinkRecord sinkRecord = new SinkRecord(RandomStringUtils.randomAlphanumeric(100), 0, + Schema.STRING_SCHEMA, RandomStringUtils.randomAlphanumeric(100), + Schema.BYTES_SCHEMA, RandomUtils.nextBytes(10), 0); + + ConnectException thrown = assertThrows(RetriableException.class, () -> solaceSinkTask.put(Collections.singleton(sinkRecord))); + assertThat(thrown.getMessage(), containsString("Error in committing transaction")); + assertThat(thrown.getCause(), instanceOf(RollbackException.class)); + assertThat(thrown.getCause().getMessage(), containsString("Document Is Too Large")); + } + + public static class BadRecordProcessor implements SolRecordProcessorIF { + static final RuntimeException TEST_EXCEPTION = new RuntimeException("Some processing failure"); + + @Override + public BytesXMLMessage processRecord(String skey, SinkRecord record) { + throw TEST_EXCEPTION; + } + } + + public static class BadSolDynamicDestinationRecordProcessor extends SolDynamicDestinationRecordProcessor { + @Override + public BytesXMLMessage processRecord(String skey, SinkRecord record) { + BytesXMLMessage msg = super.processRecord(skey, record); + try { + msg.getProperties().putString("dynamicDestination", "abc"); + } catch (SDTException e) { + throw new RuntimeException(e); + } + return msg; + } + } +} diff --git 
a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestConfigProperties.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestConfigProperties.java deleted file mode 100644 index f44c291..0000000 --- a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestConfigProperties.java +++ /dev/null @@ -1,66 +0,0 @@ -package com.solace.connector.kafka.connect.sink.it; - -import java.io.FileReader; -import java.io.IOException; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.Properties; - -public class TestConfigProperties { - - static String testConfigPropertiesFile = "src/integrationTest/resources/manual-setup.properties"; - // This class helps determine the docker host's IP address and avoids getting "localhost" - static class DockerHost { - static public String getIpAddress() { - String dockerReportedAddress = MessagingServiceFullLocalSetupConfluent.COMPOSE_CONTAINER_KAFKA - .getServiceHost("kafka_1", 9092); - if (dockerReportedAddress == "localhost" || dockerReportedAddress == "127.0.0.1") { - return Tools.getIpAddress(); - } else { - return MessagingServiceFullLocalSetupConfluent.COMPOSE_CONTAINER_KAFKA - .getServiceHost("kafka_1", 9092); - } - } - } - - - private Properties properties = new Properties(); - - TestConfigProperties() { - try(FileReader fileReader = new FileReader(testConfigPropertiesFile)){ - properties.load(fileReader); - } catch (IOException e) { - e.printStackTrace(); - } - } - - String getProperty(String name) { - String configuredProperty = properties.getProperty(name); - if (configuredProperty != null) { - return configuredProperty; - } - switch(name) { - case "sol.host": - // No port here - return DockerHost.getIpAddress(); - - case "sol.username": - return "default"; - - case "sol.password": - return "default"; - - case "sol.vpn_name": - return "default"; - - case "kafka.connect_rest_url": - return (DockerHost.getIpAddress() + ":28083"); - - case "kafka.bootstrap_servers": - return (DockerHost.getIpAddress() + ":39092"); - - default: - return null; - } - } -} diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestConstants.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestConstants.java index 2b6dbbd..7d9baa8 100644 --- a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestConstants.java +++ b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestConstants.java @@ -1,31 +1,15 @@ package com.solace.connector.kafka.connect.sink.it; -public interface TestConstants { - - public static final String PUBSUB_TAG = "latest"; - public static final String PUBSUB_HOSTNAME = "solbroker"; - public static final String PUBSUB_NETWORK_NAME = "solace_msg_network"; - public static final String FULL_DOCKER_COMPOSE_FILE_PATH = "src/integrationTest/resources/"; - public static final String[] SERVICES = new String[]{"solbroker"}; - public static final long MAX_STARTUP_TIMEOUT_MSEC = 120000l; - public static final String DIRECT_MESSAGING_HTTP_HEALTH_CHECK_URI = "/health-check/direct-active"; - public static final int DIRECT_MESSAGING_HTTP_HEALTH_CHECK_PORT = 5550; - public static final String GUARANTEED_MESSAGING_HTTP_HEALTH_CHECK_URI = "/health-check/guaranteed-active"; - public static final int GUARANTEED_MESSAGING_HTTP_HEALTH_CHECK_PORT = 5550; +import com.solace.connector.kafka.connect.sink.recordprocessor.SolSimpleRecordProcessor; - public static final String CONNECTORSOURCE = 
"build/distributions/pubsubplus-connector-kafka-sink.zip"; - - public static final String UNZIPPEDCONNECTORDESTINATION = "src/integrationTest/resources"; - public static final String CONNECTORPROPERTIESFILE = "etc/solace_sink.properties"; - public static final String CONNECTORJSONPROPERTIESFILE = "etc/solace_sink_properties.json"; - - public static final String SOL_ADMINUSER_NAME = "default"; - public static final String SOL_ADMINUSER_PW = "default"; - public static final String SOL_VPN = "default"; - public static final String KAFKA_SINK_TOPIC = "kafka-test-topic-sink"; - public static final String SOL_ROOT_TOPIC = "pubsubplus-test-topic-sink"; - public static final String SOL_TOPICS = "pubsubplus-test-topic-sink"; - public static final String SOL_QUEUE = "pubsubplus-test-queue-sink"; - public static final String CONN_MSGPROC_CLASS = "com.solace.sink.connector.recordprocessor.SolSimpleRecordProcessor"; - public static final String CONN_KAFKA_MSGKEY = "DESTINATION"; +public interface TestConstants { + String UNZIPPEDCONNECTORDESTINATION = "src/integrationTest/resources"; + String CONNECTORJSONPROPERTIESFILE = "etc/solace_sink_properties.json"; + String SOL_VPN = "default"; + String KAFKA_SINK_TOPIC = "kafka-test-topic-sink"; + String SOL_ROOT_TOPIC = "pubsubplus-test-topic-sink"; + String SOL_TOPICS = "pubsubplus-test-topic-sink"; + String SOL_QUEUE = "pubsubplus-test-queue-sink"; + String CONN_MSGPROC_CLASS = SolSimpleRecordProcessor.class.getName(); + String CONN_KAFKA_MSGKEY = "DESTINATION"; } diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestKafkaProducer.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestKafkaProducer.java index 0c8cc8c..b23be99 100644 --- a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestKafkaProducer.java +++ b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestKafkaProducer.java @@ -5,30 +5,28 @@ import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.serialization.ByteArraySerializer; -import org.apache.kafka.common.serialization.StringSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Properties; import java.util.concurrent.ExecutionException; -public class TestKafkaProducer implements TestConstants { +public class TestKafkaProducer implements AutoCloseable { static Logger logger = LoggerFactory.getLogger(TestKafkaProducer.class.getName()); - private String kafkaTopic; + private final String bootstrapHost; + private final String kafkaTopic; private KafkaProducer producer; - - public TestKafkaProducer(String kafkaTestTopic) { - kafkaTopic = kafkaTestTopic; + + public TestKafkaProducer(String bootstrapHost, String kafkaTestTopic) { + this.bootstrapHost = bootstrapHost; + this.kafkaTopic = kafkaTestTopic; } public void start() { - String bootstrapServers = MessagingServiceFullLocalSetupConfluent.COMPOSE_CONTAINER_KAFKA.getServiceHost("kafka_1", 39092) - + ":39092"; - // create Producer properties Properties properties = new Properties(); - properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); + properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapHost); properties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName()); properties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName()); @@ -54,12 +52,12 @@ public 
RecordMetadata sendMessageToKafka(String msgKey, String msgValue) { recordmetadata = producer.send(new ProducerRecord<>(kafkaTopic, msgKey.getBytes(), msgValue.getBytes())).get(); logger.info("Message sent to Kafka topic " + kafkaTopic); } catch (InterruptedException | ExecutionException e) { - // TODO Auto-generated catch block - e.printStackTrace(); + logger.error("Failed to send message to Kafka topic " + kafkaTopic, e); } return recordmetadata; } - + + @Override public void close() { producer.close(); } diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestSolaceConsumer.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestSolaceConsumer.java deleted file mode 100644 index 1c56dd7..0000000 --- a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestSolaceConsumer.java +++ /dev/null @@ -1,121 +0,0 @@ -package com.solace.connector.kafka.connect.sink.it; - -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; - -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.solacesystems.jcsmp.BytesMessage; -import com.solacesystems.jcsmp.BytesXMLMessage; -import com.solacesystems.jcsmp.ConsumerFlowProperties; -import com.solacesystems.jcsmp.DeliveryMode; -import com.solacesystems.jcsmp.EndpointProperties; -import com.solacesystems.jcsmp.FlowReceiver; -import com.solacesystems.jcsmp.JCSMPException; -import com.solacesystems.jcsmp.JCSMPFactory; -import com.solacesystems.jcsmp.JCSMPProperties; -import com.solacesystems.jcsmp.JCSMPSession; -import com.solacesystems.jcsmp.JCSMPStreamingPublishEventHandler; -import com.solacesystems.jcsmp.Message; -import com.solacesystems.jcsmp.Queue; -import com.solacesystems.jcsmp.TextMessage; -import com.solacesystems.jcsmp.Topic; -import com.solacesystems.jcsmp.XMLMessageConsumer; -import com.solacesystems.jcsmp.XMLMessageListener; -import com.solacesystems.jcsmp.XMLMessageProducer; - -public class TestSolaceConsumer { - - // Queue to communicate received messages - public static BlockingQueue solaceReceivedTopicMessages = new ArrayBlockingQueue<>(10); - public static BlockingQueue solaceReceivedQueueMessages = new ArrayBlockingQueue<>(10); - - static Logger logger = LoggerFactory.getLogger(SinkConnectorIT.class.getName()); - private JCSMPSession session; - private XMLMessageConsumer topicSubscriber; - private FlowReceiver queueConsumer; - private String queueName; - - public void initialize(String host, String user, String password, String messagevpn) { - TestConfigProperties configProps = new TestConfigProperties(); - final JCSMPProperties properties = new JCSMPProperties(); - properties.setProperty(JCSMPProperties.HOST, "tcp://" + configProps.getProperty("sol.host") + ":55555"); // host:port - properties.setProperty(JCSMPProperties.USERNAME, configProps.getProperty("sol.username")); // client-username - properties.setProperty(JCSMPProperties.VPN_NAME, configProps.getProperty("sol.vpn_name")); // message-vpn - properties.setProperty(JCSMPProperties.PASSWORD, configProps.getProperty("sol.password")); // client-password - try { - session = JCSMPFactory.onlyInstance().createSession(properties); - session.connect(); - } catch (JCSMPException e1) { - e1.printStackTrace(); - } - } - - public void provisionQueue(String queueName) throws JCSMPException { - this.queueName = queueName; - final Queue queue = JCSMPFactory.onlyInstance().createQueue(queueName); - // Provision queue in 
case it doesn't exist, and do not fail if it already exists - final EndpointProperties endpointProps = new EndpointProperties(); - endpointProps.setPermission(EndpointProperties.PERMISSION_CONSUME); - endpointProps.setAccessType(EndpointProperties.ACCESSTYPE_EXCLUSIVE); - session.provision(queue, endpointProps, JCSMPSession.FLAG_IGNORE_ALREADY_EXISTS); - logger.info("Ensured Solace queue " + queueName + " exists."); - } - - public void start() throws JCSMPException { - - // Create and start topic subscriber - - topicSubscriber = session.getMessageConsumer(new XMLMessageListener() { - @Override - public void onReceive(BytesXMLMessage msg) { - logger.info("Message received to topic: " + msg.getDestination()); - solaceReceivedTopicMessages.add(msg); - } - @Override - public void onException(JCSMPException e) { - System.out.printf("Consumer received exception: %s%n",e); - } - }); - // Subscribe to all topics starting a common root - session.addSubscription(JCSMPFactory.onlyInstance().createTopic(TestConstants.SOL_ROOT_TOPIC + "/>")); - // Also add subscriptions for DynamicDestination record processor testing - session.addSubscription(JCSMPFactory.onlyInstance().createTopic("ctrl" + "/>")); - session.addSubscription(JCSMPFactory.onlyInstance().createTopic("comms" + "/>")); - logger.info("Topic subscriber connected. Awaiting message..."); - topicSubscriber.start(); - - // Create and start queue consumer - final ConsumerFlowProperties flow_prop = new ConsumerFlowProperties(); - flow_prop.setEndpoint(JCSMPFactory.onlyInstance().createQueue(queueName)); - flow_prop.setAckMode(JCSMPProperties.SUPPORTED_MESSAGE_ACK_CLIENT); - EndpointProperties endpoint_props = new EndpointProperties(); - endpoint_props.setAccessType(EndpointProperties.ACCESSTYPE_EXCLUSIVE); - queueConsumer = session.createFlow(new XMLMessageListener() { - @Override - public void onReceive(BytesXMLMessage msg) { - logger.info("Queue message received"); - solaceReceivedQueueMessages.add(msg); - msg.ackMessage(); - } - - @Override - public void onException(JCSMPException e) { - System.out.printf("Consumer received exception: %s%n", e); - } - }, flow_prop, endpoint_props); - - // Start the consumer - logger.info("Queue receiver connected. 
Awaiting message..."); - queueConsumer.start(); - } - - public void stop() { - queueConsumer.stop(); - topicSubscriber.stop(); - session.closeSession(); - } - -} diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestSolaceQueueConsumer.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestSolaceQueueConsumer.java new file mode 100644 index 0000000..097fdd9 --- /dev/null +++ b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestSolaceQueueConsumer.java @@ -0,0 +1,79 @@ +package com.solace.connector.kafka.connect.sink.it; + +import com.solacesystems.jcsmp.BytesXMLMessage; +import com.solacesystems.jcsmp.ConsumerFlowProperties; +import com.solacesystems.jcsmp.EndpointProperties; +import com.solacesystems.jcsmp.FlowReceiver; +import com.solacesystems.jcsmp.JCSMPException; +import com.solacesystems.jcsmp.JCSMPFactory; +import com.solacesystems.jcsmp.JCSMPProperties; +import com.solacesystems.jcsmp.JCSMPSession; +import com.solacesystems.jcsmp.Queue; +import com.solacesystems.jcsmp.XMLMessageListener; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; + +public class TestSolaceQueueConsumer implements AutoCloseable { + public static BlockingQueue solaceReceivedQueueMessages = new ArrayBlockingQueue<>(10000); + + static Logger logger = LoggerFactory.getLogger(TestSolaceQueueConsumer.class); + private final JCSMPSession session; + private FlowReceiver queueConsumer; + private String queueName; + + public TestSolaceQueueConsumer(JCSMPSession session) { + this.session = session; + } + + public void provisionQueue(String queueName) throws JCSMPException { + provisionQueue(queueName, new EndpointProperties()); + } + + public void provisionQueue(String queueName, EndpointProperties endpointProps) throws JCSMPException { + setQueueName(queueName); + final Queue queue = JCSMPFactory.onlyInstance().createQueue(queueName); + // Provision queue in case it doesn't exist, and do not fail if it already exists + endpointProps.setPermission(EndpointProperties.PERMISSION_CONSUME); + endpointProps.setAccessType(EndpointProperties.ACCESSTYPE_NONEXCLUSIVE); + session.provision(queue, endpointProps, JCSMPSession.FLAG_IGNORE_ALREADY_EXISTS); + logger.info("Ensured Solace queue " + queueName + " exists."); + } + + public void setQueueName(String queueName) { + this.queueName = queueName; + } + + public void start() throws JCSMPException { + final ConsumerFlowProperties flow_prop = new ConsumerFlowProperties(); + flow_prop.setEndpoint(JCSMPFactory.onlyInstance().createQueue(queueName)); + flow_prop.setAckMode(JCSMPProperties.SUPPORTED_MESSAGE_ACK_CLIENT); + EndpointProperties endpoint_props = new EndpointProperties(); + endpoint_props.setAccessType(EndpointProperties.ACCESSTYPE_EXCLUSIVE); + queueConsumer = session.createFlow(new XMLMessageListener() { + @Override + public void onReceive(BytesXMLMessage msg) { + logger.info("Queue message received from {} with user properties: {}", msg.getDestination(), + msg.getProperties()); + solaceReceivedQueueMessages.add(msg); + msg.ackMessage(); + } + + @Override + public void onException(JCSMPException e) { + logger.error("Consumer received exception", e); + } + }, flow_prop, endpoint_props); + + // Start the consumer + logger.info("Queue receiver connected. 
Awaiting message..."); + queueConsumer.start(); + } + + @Override + public void close() { + queueConsumer.stop(); + } +} diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestSolaceTopicConsumer.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestSolaceTopicConsumer.java new file mode 100644 index 0000000..c40c0da --- /dev/null +++ b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/TestSolaceTopicConsumer.java @@ -0,0 +1,72 @@ +package com.solace.connector.kafka.connect.sink.it; + +import com.solacesystems.jcsmp.BytesXMLMessage; +import com.solacesystems.jcsmp.JCSMPErrorResponseException; +import com.solacesystems.jcsmp.JCSMPErrorResponseSubcodeEx; +import com.solacesystems.jcsmp.JCSMPException; +import com.solacesystems.jcsmp.JCSMPFactory; +import com.solacesystems.jcsmp.JCSMPSession; +import com.solacesystems.jcsmp.XMLMessageConsumer; +import com.solacesystems.jcsmp.XMLMessageListener; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; + +public class TestSolaceTopicConsumer implements AutoCloseable { + + public static BlockingQueue solaceReceivedTopicMessages = new ArrayBlockingQueue<>(10000); + + static Logger logger = LoggerFactory.getLogger(TestSolaceTopicConsumer.class); + private final JCSMPSession session; + private XMLMessageConsumer topicSubscriber; + + public TestSolaceTopicConsumer(JCSMPSession session) { + this.session = session; + } + + public void start() throws JCSMPException { + topicSubscriber = session.getMessageConsumer(new XMLMessageListener() { + @Override + public void onReceive(BytesXMLMessage msg) { + logger.info("Message received to topic: " + msg.getDestination()); + solaceReceivedTopicMessages.add(msg); + } + @Override + public void onException(JCSMPException e) { + logger.error("Consumer received exception", e); + } + }); + // Subscribe to all topics starting a common root + try { + session.addSubscription(JCSMPFactory.onlyInstance().createTopic(TestConstants.SOL_ROOT_TOPIC + "/>")); + } catch (JCSMPErrorResponseException e) { + if (e.getSubcodeEx() != JCSMPErrorResponseSubcodeEx.SUBSCRIPTION_ALREADY_PRESENT) { + throw e; + } + } + // Also add subscriptions for DynamicDestination record processor testing + try { + session.addSubscription(JCSMPFactory.onlyInstance().createTopic("ctrl" + "/>")); + } catch (JCSMPErrorResponseException e) { + if (e.getSubcodeEx() != JCSMPErrorResponseSubcodeEx.SUBSCRIPTION_ALREADY_PRESENT) { + throw e; + } + } + try { + session.addSubscription(JCSMPFactory.onlyInstance().createTopic("comms" + "/>")); + } catch (JCSMPErrorResponseException e) { + if (e.getSubcodeEx() != JCSMPErrorResponseSubcodeEx.SUBSCRIPTION_ALREADY_PRESENT) { + throw e; + } + } + logger.info("Topic subscriber connected. 
Awaiting message..."); + topicSubscriber.start(); + } + + @Override + public void close() { + topicSubscriber.stop(); + } +} diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/Tools.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/Tools.java index c187830..14aaa8a 100644 --- a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/Tools.java +++ b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/Tools.java @@ -1,39 +1,21 @@ package com.solace.connector.kafka.connect.sink.it; +import com.solace.connector.kafka.connect.sink.VersionUtil; + import java.io.IOException; -import java.net.InterfaceAddress; -import java.net.NetworkInterface; -import java.net.SocketException; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; public class Tools { - static public String getIpAddress() { - Set HostAddresses = new HashSet<>(); - try { - for (NetworkInterface ni : Collections.list(NetworkInterface.getNetworkInterfaces())) { - if (!ni.isLoopback() && ni.isUp() && ni.getHardwareAddress() != null) { - for (InterfaceAddress ia : ni.getInterfaceAddresses()) { - if (ia.getBroadcast() != null) { //If limited to IPV4 - HostAddresses.add(ia.getAddress().getHostAddress()); - } - } - } - } - } catch (SocketException e) { } - return (String) HostAddresses.toArray()[0]; - } static public String getUnzippedConnectorDirName() { String connectorUnzippedPath = null; try { DirectoryStream dirs = Files.newDirectoryStream( - Paths.get(TestConstants.UNZIPPEDCONNECTORDESTINATION), "pubsubplus-connector-kafka-*"); + Paths.get(TestConstants.UNZIPPEDCONNECTORDESTINATION), + "pubsubplus-connector-kafka-sink-" + VersionUtil.getVersion()); for (Path entry: dirs) { connectorUnzippedPath = entry.toString(); break; //expecting only one diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/KafkaConnection.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/KafkaConnection.java new file mode 100644 index 0000000..5b8b479 --- /dev/null +++ b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/KafkaConnection.java @@ -0,0 +1,52 @@ +package com.solace.connector.kafka.connect.sink.it.util; + +import org.testcontainers.containers.GenericContainer; + +import java.util.Objects; + +public class KafkaConnection { + private final String bootstrapServers; + private final String connectUrl; + private final GenericContainer kafkaContainer; + private final GenericContainer connectContainer; + + public KafkaConnection(String bootstrapServers, String connectUrl, GenericContainer kafkaContainer, + GenericContainer connectContainer) { + this.bootstrapServers = bootstrapServers; + this.connectUrl = connectUrl; + this.kafkaContainer = kafkaContainer; + this.connectContainer = connectContainer; + } + + public String getBootstrapServers() { + return bootstrapServers; + } + + public String getConnectUrl() { + return connectUrl; + } + + public GenericContainer getKafkaContainer() { + return kafkaContainer; + } + + public GenericContainer getConnectContainer() { + return connectContainer; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + KafkaConnection that = (KafkaConnection) o; + return Objects.equals(bootstrapServers, that.bootstrapServers) && + 
Objects.equals(connectUrl, that.connectUrl) && + Objects.equals(kafkaContainer, that.kafkaContainer) && + Objects.equals(connectContainer, that.connectContainer); + } + + @Override + public int hashCode() { + return Objects.hash(bootstrapServers, connectUrl, kafkaContainer, connectContainer); + } +} diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/extensions/KafkaArgumentsProvider.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/extensions/KafkaArgumentsProvider.java new file mode 100644 index 0000000..99f6531 --- /dev/null +++ b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/extensions/KafkaArgumentsProvider.java @@ -0,0 +1,333 @@ +package com.solace.connector.kafka.connect.sink.it.util.extensions; + +import com.solace.connector.kafka.connect.sink.it.SolaceConnectorDeployment; +import com.solace.connector.kafka.connect.sink.it.TestKafkaProducer; +import com.solace.connector.kafka.connect.sink.it.util.KafkaConnection; +import com.solace.connector.kafka.connect.sink.it.util.testcontainers.BitnamiKafkaConnectContainer; +import com.solace.connector.kafka.connect.sink.it.util.testcontainers.ConfluentKafkaConnectContainer; +import com.solace.connector.kafka.connect.sink.it.util.testcontainers.ConfluentKafkaControlCenterContainer; +import com.solace.connector.kafka.connect.sink.it.util.testcontainers.ConfluentKafkaSchemaRegistryContainer; +import org.apache.kafka.clients.admin.AdminClient; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.junit.jupiter.api.extension.AfterEachCallback; +import org.junit.jupiter.api.extension.ExtensionContext; +import org.junit.jupiter.api.extension.ExtensionContext.Namespace; +import org.junit.jupiter.api.extension.ExtensionContext.Store.CloseableResource; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.ArgumentsProvider; +import org.junit.jupiter.params.provider.ArgumentsSource; +import org.junitpioneer.jupiter.CartesianAnnotationConsumer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.KafkaContainer; +import org.testcontainers.utility.DockerImageName; + +import java.io.IOException; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import java.util.Properties; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import java.util.stream.Stream; + +public class KafkaArgumentsProvider implements ArgumentsProvider, CartesianAnnotationConsumer { + private static final Logger LOG = LoggerFactory.getLogger(KafkaArgumentsProvider.class); + + @Override + public Stream provideArguments(ExtensionContext context) { + KafkaConnection bitnamiCxn = context.getRoot() + .getStore(KafkaNamespace.BITNAMI.getNamespace()) + .getOrComputeIfAbsent(BitnamiResource.class, c -> { + LOG.info("Creating Bitnami Kafka"); + BitnamiKafkaConnectContainer container = new BitnamiKafkaConnectContainer() + .withNetwork(NetworkPubSubPlusExtension.DOCKER_NET); + if (!container.isCreated()) { + container.start(); + } + return new BitnamiResource(container); + }, BitnamiResource.class) + .getKafkaConnection(); + + KafkaConnection confluentCxn = context.getRoot() + .getStore(KafkaNamespace.CONFLUENT.getNamespace()) + .getOrComputeIfAbsent(ConfluentResource.class, c -> { + 
LOG.info("Creating Confluent Kafka"); + KafkaContainer kafkaContainer = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka").withTag("6.2.1")) + .withNetwork(NetworkPubSubPlusExtension.DOCKER_NET) + .withNetworkAliases("kafka"); + if (!kafkaContainer.isCreated()) { + kafkaContainer.start(); + } + + ConfluentKafkaSchemaRegistryContainer schemaRegistryContainer = new ConfluentKafkaSchemaRegistryContainer(kafkaContainer) + .withNetworkAliases("schema-registry"); + if (!schemaRegistryContainer.isCreated()) { + schemaRegistryContainer.start(); + } + + ConfluentKafkaControlCenterContainer controlCenterContainer = new ConfluentKafkaControlCenterContainer(kafkaContainer, schemaRegistryContainer); + if (!controlCenterContainer.isCreated()) { + controlCenterContainer.start(); + } + + ConfluentKafkaConnectContainer connectContainer = new ConfluentKafkaConnectContainer(kafkaContainer, schemaRegistryContainer); + if (!connectContainer.isCreated()) { + connectContainer.start(); + } + return new ConfluentResource( + new KafkaContainerResource<>(kafkaContainer), + new KafkaContainerResource<>(schemaRegistryContainer), + new KafkaContainerResource<>(controlCenterContainer), + new KafkaContainerResource<>(connectContainer)); + }, ConfluentResource.class) + .getKafkaConnection(); + + return Stream.of( + Arguments.of(createKafkaContext(bitnamiCxn, KafkaNamespace.BITNAMI, context)), + Arguments.of(createKafkaContext(confluentCxn, KafkaNamespace.CONFLUENT, context)) + ); + } + + private KafkaContext createKafkaContext(KafkaConnection connection, KafkaNamespace namespace, ExtensionContext context) { + TestKafkaProducer producer = context.getRoot() + .getStore(namespace.getNamespace()) + .getOrComputeIfAbsent(ProducerResource.class, c -> { + TestKafkaProducer newProducer = new TestKafkaProducer(connection.getBootstrapServers(), + SolaceConnectorDeployment.kafkaTestTopic); + newProducer.start(); + return new ProducerResource(newProducer); + }, ProducerResource.class) + .getProducer(); + + AdminClient adminClient = context.getRoot() + .getStore(namespace.getNamespace()) + .getOrComputeIfAbsent(AdminClientResource.class, c -> { + Properties properties = new Properties(); + properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, connection.getBootstrapServers()); + AdminClient newAdminClient = AdminClient.create(properties); + return new AdminClientResource(newAdminClient); + }, AdminClientResource.class) + .getAdminClient(); + + SolaceConnectorDeployment connectorDeployment = context.getRoot() + .getStore(namespace.getNamespace()) + .getOrComputeIfAbsent(ConnectorDeploymentResource.class, c -> { + SolaceConnectorDeployment deployment = new SolaceConnectorDeployment(connection, adminClient); + deployment.waitForConnectorRestIFUp(); + deployment.provisionKafkaTestTopic(); + return new ConnectorDeploymentResource(deployment); + }, ConnectorDeploymentResource.class) + .getDeployment(); + + return new KafkaContext(namespace, connection, adminClient, connectorDeployment, producer); + } + + @Override + public void accept(KafkaArgumentSource kafkaArgumentSource) { + + } + + @Target(ElementType.METHOD) + @Retention(RetentionPolicy.RUNTIME) + @ArgumentsSource(KafkaArgumentsProvider.class) + public @interface KafkaArgumentSource { + + } + + public static class AutoDeleteSolaceConnectorDeploymentAfterEach implements AfterEachCallback { + + @Override + public void afterEach(ExtensionContext context) throws Exception { + for (KafkaNamespace namespace : KafkaNamespace.values()) { + ConnectorDeploymentResource 
deploymentResource = context.getRoot() + .getStore(namespace.getNamespace()) + .get(ConnectorDeploymentResource.class, ConnectorDeploymentResource.class); + if (deploymentResource != null) { + deploymentResource.close(); + } + } + } + } + + public static class KafkaContext { + private final KafkaNamespace namespace; + private final KafkaConnection connection; + private final AdminClient adminClient; + private final SolaceConnectorDeployment solaceConnectorDeployment; + private final TestKafkaProducer producer; + + private KafkaContext(KafkaNamespace namespace, KafkaConnection connection, AdminClient adminClient, + SolaceConnectorDeployment solaceConnectorDeployment, TestKafkaProducer producer) { + this.namespace = namespace; + this.connection = connection; + this.producer = producer; + this.solaceConnectorDeployment = solaceConnectorDeployment; + this.adminClient = adminClient; + } + + public KafkaConnection getConnection() { + return connection; + } + + public AdminClient getAdminClient() { + return adminClient; + } + + public SolaceConnectorDeployment getSolaceConnectorDeployment() { + return solaceConnectorDeployment; + } + + public TestKafkaProducer getProducer() { + return producer; + } + + @Override + public String toString() { + return namespace.name(); + } + } + + private static class ProducerResource implements CloseableResource { + private static final Logger LOG = LoggerFactory.getLogger(ProducerResource.class); + private final TestKafkaProducer producer; + + private ProducerResource(TestKafkaProducer producer) { + this.producer = producer; + } + + public TestKafkaProducer getProducer() { + return producer; + } + + @Override + public void close() { + LOG.info("Closing Kafka producer"); + producer.close(); + } + } + + private static class AdminClientResource implements CloseableResource { + private static final Logger LOG = LoggerFactory.getLogger(AdminClientResource.class); + private final AdminClient adminClient; + + private AdminClientResource(AdminClient adminClient) { + this.adminClient = adminClient; + } + + public AdminClient getAdminClient() { + return adminClient; + } + + @Override + public void close() { + LOG.info("Closing Kafka admin client"); + adminClient.close(); + } + } + + private static class ConnectorDeploymentResource implements CloseableResource { + private static final Logger LOG = LoggerFactory.getLogger(ConnectorDeploymentResource.class); + private final SolaceConnectorDeployment deployment; + + private ConnectorDeploymentResource(SolaceConnectorDeployment deployment) { + this.deployment = deployment; + } + + public SolaceConnectorDeployment getDeployment() { + return deployment; + } + + @Override + public void close() throws IOException, ExecutionException, InterruptedException, TimeoutException { + LOG.info("Closing Kafka connector deployment"); + deployment.deleteKafkaTestTopic(); + deployment.deleteConnector(); + } + } + + private static class BitnamiResource extends KafkaContainerResource { + + private BitnamiResource(BitnamiKafkaConnectContainer container) { + super(container); + } + + public KafkaConnection getKafkaConnection() { + return new KafkaConnection(getContainer().getBootstrapServers(), getContainer().getConnectUrl(), + getContainer(), getContainer()); + } + } + + private static class ConfluentResource implements CloseableResource { + private final KafkaContainerResource kafka; + private final KafkaContainerResource schemaRegistry; + private final KafkaContainerResource controlCenter; + private final KafkaContainerResource connect; + + 
private ConfluentResource(KafkaContainerResource kafka, + KafkaContainerResource schemaRegistry, + KafkaContainerResource controlCenter, + KafkaContainerResource connect) { + this.kafka = kafka; + this.schemaRegistry = schemaRegistry; + this.controlCenter = controlCenter; + this.connect = connect; + } + + public KafkaConnection getKafkaConnection() { + return new KafkaConnection(kafka.getContainer().getBootstrapServers(), + connect.getContainer().getConnectUrl(), kafka.container, connect.container); + } + + public KafkaContainerResource getKafka() { + return kafka; + } + + public KafkaContainerResource getConnect() { + return connect; + } + + @Override + public void close() { + connect.close(); + controlCenter.close(); + schemaRegistry.close(); + kafka.close(); + } + } + + private static class KafkaContainerResource> implements CloseableResource { + private static final Logger LOG = LoggerFactory.getLogger(KafkaContainerResource.class); + private final T container; + + private KafkaContainerResource(T container) { + this.container = container; + } + + public T getContainer() { + return container; + } + + @Override + public void close() { + LOG.info("Closing container {}", container.getContainerName()); + container.close(); + } + } + + private enum KafkaNamespace { + BITNAMI, CONFLUENT; + + private final Namespace namespace; + + KafkaNamespace() { + this.namespace = Namespace.create(KafkaArgumentsProvider.class, name()); + } + + public Namespace getNamespace() { + return namespace; + } + } +} diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/extensions/NetworkPubSubPlusExtension.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/extensions/NetworkPubSubPlusExtension.java new file mode 100644 index 0000000..57902a4 --- /dev/null +++ b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/extensions/NetworkPubSubPlusExtension.java @@ -0,0 +1,24 @@ +package com.solace.connector.kafka.connect.sink.it.util.extensions; + +import com.solace.test.integration.junit.jupiter.extension.PubSubPlusExtension; +import com.solace.test.integration.testcontainer.PubSubPlusContainer; +import org.testcontainers.containers.Network; + +public class NetworkPubSubPlusExtension extends PubSubPlusExtension { + public static final Network DOCKER_NET = Network.newNetwork(); + private static final String DOCKER_NET_PUBSUB_ALIAS = "solace-pubsubplus"; + + public NetworkPubSubPlusExtension() { + super(() -> new PubSubPlusContainer() + .withNetwork(DOCKER_NET) + .withNetworkAliases(DOCKER_NET_PUBSUB_ALIAS)); + } + + public Network getDockerNetwork() { + return DOCKER_NET; + } + + public String getNetworkAlias() { + return DOCKER_NET_PUBSUB_ALIAS; + } +} diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/testcontainers/BitnamiKafkaConnectContainer.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/testcontainers/BitnamiKafkaConnectContainer.java new file mode 100644 index 0000000..5729c40 --- /dev/null +++ b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/testcontainers/BitnamiKafkaConnectContainer.java @@ -0,0 +1,145 @@ +package com.solace.connector.kafka.connect.sink.it.util.testcontainers; + +import com.github.dockerjava.api.command.InspectContainerResponse; +import com.solace.connector.kafka.connect.sink.SolaceSinkTask; +import com.solace.connector.kafka.connect.sink.it.Tools; +import org.testcontainers.containers.BindMode; +import 
org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.images.builder.Transferable; +import org.testcontainers.utility.DockerImageName; + +import java.nio.charset.StandardCharsets; +import java.util.Comparator; + +public class BitnamiKafkaConnectContainer extends GenericContainer { + private static final String BROKER_LISTENER_NAME = "PLAINTEXT"; + private static final int BROKER_LISTENER_PORT = 9092; + private static final String BOOTSTRAP_LISTENER_NAME = "PLAINTEXT_HOST"; + public static final int BOOTSTRAP_LISTENER_PORT = 29092; + public static final int CONNECT_PORT = 28083; + private static final int ZOOKEEPER_PORT = 2181; + private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("bitnami/kafka"); + private static final String DEFAULT_IMAGE_TAG = "2"; + private static final String STARTER_SCRIPT = "/testcontainers_start.sh"; + private DockerImageName zookeeperDockerImageName = DockerImageName.parse("bitnami/zookeeper:3"); + private GenericContainer zookeeperContainer; + + public BitnamiKafkaConnectContainer() { + this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_IMAGE_TAG)); + } + + public BitnamiKafkaConnectContainer(String dockerImageName) { + this(DockerImageName.parse(dockerImageName)); + } + + public BitnamiKafkaConnectContainer(DockerImageName dockerImageName) { + super(dockerImageName); + + withNetwork(Network.newNetwork()); + withExposedPorts(CONNECT_PORT, BROKER_LISTENER_PORT, BOOTSTRAP_LISTENER_PORT); + withEnv("KAFKA_CFG_BROKER_ID", "1"); + withEnv("ALLOW_PLAINTEXT_LISTENER", "yes"); + withEnv("KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP", String.join(",", + BROKER_LISTENER_NAME + ":PLAINTEXT", BOOTSTRAP_LISTENER_NAME + ":PLAINTEXT")); + withEnv("KAFKA_CFG_LISTENERS", String.join(",", + BROKER_LISTENER_NAME + "://:" + BROKER_LISTENER_PORT, BOOTSTRAP_LISTENER_NAME + "://:" + BOOTSTRAP_LISTENER_PORT)); + withClasspathResourceMapping(Tools.getUnzippedConnectorDirName() + "/lib", + "/opt/bitnami/kafka/jars/pubsubplus-connector-kafka", BindMode.READ_ONLY); + waitingFor(Wait.forLogMessage(".*Finished starting connectors and tasks.*", 1)); + } + + @Override + public void start() { + if (zookeeperDockerImageName != null) { + String zookeeperNetworkAlias = "zookeeper"; + zookeeperContainer = new GenericContainer<>(zookeeperDockerImageName) + .withNetwork(getNetwork()) + .withNetworkAliases(zookeeperNetworkAlias) + .withEnv("ZOOKEEPER_CLIENT_PORT", Integer.toString(ZOOKEEPER_PORT)) + .withEnv("ZOOKEEPER_TICK_TIME", "2000") + .withEnv("ALLOW_ANONYMOUS_LOGIN", "yes"); + dependsOn(zookeeperContainer); + withEnv("KAFKA_CFG_ZOOKEEPER_CONNECT", String.format("%s:%s", zookeeperNetworkAlias, ZOOKEEPER_PORT)); + } + super.start(); + } + + @Override + protected void doStart() { + // Delay starting Kafka until after container has started + withCommand("sh", "-c", "while [ ! 
-f " + STARTER_SCRIPT + " ]; do sleep 0.1; done; " + STARTER_SCRIPT); + super.doStart(); + } + + @Override + protected void containerIsStarting(InspectContainerResponse containerInfo) { + String command = "/bin/sh\n" + + "set -e\n" + + "echo 'plugin.path=/opt/bitnami/kafka/jars' >> /opt/bitnami/kafka/config/connect-distributed.properties\n" + + "echo 'rest.port=" + CONNECT_PORT + "' >> /opt/bitnami/kafka/config/connect-distributed.properties\n" + + "echo 'log4j.logger.org.apache.kafka.connect.runtime.WorkerSinkTask=DEBUG' >> /opt/bitnami/kafka/config/connect-log4j.properties\n" + + "echo 'log4j.logger." + SolaceSinkTask.class.getName() + "=TRACE' >> /opt/bitnami/kafka/config/connect-log4j.properties\n" + + "export KAFKA_CFG_ADVERTISED_LISTENERS=" + advertisedListeners(containerInfo) + "\n" + + "/opt/bitnami/scripts/kafka/setup.sh\n" + + "/opt/bitnami/scripts/kafka/run.sh &\n" + + "/opt/bitnami/kafka/bin/connect-distributed.sh /opt/bitnami/kafka/config/connect-distributed.properties\n"; + copyFileToContainer(Transferable.of(command.getBytes(StandardCharsets.UTF_8), 0777), STARTER_SCRIPT); + super.containerIsStarting(containerInfo); + } + + @Override + public void close() { + super.close(); + if (zookeeperContainer != null) { + zookeeperContainer.close(); + } + } + + public String getBootstrapServers() { + return String.format("%s:%s", getHost(), getMappedPort(BitnamiKafkaConnectContainer.BOOTSTRAP_LISTENER_PORT)); + } + + public String getConnectUrl() { + return String.format("http://%s:%s", getHost(), getMappedPort(BitnamiKafkaConnectContainer.CONNECT_PORT)); + } + + public BitnamiKafkaConnectContainer withZookeeper(DockerImageName dockerImageName) { + zookeeperDockerImageName = dockerImageName; + return this; + } + + private String advertisedListeners(InspectContainerResponse containerInfo) { + return String.join(",", + String.format("%s://%s:%s", BROKER_LISTENER_NAME, getExternalIpAddress(containerInfo), BROKER_LISTENER_PORT), + String.format("%s://%s:%s", BOOTSTRAP_LISTENER_NAME, getHost(), getMappedPort(BOOTSTRAP_LISTENER_PORT))); + } + + /** + * @see org.testcontainers.containers.KafkaContainer + */ + private String getExternalIpAddress(InspectContainerResponse containerInfo) { + // Kafka supports only one INTER_BROKER listener, so we have to pick one. + // The current algorithm uses the following order of resolving the IP: + // 1. Custom network's IP set via `withNetwork` + // 2. Bridge network's IP + // 3. 
Best effort fallback to getNetworkSettings#ipAddress + return containerInfo.getNetworkSettings().getNetworks().entrySet() + .stream() + .filter(it -> it.getValue().getIpAddress() != null) + .max(Comparator.comparingInt(entry -> { + if (getNetwork().getId().equals(entry.getValue().getNetworkID())) { + return 2; + } + + if ("bridge".equals(entry.getKey())) { + return 1; + } + + return 0; + })) + .map(it -> it.getValue().getIpAddress()) + .orElseGet(() -> containerInfo.getNetworkSettings().getIpAddress()); + } +} diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/testcontainers/ConfluentKafkaConnectContainer.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/testcontainers/ConfluentKafkaConnectContainer.java new file mode 100644 index 0000000..04a8064 --- /dev/null +++ b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/testcontainers/ConfluentKafkaConnectContainer.java @@ -0,0 +1,63 @@ +package com.solace.connector.kafka.connect.sink.it.util.testcontainers; + +import com.solace.connector.kafka.connect.sink.it.Tools; +import org.testcontainers.containers.BindMode; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.KafkaContainer; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.utility.DockerImageName; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class ConfluentKafkaConnectContainer extends GenericContainer { + public static final int CONNECT_PORT = 28083; + private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("confluentinc/cp-kafka-connect-base"); + private static final String DEFAULT_IMAGE_TAG = "6.2.1"; + + public ConfluentKafkaConnectContainer(KafkaContainer kafka, + ConfluentKafkaSchemaRegistryContainer schemaRegistry) { + this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_IMAGE_TAG), kafka, schemaRegistry); + } + + public ConfluentKafkaConnectContainer(DockerImageName dockerImageName, + KafkaContainer kafka, + ConfluentKafkaSchemaRegistryContainer schemaRegistry) { + super(dockerImageName); + assertThat(kafka.getNetworkAliases().size(), greaterThanOrEqualTo(2)); + assertThat(schemaRegistry.getNetworkAliases().size(), greaterThanOrEqualTo(2)); + assertEquals(kafka.getNetwork(), schemaRegistry.getNetwork()); + + dependsOn(kafka, schemaRegistry); + withNetwork(kafka.getNetwork()); + withExposedPorts(CONNECT_PORT); + withEnv("CONNECT_REST_PORT", Integer.toString(CONNECT_PORT)); + withEnv("CONNECT_GROUP_ID", "quickstart-avro"); + withEnv("CONNECT_CONFIG_STORAGE_TOPIC", "quickstart-avro-config"); + withEnv("CONNECT_OFFSET_STORAGE_TOPIC", "quickstart-avro-offsets"); + withEnv("CONNECT_STATUS_STORAGE_TOPIC", "quickstart-avro-status"); + withEnv("CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR", "1"); + withEnv("CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR", "1"); + withEnv("CONNECT_STATUS_STORAGE_REPLICATION_FACTOR", "1"); + withEnv("CONNECT_KEY_CONVERTER", "io.confluent.connect.avro.AvroConverter"); + withEnv("CONNECT_VALUE_CONVERTER", "io.confluent.connect.avro.AvroConverter"); + withEnv("CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL", String.format("http://%s:%s", + schemaRegistry.getNetworkAliases().get(1), ConfluentKafkaSchemaRegistryContainer.REGISTRY_PORT)); + withEnv("CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL", String.format("http://%s:%s", + 
schemaRegistry.getNetworkAliases().get(1), ConfluentKafkaSchemaRegistryContainer.REGISTRY_PORT)); + withEnv("CONNECT_BOOTSTRAP_SERVERS", String.format("%s:9092", kafka.getNetworkAliases().get(1))); + withEnv("CONNECT_INTERNAL_KEY_CONVERTER", "org.apache.kafka.connect.json.JsonConverter"); + withEnv("CONNECT_INTERNAL_VALUE_CONVERTER", "org.apache.kafka.connect.json.JsonConverter"); + withEnv("CONNECT_REST_ADVERTISED_HOST_NAME", "localhost"); + withEnv("CONNECT_LOG4J_ROOT_LOGLEVEL", "INFO"); + withEnv("CONNECT_PLUGIN_PATH", "/usr/share/java,/etc/kafka-connect/jars"); + withClasspathResourceMapping(Tools.getUnzippedConnectorDirName() + "/lib", + "/etc/kafka-connect/jars", BindMode.READ_ONLY); + waitingFor( Wait.forLogMessage(".*Kafka Connect started.*", 1) ); + } + + public String getConnectUrl() { + return String.format("http://%s:%s", getHost(), getMappedPort(ConfluentKafkaConnectContainer.CONNECT_PORT)); + } +} diff --git a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/testcontainers/ConfluentKafkaControlCenterContainer.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/testcontainers/ConfluentKafkaControlCenterContainer.java new file mode 100644 index 0000000..2973674 --- /dev/null +++ b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/testcontainers/ConfluentKafkaControlCenterContainer.java @@ -0,0 +1,41 @@ +package com.solace.connector.kafka.connect.sink.it.util.testcontainers; + +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.KafkaContainer; +import org.testcontainers.utility.DockerImageName; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class ConfluentKafkaControlCenterContainer extends GenericContainer { + private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("confluentinc/cp-enterprise-control-center"); + private static final String DEFAULT_IMAGE_TAG = "6.2.1"; + + public ConfluentKafkaControlCenterContainer(KafkaContainer kafka, + ConfluentKafkaSchemaRegistryContainer schemaRegistry) { + this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_IMAGE_TAG), kafka, schemaRegistry); + } + + public ConfluentKafkaControlCenterContainer(DockerImageName dockerImageName, + KafkaContainer kafka, + ConfluentKafkaSchemaRegistryContainer schemaRegistry) { + super(dockerImageName); + assertThat(kafka.getNetworkAliases().size(), greaterThanOrEqualTo(2)); + assertThat(schemaRegistry.getNetworkAliases().size(), greaterThanOrEqualTo(2)); + assertEquals(kafka.getNetwork(), schemaRegistry.getNetwork()); + + // withExposedPorts(9021); + dependsOn(kafka, schemaRegistry); + withNetwork(kafka.getNetwork()); + withEnv("CONTROL_CENTER_REPLICATION_FACTOR", "1"); + withEnv("CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS", "1"); + withEnv("CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS", "1"); + withEnv("CONFLUENT_METRICS_TOPIC_REPLICATION", "1"); + withEnv("CONTROL_CENTER_SCHEMA_REGISTRY_URL", String.format("http://%s:%s", + schemaRegistry.getNetworkAliases().get(1), ConfluentKafkaSchemaRegistryContainer.REGISTRY_PORT)); + withEnv("CONTROL_CENTER_BOOTSTRAP_SERVERS", String.format("%s:9092", kafka.getNetworkAliases().get(1))); + withEnv("CONTROL_CENTER_ZOOKEEPER_CONNECT", String.format("%s:%s", kafka.getNetworkAliases().get(1), + KafkaContainer.ZOOKEEPER_PORT)); + } +} diff --git 
a/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/testcontainers/ConfluentKafkaSchemaRegistryContainer.java b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/testcontainers/ConfluentKafkaSchemaRegistryContainer.java new file mode 100644 index 0000000..49dadc8 --- /dev/null +++ b/src/integrationTest/java/com/solace/connector/kafka/connect/sink/it/util/testcontainers/ConfluentKafkaSchemaRegistryContainer.java @@ -0,0 +1,40 @@ +package com.solace.connector.kafka.connect.sink.it.util.testcontainers; + +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.KafkaContainer; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.utility.DockerImageName; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +public class ConfluentKafkaSchemaRegistryContainer extends GenericContainer { + public static final int REGISTRY_PORT = 8081; + private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse("confluentinc/cp-schema-registry"); + private static final String DEFAULT_IMAGE_TAG = "6.2.1"; + + public ConfluentKafkaSchemaRegistryContainer(KafkaContainer kafka) { + this(DEFAULT_IMAGE_NAME.withTag(DEFAULT_IMAGE_TAG), kafka); + } + + public ConfluentKafkaSchemaRegistryContainer(DockerImageName dockerImageName, KafkaContainer kafka) { + super(dockerImageName); + + assertNotNull(kafka.getNetwork()); + assertThat(kafka.getNetworkAliases().size(), greaterThanOrEqualTo(2)); + + dependsOn(kafka); + withNetwork(kafka.getNetwork()); + withEnv("SCHEMA_REGISTRY_LISTENERS", "http://0.0.0.0:" + REGISTRY_PORT); + withEnv("SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS", String.format("PLAINTEXT://%s:9092", + kafka.getNetworkAliases().get(1))); + waitingFor(Wait.forHttp("/subjects").forStatusCode(200)); + } + + @Override + protected void doStart() { + withEnv("SCHEMA_REGISTRY_HOST_NAME", getNetworkAliases().size() > 1 ? 
getNetworkAliases().get(1) : getHost()); + super.doStart(); + } +} diff --git a/src/integrationTest/resources/docker-compose-kafka-apache.yml b/src/integrationTest/resources/docker-compose-kafka-apache.yml deleted file mode 100644 index afa48bf..0000000 --- a/src/integrationTest/resources/docker-compose-kafka-apache.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: '3' - -services: - zookeeper: - image: bitnami/zookeeper:3 - ports: - - 2181:2181 - environment: - ZOOKEEPER_CLIENT_PORT: 2181 - ZOOKEEPER_TICK_TIME: 2000 - ALLOW_ANONYMOUS_LOGIN: 'yes' - kafka: - image: bitnami/kafka:2 - ports: - - 9092:9092 - - 29092:29092 - - 39092:39092 - environment: - KAFKA_CFG_BROKER_ID: 1 - KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181 - ALLOW_PLAINTEXT_LISTENER: 'yes' - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,PLAINTEXT_EXTHOST:PLAINTEXT - KAFKA_CFG_LISTENERS: PLAINTEXT://:9092,PLAINTEXT_HOST://:29092,PLAINTEXT_EXTHOST://:39092 - KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092,PLAINTEXT_EXTHOST://$KAFKA_HOST:39092 -# KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092,PLAINTEXT_EXTHOST://$KAFKA_HOST:39092 -# KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 -# KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 - depends_on: - - zookeeper diff --git a/src/integrationTest/resources/docker-compose-kafka-confluent.yml b/src/integrationTest/resources/docker-compose-kafka-confluent.yml deleted file mode 100644 index 2982b01..0000000 --- a/src/integrationTest/resources/docker-compose-kafka-confluent.yml +++ /dev/null @@ -1,71 +0,0 @@ -version: '3' - -services: - zookeeper: - image: confluentinc/cp-zookeeper:5.4.0 - ports: - - 2181:2181 - environment: - ZOOKEEPER_CLIENT_PORT: 2181 - ZOOKEEPER_TICK_TIME: 2000 - kafka: - image: confluentinc/cp-kafka:5.4.0 - ports: - - 9092:9092 - - 29092:29092 - - 39092:39092 - environment: - KAFKA_BROKER_ID: 1 - KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,PLAINTEXT_EXTHOST:PLAINTEXT - KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092,PLAINTEXT_EXTHOST://$KAFKA_HOST:39092 - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 - KAFKA_TOPIC: $KAFKA_TOPIC - depends_on: - - zookeeper - kafka-setup: - image: confluentinc/cp-kafka:5.4.0 - hostname: kafka-setup - depends_on: - - kafka - - zookeeper - command: "bash -c 'echo Waiting for Kafka to be ready... && \ - cub kafka-ready -b kafka:9092 1 30 && \ - kafka-topics --create --if-not-exists --zookeeper zookeeper:2181 --partitions 1 --replication-factor 1 --topic $KAFKA_TOPIC && \ - sleep 30'" - environment: - # The following settings are listed here only to satisfy the image's requirements. - # We override the image's `command` anyways, hence this container will not start a broker. 
- KAFKA_BROKER_ID: ignored - KAFKA_ZOOKEEPER_CONNECT: ignored - - schema-registry: - image: confluentinc/cp-schema-registry:5.4.0 - ports: - - 8081:8081 - environment: - SCHEMA_REGISTRY_HOST_NAME: localhost - SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 - SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka:9092 - depends_on: - - kafka - - control-center: - image: confluentinc/cp-enterprise-control-center:latest - hostname: control-center - depends_on: - - zookeeper - - kafka - - schema-registry - ports: - - "9021:9021" - environment: - CONTROL_CENTER_BOOTSTRAP_SERVERS: 'kafka:9092' - CONTROL_CENTER_ZOOKEEPER_CONNECT: 'zookeeper:2181' - CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081" - CONTROL_CENTER_REPLICATION_FACTOR: 1 - CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1 - CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1 - CONFLUENT_METRICS_TOPIC_REPLICATION: 1 - PORT: 9021 \ No newline at end of file diff --git a/src/integrationTest/resources/docker-compose-solace.yml b/src/integrationTest/resources/docker-compose-solace.yml deleted file mode 100644 index fad702e..0000000 --- a/src/integrationTest/resources/docker-compose-solace.yml +++ /dev/null @@ -1,25 +0,0 @@ -version: '3' - -services: - solbroker: - image: solace/solace-pubsub-standard:$PUBSUB_TAG - hostname: $PUBSUB_HOSTNAME - env_file: - - ./solace.env - ports: - - "2222:2222" - - "8080:8080" - - "55003:55003" - - "55443:55443" - - "55445:55445" - - "55555:55555" - - "55556:55556" - - "5672:5672" - - "5550:5550" - - "8008:8008" - shm_size: 2g - ulimits: - memlock: -1 - nofile: - soft: 2448 - hard: 42192 diff --git a/src/integrationTest/resources/log4j2.xml b/src/integrationTest/resources/log4j2.xml new file mode 100644 index 0000000..01eb664 --- /dev/null +++ b/src/integrationTest/resources/log4j2.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/integrationTest/resources/logback-test.xml b/src/integrationTest/resources/logback-test.xml deleted file mode 100644 index 985c68e..0000000 --- a/src/integrationTest/resources/logback-test.xml +++ /dev/null @@ -1,14 +0,0 @@ - - - - %d{HH:mm:ss.SSS} [%thread] %-5level %logger -%msg%n%rEx{full, org} - - - - - - - - - - \ No newline at end of file diff --git a/src/integrationTest/resources/manual-setup.properties b/src/integrationTest/resources/manual-setup.properties deleted file mode 100644 index 94b0e6c..0000000 --- a/src/integrationTest/resources/manual-setup.properties +++ /dev/null @@ -1,6 +0,0 @@ -#sol.host=mr1u6o37qn3lar.-cloud-clmessaging.solace.cloud -sol.username=test -sol.password=test -#sol.vpn_name=b-1 -#kafka.connect_rest_host=A:28083 -#kafka.bootstrap_servers=B:39092 \ No newline at end of file diff --git a/src/integrationTest/resources/solace.env b/src/integrationTest/resources/solace.env deleted file mode 100644 index 863a835..0000000 --- a/src/integrationTest/resources/solace.env +++ /dev/null @@ -1,4 +0,0 @@ -username_admin_globalaccesslevel=admin -username_admin_password=admin -system_scaling_maxconnectioncount=100 -logging_debug_output=all \ No newline at end of file diff --git a/src/main/java/com/solace/connector/kafka/connect/sink/SolRecordProcessorIF.java b/src/main/java/com/solace/connector/kafka/connect/sink/SolRecordProcessorIF.java index ef2df8f..212ff0c 100644 --- a/src/main/java/com/solace/connector/kafka/connect/sink/SolRecordProcessorIF.java +++ b/src/main/java/com/solace/connector/kafka/connect/sink/SolRecordProcessorIF.java @@ -21,10 +21,26 @@ import 
com.solacesystems.jcsmp.BytesXMLMessage; +import org.apache.kafka.common.Configurable; import org.apache.kafka.connect.sink.SinkRecord; -public interface SolRecordProcessorIF { +import java.util.Map; +public interface SolRecordProcessorIF extends Configurable { + + /** + * {@inheritDoc} + */ + @Override + default void configure(Map configs) { } + + /** + * Converts a record consumed from Kafka into a Solace {@link BytesXMLMessage}. + * + * @param skey the Kafka record-key. + * @param record the Kafka record-value. + * @return a new {@link BytesXMLMessage}. + */ BytesXMLMessage processRecord(String skey, SinkRecord record); -} +} \ No newline at end of file diff --git a/src/main/java/com/solace/connector/kafka/connect/sink/SolSessionEventCallbackHandler.java b/src/main/java/com/solace/connector/kafka/connect/sink/SolSessionEventCallbackHandler.java index 6981134..8f1e469 100644 --- a/src/main/java/com/solace/connector/kafka/connect/sink/SolSessionEventCallbackHandler.java +++ b/src/main/java/com/solace/connector/kafka/connect/sink/SolSessionEventCallbackHandler.java @@ -28,7 +28,7 @@ public class SolSessionEventCallbackHandler implements SessionEventHandler { - final Logger log = LoggerFactory.getLogger(SolSessionEventCallbackHandler.class); + private static final Logger log = LoggerFactory.getLogger(SolSessionEventCallbackHandler.class); @Override public void handleEvent(SessionEventArgs event) { diff --git a/src/main/java/com/solace/connector/kafka/connect/sink/SolSessionHandler.java b/src/main/java/com/solace/connector/kafka/connect/sink/SolSessionHandler.java index 00a5a1b..9ce427c 100644 --- a/src/main/java/com/solace/connector/kafka/connect/sink/SolSessionHandler.java +++ b/src/main/java/com/solace/connector/kafka/connect/sink/SolSessionHandler.java @@ -29,7 +29,9 @@ import com.solacesystems.jcsmp.transaction.TransactedSession; import java.util.Enumeration; +import java.util.Optional; +import org.apache.kafka.common.config.types.Password; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,13 +55,14 @@ public SolSessionHandler(SolaceSinkConnectorConfig lconfig) { */ public void configureSession() { // Required Properties - properties.setProperty(JCSMPProperties.USERNAME, + properties.setProperty(JCSMPProperties.USERNAME, lconfig.getString(SolaceSinkConstants.SOL_USERNAME)); - properties.setProperty(JCSMPProperties.PASSWORD, - lconfig.getString(SolaceSinkConstants.SOL_PASSWORD)); - properties.setProperty(JCSMPProperties.VPN_NAME, + properties.setProperty(JCSMPProperties.PASSWORD, + Optional.ofNullable(lconfig.getPassword(SolaceSinkConstants.SOL_PASSWORD)) + .map(Password::value).orElse(null)); + properties.setProperty(JCSMPProperties.VPN_NAME, lconfig.getString(SolaceSinkConstants.SOL_VPN_NAME)); - properties.setProperty(JCSMPProperties.HOST, + properties.setProperty(JCSMPProperties.HOST, lconfig.getString(SolaceSinkConstants.SOL_HOST)); // Channel Properties @@ -71,7 +74,7 @@ public void configureSession() { .getInt(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_connectRetries)); chanProperties.setReconnectRetries(lconfig .getInt(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_reconnectRetries)); - + chanProperties.setConnectRetriesPerHost( lconfig.getInt(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_connectRetriesPerHost)); chanProperties.setReconnectRetryWaitInMillis( @@ -111,7 +114,7 @@ public void configureSession() { lconfig.getBoolean(SolaceSinkConstants.SOL_IGNORE_DUPLICATE_SUBSCRIPTION_ERROR)); properties.setBooleanProperty(JCSMPProperties.IGNORE_SUBSCRIPTION_NOT_FOUND_ERROR, 
lconfig.getBoolean(SolaceSinkConstants.SOL_IGNORE_SUBSCRIPTION_NOT_FOUND_ERROR)); - properties.setBooleanProperty(JCSMPProperties.NO_LOCAL, + properties.setBooleanProperty(JCSMPProperties.NO_LOCAL, lconfig.getBoolean(SolaceSinkConstants.SOL_NO_LOCAL)); properties.setProperty(JCSMPProperties.AUTHENTICATION_SCHEME, lconfig.getString(SolaceSinkConstants.SOl_AUTHENTICATION_SCHEME)); @@ -135,47 +138,50 @@ public void configureSession() { lconfig.getBoolean(SolaceSinkConstants.SOL_SSL_VALIDATE_CERTIFICATE)); properties.setProperty(JCSMPProperties.SSL_VALIDATE_CERTIFICATE_DATE, lconfig.getBoolean(SolaceSinkConstants.SOL_SSL_VALIDATE_CERTIFICATE_DATE)); - properties.setProperty(JCSMPProperties.SSL_TRUST_STORE, + properties.setProperty(JCSMPProperties.SSL_TRUST_STORE, lconfig.getString(SolaceSinkConstants.SOL_SSL_TRUST_STORE)); properties.setProperty(JCSMPProperties.SSL_TRUST_STORE_PASSWORD, - lconfig.getString(SolaceSinkConstants.SOL_SSL_TRUST_STORE_PASSWORD)); + Optional.ofNullable(lconfig.getPassword(SolaceSinkConstants.SOL_SSL_TRUST_STORE_PASSWORD)) + .map(Password::value).orElse(null)); properties.setProperty(JCSMPProperties.SSL_TRUST_STORE_FORMAT, lconfig.getString(SolaceSinkConstants.SOL_SSL_TRUST_STORE_FORMAT)); properties.setProperty(JCSMPProperties.SSL_TRUSTED_COMMON_NAME_LIST, lconfig.getString(SolaceSinkConstants.SOL_SSL_TRUSTED_COMMON_NAME_LIST)); - properties.setProperty(JCSMPProperties.SSL_KEY_STORE, + properties.setProperty(JCSMPProperties.SSL_KEY_STORE, lconfig.getString(SolaceSinkConstants.SOL_SSL_KEY_STORE)); properties.setProperty(JCSMPProperties.SSL_KEY_STORE_PASSWORD, - lconfig.getString(SolaceSinkConstants.SOL_SSL_KEY_STORE_PASSWORD)); + Optional.ofNullable(lconfig.getPassword(SolaceSinkConstants.SOL_SSL_KEY_STORE_PASSWORD)) + .map(Password::value).orElse(null)); properties.setProperty(JCSMPProperties.SSL_KEY_STORE_FORMAT, lconfig.getString(SolaceSinkConstants.SOL_SSL_KEY_STORE_FORMAT)); properties.setProperty(JCSMPProperties.SSL_KEY_STORE_NORMALIZED_FORMAT, lconfig.getString(SolaceSinkConstants.SOL_SSL_KEY_STORE_NORMALIZED_FORMAT)); properties.setProperty(JCSMPProperties.SSL_PRIVATE_KEY_PASSWORD, - lconfig.getString(SolaceSinkConstants.SOL_SSL_PRIVATE_KEY_PASSWORD)); + Optional.ofNullable(lconfig.getPassword(SolaceSinkConstants.SOL_SSL_PRIVATE_KEY_PASSWORD)) + .map(Password::value).orElse(null)); } /** * Create and connect JCSMPSession * @return - * @throws JCSMPException + * @throws JCSMPException */ public void connectSession() throws JCSMPException { System.setProperty("java.security.auth.login.config", lconfig.getString(SolaceSinkConstants.SOL_KERBEROS_LOGIN_CONFIG)); System.setProperty("java.security.krb5.conf", lconfig.getString(SolaceSinkConstants.SOL_KERBEROS_KRB5_CONFIG)); - - session = JCSMPFactory.onlyInstance().createSession(properties, + + session = JCSMPFactory.onlyInstance().createSession(properties, null, new SolSessionEventCallbackHandler()); session.connect(); } /** - * Create transacted session + * Create transacted session * @return TransactedSession - * @throws JCSMPException + * @throws JCSMPException */ public void createTxSession() throws JCSMPException { txSession = session.createTransactedSession(); @@ -200,12 +206,14 @@ public void printStats() { log.info("\n"); } } - + /** * Shutdown Session. 
*/ public void shutdown() { - session.closeSession(); + if (session != null) { + session.closeSession(); + } } } diff --git a/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkConnectorConfig.java b/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkConnectorConfig.java index a3a4a5e..f6db799 100644 --- a/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkConnectorConfig.java +++ b/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkConnectorConfig.java @@ -31,7 +31,7 @@ public class SolaceSinkConnectorConfig extends AbstractConfig { private static final Logger log = LoggerFactory.getLogger(SolaceSinkConnectorConfig.class); - + /** * Create Solace Configuration Properties from JSON or Properties file. * @param properties returns Properties @@ -48,21 +48,24 @@ public SolaceSinkConnectorConfig(Map properties) { */ public static ConfigDef solaceConfigDef() { return new ConfigDef() - .define(SolaceSinkConstants.SOL_USERNAME, + .define(SolaceSinkConstants.SOL_USERNAME, Type.STRING, "default", Importance.HIGH, "Solace username") - .define(SolaceSinkConstants.SOL_PASSWORD, - Type.STRING, "default", Importance.HIGH, "Solace user password") + .define(SolaceSinkConstants.SOL_PASSWORD, + Type.PASSWORD, "default", Importance.HIGH, "Solace user password") .define(SolaceSinkConstants.SOL_HOST, Type.STRING, null, Importance.HIGH, "host to connect with, can be comma delimited for HA/DR") .define(SolaceSinkConstants.SOL_VPN_NAME, Type.STRING, "default", Importance.HIGH, "Solace VPN to connect with ") .define(SolaceSinkConstants.SOL_TOPICS, Type.STRING, null, Importance.MEDIUM, "Solace topic or list of topics to subscribe from") - .define(SolaceSinkConstants.SOl_QUEUE, + .define(SolaceSinkConstants.SOl_QUEUE, Type.STRING, null, Importance.MEDIUM, "Solace queue to consume from") - .define(SolaceSinkConstants.SOL_RECORD_PROCESSOR, + .define(SolaceSinkConstants.SOL_RECORD_PROCESSOR, Type.CLASS, SolRecordProcessorIF.class, Importance.HIGH, "default Solace message processor to use against Kafka Sink Records") + .define(SolaceSinkConstants.SOL_RECORD_PROCESSOR_IGNORE_ERROR, + Type.BOOLEAN, false, Importance.MEDIUM, + "If enabled, records that throw record processor errors will be discarded") .define(SolaceSinkConstants.SOL_LOCALHOST, Type.STRING, null, Importance.LOW, "The hostname or IP address of the machine on which the application " + "is running. 
On a multihomed machine, it is strongly recommended " @@ -75,104 +78,104 @@ public static ConfigDef solaceConfigDef() { .define(SolaceSinkConstants.SOL_REAPPLY_SUBSCRIPTIONS, Type.BOOLEAN, true, Importance.LOW, "If enabled, the API maintains a local cache of subscriptions " + "and reapplies them when the subscriber connection is reestablished") - .define(SolaceSinkConstants.SOL_GENERATE_SEND_TIMESTAMPS, + .define(SolaceSinkConstants.SOL_GENERATE_SEND_TIMESTAMPS, Type.BOOLEAN, false, Importance.LOW, "indicates whether to generate a send timestamp in outgoing messages") - .define(SolaceSinkConstants.SOL_GENERATE_RCV_TIMESTAMPS, + .define(SolaceSinkConstants.SOL_GENERATE_RCV_TIMESTAMPS, Type.BOOLEAN, false, Importance.LOW, "Indicates whether to generate a receive timestamp on incoming messages") - .define(SolaceSinkConstants.SOL_GENERATE_SEQUENCE_NUMBERS, + .define(SolaceSinkConstants.SOL_GENERATE_SEQUENCE_NUMBERS, Type.BOOLEAN, false, Importance.LOW, "Indicates whether to generate a sequence number in outgoing messages") - .define(SolaceSinkConstants.SOL_CALCULATE_MESSAGE_EXPIRATION, + .define(SolaceSinkConstants.SOL_CALCULATE_MESSAGE_EXPIRATION, Type.BOOLEAN, false, Importance.LOW, "Indicates whether to calculate message expiration time in " + "outgoing messages and incoming messages") .define(SolaceSinkConstants.SOL_PUB_MULTI_THREAD, Type.BOOLEAN, true, Importance.LOW, "If enabled (default), the XMLMessageProducer is safe to access from multiple threads") - .define(SolaceSinkConstants.SOL_PUB_USE_INTERMEDIATE_DIRECT_BUF, + .define(SolaceSinkConstants.SOL_PUB_USE_INTERMEDIATE_DIRECT_BUF, Type.BOOLEAN, true, Importance.LOW, "If enabled, during send operations, the XMLMessageProducer " + "concatenates all published data. This can result in higher throughput " + "for certain send operations. It can, however, lead to performance " + "degradation for some scenarios with large messages") - .define(SolaceSinkConstants.SOL_MESSAGE_CALLBACK_ON_REACTOR, + .define(SolaceSinkConstants.SOL_MESSAGE_CALLBACK_ON_REACTOR, Type.BOOLEAN, false, Importance.LOW, "If enabled, messages delivered asynchronously to an XMLMessageListener are " + "delivered directly from the I/O thread instead of a consumer notification thread. " + "An application making use of this setting MUST return quickly from the " + "onReceive() callback, and MUST NOT call ANY session methods from the I/O thread") - .define(SolaceSinkConstants.SOL_IGNORE_DUPLICATE_SUBSCRIPTION_ERROR, + .define(SolaceSinkConstants.SOL_IGNORE_DUPLICATE_SUBSCRIPTION_ERROR, Type.BOOLEAN, false, Importance.LOW, "ignore errors caused by subscriptions being already presents") - .define(SolaceSinkConstants.SOL_IGNORE_SUBSCRIPTION_NOT_FOUND_ERROR, + .define(SolaceSinkConstants.SOL_IGNORE_SUBSCRIPTION_NOT_FOUND_ERROR, Type.BOOLEAN, false, Importance.LOW, "When removing subscriptions ignore errors caused by subscriptions not being found.") .define(SolaceSinkConstants.SOL_NO_LOCAL, Type.BOOLEAN, false, Importance.LOW, "If this property is true, messages published on the session will not be " + "received on the same session even if the client has a subscription " + "that matches the published topic.") - .define(SolaceSinkConstants.SOL_SUB_ACK_WINDOW_SIZE, + .define(SolaceSinkConstants.SOL_SUB_ACK_WINDOW_SIZE, Type.INT, 255, Importance.LOW, "The size of the sliding subscriber ACK window. 
The valid range is 1-255") - .define(SolaceSinkConstants.SOL_QUEUE_MESSAGES_AUTOFLUSH_SIZE, + .define(SolaceSinkConstants.SOL_QUEUE_MESSAGES_AUTOFLUSH_SIZE, Type.INT, 200, Importance.LOW, "Number of outstanding transacted messages before autoflush. Must be lower than " + "max PubSub+ transaction size (255). The valid range is 1-200") - .define(SolaceSinkConstants.SOl_AUTHENTICATION_SCHEME, + .define(SolaceSinkConstants.SOl_AUTHENTICATION_SCHEME, Type.STRING, "AUTHENTICATION_SCHEME_BASIC", Importance.MEDIUM, "String property specifying the authentication scheme.") - .define(SolaceSinkConstants.SOL_KRB_SERVICE_NAME, + .define(SolaceSinkConstants.SOL_KRB_SERVICE_NAME, Type.STRING, "solace", Importance.MEDIUM, "This property is used to specify the ServiceName portion of the " + "Service Principal Name (SPN) that has a format of ServiceName/ApplianceName@REALM.") - .define(SolaceSinkConstants.SOL_SSL_CONNECTION_DOWNGRADE_TO, + .define(SolaceSinkConstants.SOL_SSL_CONNECTION_DOWNGRADE_TO, Type.STRING, "", Importance.MEDIUM, "Session property specifying a transport protocol that SSL session " + "connection will be downgraded to after client authentication. " + "Allowed values: TRANSPORT_PROTOCOL_PLAIN_TEXT.") - .define(SolaceSinkConstants.SOl_USE_TRANSACTIONS_FOR_QUEUE, + .define(SolaceSinkConstants.SOl_USE_TRANSACTIONS_FOR_QUEUE, Type.BOOLEAN, true, Importance.LOW, "Specifies if writing messages to queue destination shall use transactions.") - .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_connectTimeoutInMillis, + .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_connectTimeoutInMillis, Type.INT, 30000, Importance.MEDIUM, "Timeout value (in ms) for creating an initial connection to Solace") - .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_readTimeoutInMillis, + .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_readTimeoutInMillis, Type.INT, 10000, Importance.MEDIUM, "Timeout value (in ms) for reading a reply from Solace") - .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_connectRetries, + .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_connectRetries, Type.INT, 0, Importance.MEDIUM, "The number of times to attempt and retry a connection to the host appliance " + "(or list of appliances) during initial connection setup") - .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_reconnectRetries, + .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_reconnectRetries, Type.INT, 0, Importance.MEDIUM, "The number of times to attempt to reconnect to the appliance " + "(or list of appliances) after an initial connected session goes down") - .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_connectRetriesPerHost, + .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_connectRetriesPerHost, Type.INT, 0, Importance.MEDIUM, "This property defines how many times to try to connect or reconnect to a " + "single host before moving to the next host in the list") - .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_reconnectRetryWaitInMillis, + .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_reconnectRetryWaitInMillis, Type.INT, 3000, Importance.MEDIUM, "How much time in (MS) to wait between each attempt to connect or reconnect to a host") - .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_keepAliveIntervalInMillis, + .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_keepAliveIntervalInMillis, Type.INT, 3000, Importance.MEDIUM, "The amount of time (in ms) to wait between sending out keep-alive messages") - .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_keepAliveLimit, + 
.define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_keepAliveLimit, Type.INT, 10, Importance.MEDIUM, "The maximum number of consecutive keep-alive messages that can be sent " + "without receiving a response before the connection is closed by the API") .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_sendBuffer, Type.INT, 65536, Importance.MEDIUM, "The size (in bytes) of the send socket buffer.") - .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_receiveBuffer, + .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_receiveBuffer, Type.INT, 65536, Importance.MEDIUM, "The size (in bytes) of the receive socket buffer.") - .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_tcpNoDelay, + .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_tcpNoDelay, Type.BOOLEAN, true, Importance.LOW, "Whether to set the TCP_NODELAY option. When enabled, this " + "option disables the Nagle's algorithm.") - .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_compressionLevel, + .define(SolaceSinkConstants.SOL_CHANNEL_PROPERTY_compressionLevel, Type.INT, 0, Importance.MEDIUM, "A compressionLevel setting of 1-9 sets the ZLIB compression level to use; " + "a setting of 0 disables compression entirely.") @@ -194,47 +197,47 @@ public static ConfigDef solaceConfigDef() { .define(SolaceSinkConstants.SOL_SSL_CIPHER_SUITES, Type.STRING, "", Importance.LOW, "This property is used to specify a comma separated list of " + "cipher suites in order of preference used for SSL connections. ") - .define(SolaceSinkConstants.SOL_SSL_VALIDATE_CERTIFICATE, + .define(SolaceSinkConstants.SOL_SSL_VALIDATE_CERTIFICATE, Type.BOOLEAN, true, Importance.LOW, "This property is used to specify whether the API should validate server certificates ") - .define(SolaceSinkConstants.SOL_SSL_VALIDATE_CERTIFICATE_DATE, + .define(SolaceSinkConstants.SOL_SSL_VALIDATE_CERTIFICATE_DATE, Type.BOOLEAN, true, Importance.LOW, "This property is used to specify whether the API should validate " + "server certificate's expiry") - .define(SolaceSinkConstants.SOL_SSL_TRUST_STORE, + .define(SolaceSinkConstants.SOL_SSL_TRUST_STORE, Type.STRING, "/lib/security/jssecacerts", Importance.LOW, "This property is used to specify the truststore file to use in URL or path format.") - .define(SolaceSinkConstants.SOL_SSL_TRUST_STORE_PASSWORD, Type.STRING, "", Importance.LOW, + .define(SolaceSinkConstants.SOL_SSL_TRUST_STORE_PASSWORD, Type.PASSWORD, "", Importance.LOW, "This property is used to specify the password of the truststore " + "given in SSL_TRUST_STORE") - .define(SolaceSinkConstants.SOL_SSL_TRUST_STORE_FORMAT, + .define(SolaceSinkConstants.SOL_SSL_TRUST_STORE_FORMAT, Type.STRING, "JKS", Importance.LOW, "This property is used to specify the format of the truststore " + "given in SSL_TRUST_STORE.") - .define(SolaceSinkConstants.SOL_SSL_TRUSTED_COMMON_NAME_LIST, + .define(SolaceSinkConstants.SOL_SSL_TRUSTED_COMMON_NAME_LIST, Type.STRING, "", Importance.LOW, "This property is used to specify a comma separated list of acceptable " + "common names for matching with server certificates.") .define(SolaceSinkConstants.SOL_SSL_KEY_STORE, Type.STRING, "", Importance.LOW, "This property is used to specify the keystore file to use in URL or path format.") - .define(SolaceSinkConstants.SOL_SSL_KEY_STORE_PASSWORD, Type.STRING, "", Importance.LOW, + .define(SolaceSinkConstants.SOL_SSL_KEY_STORE_PASSWORD, Type.PASSWORD, "", Importance.LOW, "This property is used to specify the password of the " + "keystore specified by SSL_KEY_STORE.") .define(SolaceSinkConstants.SOL_SSL_KEY_STORE_FORMAT, 
Type.STRING, "JKS", Importance.LOW, "This property is used to specify the format of the keystore given in SSL_KEY_STORE.") - .define(SolaceSinkConstants.SOL_SSL_KEY_STORE_NORMALIZED_FORMAT, + .define(SolaceSinkConstants.SOL_SSL_KEY_STORE_NORMALIZED_FORMAT, Type.STRING, "JKS", Importance.LOW, "This property is used to specify the format of an internal normalized " + "representation of the keystore if it needs to be different from the default format.") - .define(SolaceSinkConstants.SOL_SSL_PRIVATE_KEY_ALIAS, + .define(SolaceSinkConstants.SOL_SSL_PRIVATE_KEY_ALIAS, Type.STRING, "", Importance.LOW, "This property is used to specify the alias of the private key to use " + "for client certificate authentication.") - .define(SolaceSinkConstants.SOL_SSL_PRIVATE_KEY_PASSWORD, - Type.STRING, "", Importance.LOW, + .define(SolaceSinkConstants.SOL_SSL_PRIVATE_KEY_PASSWORD, + Type.PASSWORD, "", Importance.LOW, "This property is used to specify the password that deciphers " + "the private key from the key store.") - .define(SolaceSinkConstants.SOL_ACK_EVENT_MODE, + .define(SolaceSinkConstants.SOL_ACK_EVENT_MODE, Type.STRING, "SUPPORTED_ACK_EVENT_MODE_WINDOWED", Importance.MEDIUM, "API sends out message acknowledgement event for Guaranteed Messages," @@ -254,11 +257,21 @@ public static ConfigDef solaceConfigDef() { .define(SolaceSinkConstants.SOL_KERBEROS_LOGIN_CONFIG, Type.STRING, "", Importance.LOW, "Location of the Kerberos Login Configuration File") - + .define( + SolaceSinkConstants.SOL_EMIT_KAFKA_RECORD_HEADERS_ENABLED, + Type.BOOLEAN, + false, + Importance.LOW, + "Should Kafka headers be automatically copied to Solace messages as user properties." + ) ; } + public boolean isEmitKafkaRecordHeadersEnabled() { + return getBoolean(SolaceSinkConstants.SOL_EMIT_KAFKA_RECORD_HEADERS_ENABLED); + } + static ConfigDef config = solaceConfigDef(); } diff --git a/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkConstants.java b/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkConstants.java index 3050bb0..ad3fa5b 100644 --- a/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkConstants.java +++ b/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkConstants.java @@ -30,6 +30,9 @@ public class SolaceSinkConstants { // High Importance Solace Message processor of Kafka Sink Records public static final String SOL_RECORD_PROCESSOR = "sol.record_processor_class"; + // Medium Importance Solace Message processor of Kafka Sink Records + public static final String SOL_RECORD_PROCESSOR_IGNORE_ERROR = "sol.record_processor.error.ignore"; + // High Importance Solace public static final String SOL_HOST = "sol.host"; public static final String SOL_USERNAME = "sol.username"; @@ -50,12 +53,12 @@ public class SolaceSinkConstants { public static final String SOL_CALCULATE_MESSAGE_EXPIRATION = "sol.calculate_message_expiration"; public static final String SOL_REAPPLY_SUBSCRIPTIONS = "sol.reapply_subscriptions"; public static final String SOL_PUB_MULTI_THREAD = "sol.pub_multi_thread"; - public static final String SOL_PUB_USE_INTERMEDIATE_DIRECT_BUF + public static final String SOL_PUB_USE_INTERMEDIATE_DIRECT_BUF = "sol.pub_use_immediate_direct_pub"; public static final String SOL_MESSAGE_CALLBACK_ON_REACTOR = "sol.message_callback_on_reactor"; - public static final String SOL_IGNORE_DUPLICATE_SUBSCRIPTION_ERROR + public static final String SOL_IGNORE_DUPLICATE_SUBSCRIPTION_ERROR = "sol.ignore_duplicate_subscription_error"; - public static final String 
SOL_IGNORE_SUBSCRIPTION_NOT_FOUND_ERROR + public static final String SOL_IGNORE_SUBSCRIPTION_NOT_FOUND_ERROR = "sol.ignore_subscription_not_found_error"; public static final String SOL_NO_LOCAL = "sol.no_local"; public static final String SOL_ACK_EVENT_MODE = "sol.ack_event_mode"; @@ -77,35 +80,35 @@ public class SolaceSinkConstants { public static final String SOL_SSL_KEY_STORE = "sol.ssl_key_store"; public static final String SOL_SSL_KEY_STORE_PASSWORD = "sol.ssl_key_store_password"; public static final String SOL_SSL_KEY_STORE_FORMAT = "sol.ssl_key_store_format"; - public static final String SOL_SSL_KEY_STORE_NORMALIZED_FORMAT + public static final String SOL_SSL_KEY_STORE_NORMALIZED_FORMAT = "sol.ssl_key_store_normalized_format"; public static final String SOL_SSL_PRIVATE_KEY_ALIAS = "sol.ssl_private_key_alias"; public static final String SOL_SSL_PRIVATE_KEY_PASSWORD = "sol.ssl_private_key_password"; // Low Importance Solace Channel Properties - public static final String SOL_CHANNEL_PROPERTY_connectTimeoutInMillis + public static final String SOL_CHANNEL_PROPERTY_connectTimeoutInMillis = "sol.channel_properties.connect_timout_in_millis"; - public static final String SOL_CHANNEL_PROPERTY_readTimeoutInMillis + public static final String SOL_CHANNEL_PROPERTY_readTimeoutInMillis = "sol.channel_properties.read_timeout_in_millis"; - public static final String SOL_CHANNEL_PROPERTY_connectRetries + public static final String SOL_CHANNEL_PROPERTY_connectRetries = "sol.channel_properties.connect_retries"; - public static final String SOL_CHANNEL_PROPERTY_reconnectRetries + public static final String SOL_CHANNEL_PROPERTY_reconnectRetries = "sol.channel_properties.reconnect_retries"; - public static final String SOL_CHANNEL_PROPERTY_connectRetriesPerHost + public static final String SOL_CHANNEL_PROPERTY_connectRetriesPerHost = "sol.channnel_properties.connect_retries_per_host"; - public static final String SOL_CHANNEL_PROPERTY_reconnectRetryWaitInMillis + public static final String SOL_CHANNEL_PROPERTY_reconnectRetryWaitInMillis = "sol.channel_properties.reconnect_retry_wait_in_millis"; - public static final String SOL_CHANNEL_PROPERTY_keepAliveIntervalInMillis + public static final String SOL_CHANNEL_PROPERTY_keepAliveIntervalInMillis = "sol.channel_properties.keep_alive_interval_in_millis"; - public static final String SOL_CHANNEL_PROPERTY_keepAliveLimit + public static final String SOL_CHANNEL_PROPERTY_keepAliveLimit = "sol.channel_properties.keep_alive_limit"; - public static final String SOL_CHANNEL_PROPERTY_sendBuffer + public static final String SOL_CHANNEL_PROPERTY_sendBuffer = "sol.channel_properties.send_buffer"; - public static final String SOL_CHANNEL_PROPERTY_receiveBuffer + public static final String SOL_CHANNEL_PROPERTY_receiveBuffer = "sol.channle_properties.receive_buffer"; - public static final String SOL_CHANNEL_PROPERTY_tcpNoDelay + public static final String SOL_CHANNEL_PROPERTY_tcpNoDelay = "sol.channel_properties.tcp_no_delay"; - public static final String SOL_CHANNEL_PROPERTY_compressionLevel + public static final String SOL_CHANNEL_PROPERTY_compressionLevel = "sol.channel_properties.compression_level"; // Low Importance Persistent Message Properties @@ -132,7 +135,10 @@ public class SolaceSinkConstants { // Allowable values for the test sample include: NONE, DESTINATION, // CORRELATION_ID, CORRELATION_ID_AS_BYTES public static final String SOL_KAFKA_MESSAGE_KEY = "sol.kafka_message_key"; - + + // Low important Kafka headers + public static final String 
SOL_EMIT_KAFKA_RECORD_HEADERS_ENABLED = "sol.emit.kafka.record.headers.enabled"; + // Low importance, offset for replay - if null, continue from last offset when was last stopped // value of 0 is start from beginning public static final String SOL_KAFKA_REPLAY_OFFSET = "sol.kafka_replay_offset"; @@ -140,10 +146,10 @@ public class SolaceSinkConstants { // Allow SolRecordProcessor to control the creation of destinations rather than SolaceSinkSender // Requires a destination property in the user SDTMap with a key "dynamicDestination" public static final String SOL_DYNAMIC_DESTINATION = "sol.dynamic_destination"; - + //Low importance Kerberos details public static final String SOL_KERBEROS_LOGIN_CONFIG = "sol.kerberos.login.conf"; - public static final String SOL_KERBEROS_KRB5_CONFIG = "sol.kerberos.krb5.conf"; - + public static final String SOL_KERBEROS_KRB5_CONFIG = "sol.kerberos.krb5.conf"; + } diff --git a/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkSender.java b/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkSender.java index af103f1..85b789b 100644 --- a/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkSender.java +++ b/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkSender.java @@ -30,61 +30,56 @@ import com.solacesystems.jcsmp.SDTMap; import com.solacesystems.jcsmp.Topic; import com.solacesystems.jcsmp.XMLMessageProducer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; - import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.errors.RetriableException; import org.apache.kafka.connect.sink.SinkRecord; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicInteger; + public class SolaceSinkSender { private static final Logger log = LoggerFactory.getLogger(SolaceSinkSender.class); - private SolaceSinkConnectorConfig sconfig; - private XMLMessageProducer topicProducer; + private final SolaceSinkConnectorConfig sconfig; + private final XMLMessageProducer topicProducer; private XMLMessageProducer queueProducer; - private SolSessionHandler sessionHandler; - private BytesXMLMessage message; - private List topics = new ArrayList(); + private final SolSessionHandler sessionHandler; + private final List topics = new ArrayList<>(); private Queue solQueue = null; private boolean useTxforQueue = false; - private Class cprocessor; - private SolRecordProcessorIF processor; - private String kafkaKey; - private AtomicInteger txMsgCounter = new AtomicInteger(); - private SolaceSinkTask sinkTask; - private Map offsets - = new HashMap(); - + private final SolRecordProcessorIF processor; + private final String kafkaKey; + private final AtomicInteger txMsgCounter = new AtomicInteger(); + private final SolaceSinkTask sinkTask; + private final Map offsets = new HashMap<>(); + /** * Class that sends Solace Messages from Kafka Records. 
* @param sconfig JCSMP Configuration * @param sessionHandler SolSessionHandler * @param useTxforQueue * @param sinkTask Connector Sink Task - * @throws JCSMPException + * @throws JCSMPException */ - public SolaceSinkSender(SolaceSinkConnectorConfig sconfig, SolSessionHandler sessionHandler, - boolean useTxforQueue, SolaceSinkTask sinkTask) throws JCSMPException { + public SolaceSinkSender(final SolaceSinkConnectorConfig sconfig, + final SolSessionHandler sessionHandler, + final boolean useTxforQueue, + final SolaceSinkTask sinkTask) throws JCSMPException { this.sconfig = sconfig; this.sessionHandler = sessionHandler; this.useTxforQueue = useTxforQueue; this.sinkTask = sinkTask; - kafkaKey = sconfig.getString(SolaceSinkConstants.SOL_KAFKA_MESSAGE_KEY); - topicProducer = sessionHandler.getSession().getMessageProducer(new SolStreamingMessageCallbackHandler()); - cprocessor = (this.sconfig.getClass(SolaceSinkConstants.SOL_RECORD_PROCESSOR)); - try { - processor = (SolRecordProcessorIF) cprocessor.newInstance(); - } catch (InstantiationException | IllegalAccessException e) { - log.info("================ Received exception while creating record processing class {}, " - + "with the following: {} ", - e.getCause(), e.getStackTrace()); - } + this.kafkaKey = sconfig.getString(SolaceSinkConstants.SOL_KAFKA_MESSAGE_KEY); + this.topicProducer = sessionHandler.getSession().getMessageProducer(new SolStreamingMessageCallbackHandler()); + this.processor = sconfig.getConfiguredInstance(SolaceSinkConstants.SOL_RECORD_PROCESSOR, SolRecordProcessorIF.class); } /** @@ -99,7 +94,7 @@ public void setupDestinationTopics() { counter++; } } - + /** * Generate PubSub queue */ @@ -125,6 +120,7 @@ public void setupDestinationQueue() throws JCSMPException { * @param record Kafka Records */ public void sendRecord(SinkRecord record) { + BytesXMLMessage message; try { message = processor.processRecord(kafkaKey, record); offsets.put(new TopicPartition(record.topic(), record.kafkaPartition()), @@ -133,11 +129,13 @@ public void sendRecord(SinkRecord record) { + "Offset: {}", record.topic(), record.kafkaPartition(), record.kafkaOffset()); } catch (Exception e) { - log.info( - "================ Encountered exception in record processing....discarded." 
- + " Cause: {}, Stacktrace: {} ", - e.getCause(), e.getStackTrace()); - return; + if (sconfig.getBoolean(SolaceSinkConstants.SOL_RECORD_PROCESSOR_IGNORE_ERROR)) { + log.warn("================ Encountered exception in record processing for record of topic {}, partition {} " + + "and offset {}....discarded", record.topic(), record.kafkaPartition(), record.kafkaOffset(), e); + return; + } else { + throw new ConnectException("Encountered exception in record processing", e); + } } if (message.getAttachmentContentLength() == 0 || message.getAttachmentByteBuffer() == null) { @@ -145,6 +143,8 @@ public void sendRecord(SinkRecord record) { return; } + mayEnrichUserPropertiesWithKafkaRecordHeaders(record, message); + if (sconfig.getBoolean(SolaceSinkConstants.SOL_DYNAMIC_DESTINATION)) { // Process use Dynamic destination from SolRecordProcessor SDTMap userMap = message.getProperties(); @@ -152,17 +152,21 @@ public void sendRecord(SinkRecord record) { try { dest = userMap.getDestination("dynamicDestination"); } catch (SDTException e) { - log.info("================ Received exception retrieving Dynamic Destination: " - + "{}, with the following: {} ", - e.getCause(), e.getStackTrace()); + if (sconfig.getBoolean(SolaceSinkConstants.SOL_RECORD_PROCESSOR_IGNORE_ERROR)) { + log.warn("================ Received exception retrieving Dynamic Destination....discarded", e); + return; + } else { + throw new ConnectException("Received exception retrieving Dynamic Destination", e); + } } try { topicProducer.send(message, dest); + } catch (IllegalArgumentException e) { + throw new ConnectException(String.format("Received exception while sending message to topic %s", + dest != null ? dest.getName() : null), e); } catch (JCSMPException e) { - log.info( - "================ Received exception while sending message to topic {}: " - + "{}, with the following: {} ", - dest.getName(), e.getCause(), e.getStackTrace()); + throw new RetriableException(String.format("Received exception while sending message to topic %s", + dest != null ? 
dest.getName() : null), e); } } else { // Process when Dynamic destination is not set @@ -174,10 +178,12 @@ public void sendRecord(SinkRecord record) { txMsgCounter.getAndIncrement(); log.trace("================ Count of TX message is now: {}", txMsgCounter.get()); } + } catch (IllegalArgumentException e) { + throw new ConnectException(String.format("Received exception while sending message to queue %s", + solQueue.getName()), e); } catch (JCSMPException e) { - log.info("================ Received exception while sending message to queue {}: " - + "{}, with the following: {} ", - solQueue.getName(), e.getCause(), e.getStackTrace()); + throw new RetriableException(String.format("Received exception while sending message to queue %s", + solQueue.getName()), e); } } if (topics.size() != 0 && message.getDestination() == null) { @@ -186,46 +192,64 @@ public void sendRecord(SinkRecord record) { while (topics.size() > count) { try { topicProducer.send(message, topics.get(count)); + } catch (IllegalArgumentException e) { + throw new ConnectException(String.format("Received exception while sending message to topic %s", + topics.get(count).getName()), e); } catch (JCSMPException e) { - log.trace( - "================ Received exception while sending message to topic {}: " - + "{}, with the following: {} ", - topics.get(count).getName(), e.getCause(), e.getStackTrace()); + throw new RetriableException(String.format("Received exception while sending message to topic %s", + topics.get(count).getName()), e); } count++; } } } - + // Solace limits transaction size to 255 messages so need to force commit if ( useTxforQueue && txMsgCounter.get() > sconfig.getInt(SolaceSinkConstants.SOL_QUEUE_MESSAGES_AUTOFLUSH_SIZE)-1 ) { log.debug("================ Queue transaction autoflush size reached, flushing offsets from connector"); - sinkTask.flush(offsets); + try { + sinkTask.flush(offsets); + } catch (ConnectException e) { + if (e.getCause() instanceof JCSMPException) { + throw new RetriableException(e.getMessage(), e.getCause()); + } else { + throw e; + } + } } } - + + /** + * Visible for testing. + */ + void mayEnrichUserPropertiesWithKafkaRecordHeaders(final SinkRecord record, + final BytesXMLMessage message) { + if (sconfig.isEmitKafkaRecordHeadersEnabled() && !record.headers().isEmpty()) { + final SDTMap userMap = Optional + .ofNullable(message.getProperties()) + .orElseGet(JCSMPFactory.onlyInstance()::createMap); + record.headers().forEach(header -> { + try { + userMap.putObject(header.key(), header.value()); + } catch (SDTException e) { + // Re-throw the exception because there is nothing else to do - usually that exception should not happen. + throw new RuntimeException("Failed to add object message property from kafka record-header", e); + } + }); + message.setProperties(userMap); + } + } + /** * Commit Solace and Kafka records. 
- * @return Boolean Status */ - public synchronized boolean commit() { - boolean commited = true; - try { - if (useTxforQueue) { - sessionHandler.getTxSession().commit(); - commited = true; - txMsgCounter.set(0); - log.debug("Comitted Solace records for transaction with status: {}", - sessionHandler.getTxSession().getStatus().name()); - } - } catch (JCSMPException e) { - log.info("Received Solace TX exception {}, with the following: {} ", - e.getCause(), e.getStackTrace()); - log.info("The TX error could be due to using dynamic destinations and " - + " \"sol.dynamic_destination=true\" was not set in the configuration "); - commited = false; + public synchronized void commit() throws JCSMPException { + if (useTxforQueue) { + sessionHandler.getTxSession().commit(); + txMsgCounter.set(0); + log.debug("Comitted Solace records for transaction with status: {}", + sessionHandler.getTxSession().getStatus().name()); } - return commited; } /** diff --git a/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkTask.java b/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkTask.java index eff2b5f..5d56790 100644 --- a/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkTask.java +++ b/src/main/java/com/solace/connector/kafka/connect/sink/SolaceSinkTask.java @@ -20,19 +20,20 @@ package com.solace.connector.kafka.connect.sink; import com.solacesystems.jcsmp.JCSMPException; -import java.util.Collection; -import java.util.Iterator; -import java.util.Map; -import java.util.Set; - import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.sink.SinkRecord; import org.apache.kafka.connect.sink.SinkTask; import org.apache.kafka.connect.sink.SinkTaskContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collection; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; + public class SolaceSinkTask extends SinkTask { private static final Logger log = LoggerFactory.getLogger(SolaceSinkTask.class); private SolSessionHandler solSessionHandler; @@ -49,16 +50,16 @@ public String version() { @Override public void start(Map props) { - connectorConfig = new SolaceSinkConnectorConfig(props); + connectorConfig = new SolaceSinkConnectorConfig(props); solSessionHandler = new SolSessionHandler(connectorConfig); try { solSessionHandler.configureSession(); solSessionHandler.connectSession(); } catch (JCSMPException e) { - failStart(e, "================ Failed to create JCSMPSession"); + throw new ConnectException("Failed to create JCSMPSession", e); } log.info("================ JCSMPSession Connected"); - + if (connectorConfig.getString(SolaceSinkConstants.SOl_QUEUE) != null) { // Use transactions for queue destination useTxforQueue = connectorConfig.getBoolean(SolaceSinkConstants.SOl_USE_TRANSACTIONS_FOR_QUEUE); @@ -67,12 +68,12 @@ public void start(Map props) { solSessionHandler.createTxSession(); log.info("================ Transacted Session has been Created for PubSub+ queue destination"); } catch (JCSMPException e) { - failStart(e, "================ Failed to create Transacted Session for PubSub+ queue destination, " - + "make sure transacted sessions are enabled"); + throw new ConnectException("Failed to create Transacted Session for PubSub+ queue destination, " + + "make sure transacted sessions are enabled", e); } } } - + try { solSender = new SolaceSinkSender(connectorConfig, solSessionHandler, 
useTxforQueue, this); if (connectorConfig.getString(SolaceSinkConstants.SOL_TOPICS) != null) { @@ -81,22 +82,15 @@ public void start(Map props) { if (connectorConfig.getString(SolaceSinkConstants.SOl_QUEUE) != null) { solSender.setupDestinationQueue(); } - } catch (JCSMPException e) { - failStart(e, "Failed to setup sender to PubSub+"); + } catch (Exception e) { + throw new ConnectException("Failed to setup sender to PubSub+", e); } } - - private void failStart(JCSMPException e, String logMessage) { - log.info("Received Solace exception {}, with the " - + "following: {} ", e.getCause(), e.getStackTrace()); - log.info( "message"); - stop(); // Connector cannot continue - } @Override public void put(Collection records) { for (SinkRecord r : records) { - log.trace("Putting record to topic {}, partition {} and offset {}", r.topic(), + log.trace("Putting record to topic {}, partition {} and offset {}", r.topic(), r.kafkaPartition(), r.kafkaOffset()); solSender.sendRecord(r); @@ -124,21 +118,23 @@ public synchronized void flush(Map currentOff for (Map.Entry entry : currentOffsets.entrySet()) { TopicPartition tp = entry.getKey(); OffsetAndMetadata om = entry.getValue(); - log.debug("Flushing up to topic {}, partition {} and offset {}", tp.topic(), + log.debug("Flushing up to topic {}, partition {} and offset {}", tp.topic(), tp.partition(), om.offset()); } if (useTxforQueue) { - boolean commited = solSender.commit(); - if (!commited) { - log.info("Error in commiting transaction, shutting down"); - stop(); + try { + solSender.commit(); + } catch (JCSMPException e) { + // Consider using RetriableException if the Kafka Connect API one day decides to support it for flush/commit + throw new ConnectException("Error in committing transaction. The TX error could be due to using dynamic " + + "destinations and \"sol.dynamic_destination=true\" was not set in the configuration.", e); } } } /** * Create reference for SinkTaskContext - required for replay. - * + * * @param context SinkTaskContext */ public void initialize(SinkTaskContext context) { @@ -147,8 +143,8 @@ public void initialize(SinkTaskContext context) { /** * Opens access to partition write, this populates SinkTask Context - * which allows setting of offset from which to start reading. - * + * which allows setting of offset from which to start reading. 
+ * * @param partitions List of TopicPartitions for Topic */ public void open(Collection partitions) { @@ -159,7 +155,7 @@ public void open(Collection partitions) { Iterator partsIt = parts.iterator(); while (partsIt.hasNext()) { TopicPartition tp = partsIt.next(); - context.offset(tp, offsetLong); + context.offset(tp, offsetLong); } } } diff --git a/src/main/java/com/solace/connector/kafka/connect/sink/recordprocessor/SolSimpleKeyedRecordProcessor.java b/src/main/java/com/solace/connector/kafka/connect/sink/recordprocessor/SolSimpleKeyedRecordProcessor.java index 5be0781..eab2003 100644 --- a/src/main/java/com/solace/connector/kafka/connect/sink/recordprocessor/SolSimpleKeyedRecordProcessor.java +++ b/src/main/java/com/solace/connector/kafka/connect/sink/recordprocessor/SolSimpleKeyedRecordProcessor.java @@ -63,12 +63,12 @@ public BytesXMLMessage processRecord(String skey, SinkRecord record) { userHeader.putInteger("k_partition", record.kafkaPartition()); userHeader.putLong("k_offset", record.kafkaOffset()); } catch (SDTException e) { - log.info("Received Solace SDTException {}, with the following: {} ", + log.info("Received Solace SDTException {}, with the following: {} ", e.getCause(), e.getStackTrace()); } msg.setProperties(userHeader); msg.setApplicationMessageType("ResendOfKafkaTopic: " + kafkaTopic); - + Object recordKey = record.key(); Schema keySchema = record.keySchema(); @@ -101,7 +101,7 @@ public BytesXMLMessage processRecord(String skey, SinkRecord record) { } } else if (keyheader == KeyHeader.DESTINATION && keySchema.type() == Schema.Type.STRING) { // Destination is already determined by sink settings so set just the correlationId. - // Receiving app can evaluate it + // Receiving app can evaluate it msg.setCorrelationId((String) recordKey); } else { // Do nothing in all other cases @@ -118,7 +118,7 @@ public BytesXMLMessage processRecord(String skey, SinkRecord record) { } else if (recordValue instanceof ByteBuffer) { msg.writeAttachment((byte[]) ((ByteBuffer) recordValue).array()); } else if (recordValue instanceof String) { - msg.writeAttachment(((String) recordValue).getBytes()); + msg.writeAttachment(((String) recordValue).getBytes(StandardCharsets.UTF_8)); } else { // Unknown recordValue type msg.reset(); @@ -130,9 +130,9 @@ public BytesXMLMessage processRecord(String skey, SinkRecord record) { msg.writeAttachment((byte[]) ((ByteBuffer) recordValue).array()); } } else if (valueSchema.type() == Schema.Type.STRING) { - msg.writeAttachment(((String) recordValue).getBytes()); + msg.writeAttachment(((String) recordValue).getBytes(StandardCharsets.UTF_8)); } else { - // Do nothing in all other cases + // Do nothing in all other cases msg.reset(); } } else { diff --git a/src/main/java/com/solace/connector/kafka/connect/sink/recordprocessor/SolSimpleRecordProcessor.java b/src/main/java/com/solace/connector/kafka/connect/sink/recordprocessor/SolSimpleRecordProcessor.java index 28910fc..b285094 100644 --- a/src/main/java/com/solace/connector/kafka/connect/sink/recordprocessor/SolSimpleRecordProcessor.java +++ b/src/main/java/com/solace/connector/kafka/connect/sink/recordprocessor/SolSimpleRecordProcessor.java @@ -26,6 +26,8 @@ import com.solacesystems.jcsmp.SDTMap; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; + import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.sink.SinkRecord; import org.slf4j.Logger; @@ -37,7 +39,7 @@ public class SolSimpleRecordProcessor implements SolRecordProcessorIF { @Override public BytesXMLMessage 
processRecord(String skey, SinkRecord record) { BytesXMLMessage msg = JCSMPFactory.onlyInstance().createMessage(BytesXMLMessage.class); - + // Add Record Topic,Partition,Offset to Solace Msg String kafkaTopic = record.topic(); SDTMap userHeader = JCSMPFactory.onlyInstance().createMap(); @@ -46,12 +48,12 @@ public BytesXMLMessage processRecord(String skey, SinkRecord record) { userHeader.putInteger("k_partition", record.kafkaPartition()); userHeader.putLong("k_offset", record.kafkaOffset()); } catch (SDTException e) { - log.info("Received Solace SDTException {}, with the following: {} ", + log.info("Received Solace SDTException {}, with the following: {} ", e.getCause(), e.getStackTrace()); } msg.setProperties(userHeader); msg.setApplicationMessageType("ResendOfKafkaTopic: " + kafkaTopic); - + Schema valueSchema = record.valueSchema(); Object recordValue = record.value(); // get message body details from record @@ -63,7 +65,7 @@ public BytesXMLMessage processRecord(String skey, SinkRecord record) { } else if (recordValue instanceof ByteBuffer) { msg.writeAttachment((byte[]) ((ByteBuffer) recordValue).array()); } else if (recordValue instanceof String) { - msg.writeAttachment(((String) recordValue).getBytes()); + msg.writeAttachment(((String) recordValue).getBytes(StandardCharsets.UTF_8)); } else { // Unknown recordValue type msg.reset(); @@ -75,16 +77,16 @@ public BytesXMLMessage processRecord(String skey, SinkRecord record) { msg.writeAttachment((byte[]) ((ByteBuffer) recordValue).array()); } } else if (valueSchema.type() == Schema.Type.STRING) { - msg.writeAttachment(((String) recordValue).getBytes()); + msg.writeAttachment(((String) recordValue).getBytes(StandardCharsets.UTF_8)); } else { - // Do nothing in all other cases + // Do nothing in all other cases msg.reset(); } } else { // Invalid message msg.reset(); } - + return msg; } diff --git a/src/main/java/com/solace/connector/kafka/connect/sink/VersionUtil.java b/src/template/java/com/solace/connector/kafka/connect/sink/VersionUtil.java similarity index 89% rename from src/main/java/com/solace/connector/kafka/connect/sink/VersionUtil.java rename to src/template/java/com/solace/connector/kafka/connect/sink/VersionUtil.java index 54fda38..9642e02 100644 --- a/src/main/java/com/solace/connector/kafka/connect/sink/VersionUtil.java +++ b/src/template/java/com/solace/connector/kafka/connect/sink/VersionUtil.java @@ -6,8 +6,6 @@ public class VersionUtil { * @return Version Number */ public static String getVersion() { - - return "2.0.2"; - + return "${version}"; } } diff --git a/src/test/java/com/solace/connector/kafka/connect/sink/SolaceSessionHandlerTest.java b/src/test/java/com/solace/connector/kafka/connect/sink/SolaceSessionHandlerTest.java new file mode 100644 index 0000000..58816d4 --- /dev/null +++ b/src/test/java/com/solace/connector/kafka/connect/sink/SolaceSessionHandlerTest.java @@ -0,0 +1,44 @@ +package com.solace.connector.kafka.connect.sink; + +import com.solacesystems.jcsmp.JCSMPProperties; +import org.apache.commons.lang.RandomStringUtils; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; + +import java.util.HashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class SolaceSessionHandlerTest { + @ParameterizedTest + @CsvSource({ + SolaceSinkConstants.SOL_PASSWORD + ',' + JCSMPProperties.PASSWORD, + SolaceSinkConstants.SOL_SSL_KEY_STORE_PASSWORD + ',' + JCSMPProperties.SSL_KEY_STORE_PASSWORD, + 
SolaceSinkConstants.SOL_SSL_PRIVATE_KEY_PASSWORD + ',' + JCSMPProperties.SSL_PRIVATE_KEY_PASSWORD, + SolaceSinkConstants.SOL_SSL_TRUST_STORE_PASSWORD + ',' + JCSMPProperties.SSL_TRUST_STORE_PASSWORD + }) + public void testConfigurePasswords(String connectorProperty, String jcsmpProperty) { + Map properties = new HashMap<>(); + properties.put(connectorProperty, RandomStringUtils.randomAlphanumeric(30)); + SolSessionHandler sessionHandler = new SolSessionHandler(new SolaceSinkConnectorConfig(properties)); + sessionHandler.configureSession(); + assertEquals(properties.get(connectorProperty), + sessionHandler.properties.getStringProperty(jcsmpProperty)); + } + + @ParameterizedTest + @CsvSource({ + SolaceSinkConstants.SOL_PASSWORD + ',' + JCSMPProperties.PASSWORD, + SolaceSinkConstants.SOL_SSL_KEY_STORE_PASSWORD + ',' + JCSMPProperties.SSL_KEY_STORE_PASSWORD, + SolaceSinkConstants.SOL_SSL_PRIVATE_KEY_PASSWORD + ',' + JCSMPProperties.SSL_PRIVATE_KEY_PASSWORD, + SolaceSinkConstants.SOL_SSL_TRUST_STORE_PASSWORD + ',' + JCSMPProperties.SSL_TRUST_STORE_PASSWORD + }) + public void testConfigureNullPasswords(String connectorProperty, String jcsmpProperty) { + Map properties = new HashMap<>(); + properties.put(connectorProperty, null); + SolSessionHandler sessionHandler = new SolSessionHandler(new SolaceSinkConnectorConfig(properties)); + sessionHandler.configureSession(); + assertEquals(properties.get(connectorProperty), sessionHandler.properties.getStringProperty(jcsmpProperty)); + } +} diff --git a/src/test/java/com/solace/connector/kafka/connect/sink/SolaceSinkConnectorConfigTest.java b/src/test/java/com/solace/connector/kafka/connect/sink/SolaceSinkConnectorConfigTest.java new file mode 100644 index 0000000..b51b222 --- /dev/null +++ b/src/test/java/com/solace/connector/kafka/connect/sink/SolaceSinkConnectorConfigTest.java @@ -0,0 +1,66 @@ +package com.solace.connector.kafka.connect.sink; + +import com.solacesystems.jcsmp.BytesXMLMessage; +import org.apache.commons.lang.RandomStringUtils; +import org.apache.kafka.common.config.types.Password; +import org.apache.kafka.connect.sink.SinkRecord; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.util.HashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +public class SolaceSinkConnectorConfigTest { + @ParameterizedTest + @ValueSource(strings = { + SolaceSinkConstants.SOL_PASSWORD, + SolaceSinkConstants.SOL_SSL_KEY_STORE_PASSWORD, + SolaceSinkConstants.SOL_SSL_PRIVATE_KEY_PASSWORD, + SolaceSinkConstants.SOL_SSL_TRUST_STORE_PASSWORD + }) + public void testPasswordsObfuscation(String property) { + Map properties = new HashMap<>(); + properties.put(property, RandomStringUtils.randomAlphanumeric(30)); + SolaceSinkConnectorConfig config = new SolaceSinkConnectorConfig(properties); + Password password = config.getPassword(property); + assertEquals(Password.HIDDEN, password.toString()); + assertEquals(properties.get(property), password.value()); + } + + @Test + public void shouldReturnConfiguredSolRecordProcessorIFGivenConfigurableClass() { + // GIVEN + Map configProps = new HashMap<>(); + configProps.put("processor.config", "dummy"); + configProps.put(SolaceSinkConstants.SOL_RECORD_PROCESSOR, TestSolRecordProcessorIF.class.getName()); + + // WHEN + SolaceSinkConnectorConfig config = new SolaceSinkConnectorConfig(configProps); + + // THEN + 
SolRecordProcessorIF processor = config.getConfiguredInstance(SolaceSinkConstants.SOL_RECORD_PROCESSOR, SolRecordProcessorIF.class);; + assertNotNull(processor); + assertNotNull(((TestSolRecordProcessorIF)processor).configs); + assertEquals("dummy", ((TestSolRecordProcessorIF)processor).configs.get("processor.config")); + + } + + public static class TestSolRecordProcessorIF implements SolRecordProcessorIF { + + Map configs; + + @Override + public void configure(Map configs) { + this.configs = configs; + } + + @Override + public BytesXMLMessage processRecord(String skey, SinkRecord record) { + return null; + } + } +} diff --git a/src/test/java/com/solace/connector/kafka/connect/sink/SolaceSinkSenderTest.java b/src/test/java/com/solace/connector/kafka/connect/sink/SolaceSinkSenderTest.java new file mode 100644 index 0000000..ea71da9 --- /dev/null +++ b/src/test/java/com/solace/connector/kafka/connect/sink/SolaceSinkSenderTest.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.solace.connector.kafka.connect.sink; + +import com.solace.connector.kafka.connect.sink.recordprocessor.SolSimpleRecordProcessor; +import com.solacesystems.jcsmp.BytesXMLMessage; +import com.solacesystems.jcsmp.JCSMPException; +import com.solacesystems.jcsmp.JCSMPFactory; +import com.solacesystems.jcsmp.JCSMPSession; +import com.solacesystems.jcsmp.SDTMap; +import org.apache.kafka.common.record.TimestampType; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.header.ConnectHeaders; +import org.apache.kafka.connect.sink.SinkRecord; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.jupiter.MockitoExtension; + +import java.util.HashMap; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +@ExtendWith(MockitoExtension.class) +public class SolaceSinkSenderTest { + + @Mock private SolSessionHandler mkSessionHandler; + @Mock private JCSMPSession mkJcsmpSession; + @Mock private SolaceSinkTask mkSolaceSinkTask; + + @Test + public void shouldAddKafkaRecordHeadersOnBytesXMLMessageWhenEnabled() throws JCSMPException { + // GIVEN + Mockito.when(mkSessionHandler.getSession()).thenReturn(mkJcsmpSession); + Mockito.when(mkJcsmpSession.getMessageProducer(Mockito.any())).thenReturn(null); + + Map config = new HashMap<>(); + config.put(SolaceSinkConstants.SOL_EMIT_KAFKA_RECORD_HEADERS_ENABLED, "true"); + config.put(SolaceSinkConstants.SOL_RECORD_PROCESSOR, SolSimpleRecordProcessor.class.getName()); + + final SolaceSinkConnectorConfig connectorConfig = new SolaceSinkConnectorConfig(config); + + final 
SolaceSinkSender sender = new SolaceSinkSender( + connectorConfig, + mkSessionHandler, + false, + mkSolaceSinkTask + ); + + ConnectHeaders headers = new ConnectHeaders(); + + headers.addString("h2", "val2"); + headers.addString("h3", "val3"); + headers.addString("h3", "val4"); + headers.addString("h3", "val5"); + + SinkRecord record = new SinkRecord( + "topic", + 0, + Schema.STRING_SCHEMA, + "key", + Schema.STRING_SCHEMA, + "value", + 0L, + 0L, + TimestampType.CREATE_TIME, + headers + ); + + // WHEN + BytesXMLMessage msg = JCSMPFactory.onlyInstance().createMessage(BytesXMLMessage.class); + + SDTMap existing = JCSMPFactory.onlyInstance().createMap(); + existing.putString("h1", "val1"); + msg.setProperties(existing); + sender.mayEnrichUserPropertiesWithKafkaRecordHeaders(record, msg); + + // THEN + SDTMap properties = msg.getProperties(); + assertNotNull(properties); + assertEquals("val1", properties.getString("h1")); + assertEquals("val2", properties.getString("h2")); + assertEquals("val5", properties.getString("h3")); + } +} \ No newline at end of file diff --git a/src/test/java/com/solace/connector/kafka/connect/sink/SolaceSinkTaskTest.java b/src/test/java/com/solace/connector/kafka/connect/sink/SolaceSinkTaskTest.java new file mode 100644 index 0000000..1afd66d --- /dev/null +++ b/src/test/java/com/solace/connector/kafka/connect/sink/SolaceSinkTaskTest.java @@ -0,0 +1,38 @@ +package com.solace.connector.kafka.connect.sink; + +import com.solacesystems.jcsmp.JCSMPException; +import org.apache.kafka.connect.errors.ConnectException; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class SolaceSinkTaskTest { + private SolaceSinkTask solaceSinkTask; + + @BeforeEach + void setUp() { + solaceSinkTask = new SolaceSinkTask(); + } + + @AfterEach + void tearDown() { + solaceSinkTask.stop(); + } + + @Test + public void testFailSessionConnect() { + Map props = new HashMap<>(); + ConnectException thrown = assertThrows(ConnectException.class, () -> solaceSinkTask.start(props)); + assertThat(thrown.getMessage(), containsString("Failed to create JCSMPSession")); + assertThat(thrown.getCause(), instanceOf(JCSMPException.class)); + assertThat(thrown.getCause().getMessage(), containsString("Null value was passed in for property (host)")); + } +} diff --git a/src/test/java/com/solace/connector/kafka/connect/sink/VersionUtilTest.java b/src/test/java/com/solace/connector/kafka/connect/sink/VersionUtilTest.java new file mode 100644 index 0000000..23c4822 --- /dev/null +++ b/src/test/java/com/solace/connector/kafka/connect/sink/VersionUtilTest.java @@ -0,0 +1,16 @@ +package com.solace.connector.kafka.connect.sink; + +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.regex.Pattern; + +import static org.junit.jupiter.api.Assertions.assertLinesMatch; + +public class VersionUtilTest { + @Test + public void testGetVersion() { + assertLinesMatch(Collections.singletonList(Pattern.compile("^[0-9]+\\.[0-9]+\\.[0-9]+$").pattern()), + Collections.singletonList(VersionUtil.getVersion())); + } +} diff --git a/src/test/resources/log4j2.xml b/src/test/resources/log4j2.xml new file mode 100644 index 0000000..01eb664 --- /dev/null +++ 
b/src/test/resources/log4j2.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + \ No newline at end of file
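Illustration (not part of the patch): the snippet below sketches how the two options introduced by this change, sol.record_processor.error.ignore and sol.emit.kafka.record.headers.enabled, can be wired up using the same map-based configuration style as the unit tests above. The host and username values are placeholders, and the full set of connection properties needed in a real deployment is assumed rather than shown.

import java.util.HashMap;
import java.util.Map;

import com.solace.connector.kafka.connect.sink.SolaceSinkConnectorConfig;
import com.solace.connector.kafka.connect.sink.SolaceSinkConstants;
import com.solace.connector.kafka.connect.sink.recordprocessor.SolSimpleRecordProcessor;

public class SinkConfigSketch {
  public static void main(String[] args) {
    Map<String, String> props = new HashMap<>();
    // Placeholder connection settings; real deployments also set VPN, credentials, TLS, etc.
    props.put(SolaceSinkConstants.SOL_HOST, "tcps://broker.example.com:55443");
    props.put(SolaceSinkConstants.SOL_USERNAME, "connector-user");

    // Record processor plus the new error-tolerance switch: when true, a record the
    // processor cannot convert is logged and skipped instead of failing the task.
    props.put(SolaceSinkConstants.SOL_RECORD_PROCESSOR, SolSimpleRecordProcessor.class.getName());
    props.put(SolaceSinkConstants.SOL_RECORD_PROCESSOR_IGNORE_ERROR, "true");

    // New option: copy Kafka record headers onto outgoing Solace messages as user properties.
    props.put(SolaceSinkConstants.SOL_EMIT_KAFKA_RECORD_HEADERS_ENABLED, "true");

    SolaceSinkConnectorConfig config = new SolaceSinkConnectorConfig(props);
    System.out.println("emit headers enabled: " + config.isEmitKafkaRecordHeadersEnabled());
  }
}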
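Also for illustration only: a hypothetical record processor modelled on TestSolRecordProcessorIF from the tests above. It assumes SolRecordProcessorIF exposes the configure(Map<String, ?>) hook that getConfiguredInstance(...) invokes, and it sets the dynamicDestination user property consumed by the dynamic-destination branch of SolaceSinkSender (which requires sol.dynamic_destination=true). The class name and the processor.topic.prefix key are invented for this sketch.

import java.nio.charset.StandardCharsets;
import java.util.Map;

import com.solace.connector.kafka.connect.sink.SolRecordProcessorIF;
import com.solacesystems.jcsmp.BytesXMLMessage;
import com.solacesystems.jcsmp.JCSMPFactory;
import com.solacesystems.jcsmp.SDTException;
import com.solacesystems.jcsmp.SDTMap;
import org.apache.kafka.connect.sink.SinkRecord;

public class TopicPerRecordProcessor implements SolRecordProcessorIF {
  private String topicPrefix = "sink/records";

  @Override
  public void configure(Map<String, ?> configs) {
    // Invoked once with the connector's originals when the processor is instantiated.
    Object prefix = configs.get("processor.topic.prefix"); // hypothetical key
    if (prefix != null) {
      topicPrefix = prefix.toString();
    }
  }

  @Override
  public BytesXMLMessage processRecord(String skey, SinkRecord record) {
    BytesXMLMessage msg = JCSMPFactory.onlyInstance().createMessage(BytesXMLMessage.class);
    Object value = record.value();
    msg.writeAttachment(value == null
        ? new byte[0]
        : value.toString().getBytes(StandardCharsets.UTF_8));
    try {
      // Route each record to a per-Kafka-topic Solace topic via the dynamicDestination property.
      SDTMap userMap = JCSMPFactory.onlyInstance().createMap();
      userMap.putDestination("dynamicDestination",
          JCSMPFactory.onlyInstance().createTopic(topicPrefix + "/" + record.topic()));
      msg.setProperties(userMap);
    } catch (SDTException e) {
      // With sol.record_processor.error.ignore=true the sender logs and skips this record;
      // otherwise the exception surfaces as a ConnectException and fails the task.
      throw new RuntimeException("Failed to set dynamic destination", e);
    }
    return msg;
  }
}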
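Finally, a consumer-side sketch of what header emission looks like to a JCSMP subscriber when sol.emit.kafka.record.headers.enabled=true: each Kafka record header key becomes a user-property key on the received Solace message. The trace-id header name is made up for this example, and a real listener should check a property's type before reading it as a string.

import com.solacesystems.jcsmp.BytesXMLMessage;
import com.solacesystems.jcsmp.JCSMPException;
import com.solacesystems.jcsmp.SDTException;
import com.solacesystems.jcsmp.SDTMap;
import com.solacesystems.jcsmp.XMLMessageListener;

public class HeaderLoggingListener implements XMLMessageListener {
  @Override
  public void onReceive(BytesXMLMessage msg) {
    SDTMap userProps = msg.getProperties();
    if (userProps == null) {
      return; // no user properties: headers disabled or none set by the record processor
    }
    try {
      // Read a header that the Kafka producer attached to the original record.
      System.out.println("trace-id = " + userProps.getString("trace-id"));
    } catch (SDTException e) {
      e.printStackTrace();
    }
  }

  @Override
  public void onException(JCSMPException e) {
    e.printStackTrace();
  }
}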