From 61fe5179c93f4e6c574b2205a923953e8e668bbe Mon Sep 17 00:00:00 2001
From: xwm1992
Date: Tue, 20 Aug 2024 14:59:54 +0800
Subject: [PATCH 1/6] [ISSUE #5079] Enhancement update for admin-server
---
eventmesh-admin-server/bin/start-admin.sh | 51 ++++++------
eventmesh-admin-server/build.gradle | 2 +
eventmesh-admin-server/conf/application.yaml | 8 +-
eventmesh-admin-server/conf/eventmesh.sql | 2 +-
.../conf/mapper/EventMeshVerifyMapper.xml | 5 +-
.../admin/server/web/HttpServer.java | 22 +++++
.../admin/server/web/db/DBThreadPool.java | 26 +++++-
.../server/web/db/entity/EventMeshVerify.java | 2 +
.../handler/impl/FetchJobRequestHandler.java | 2 +-
.../handler/impl/ReportJobRequestHandler.java | 56 +++++++++++++
.../handler/impl/ReportPositionHandler.java | 1 +
.../web/handler/impl/ReportVerifyHandler.java | 50 ++++++++++--
.../web/service/job/JobInfoBizService.java | 81 ++++++++++++++++---
.../position/impl/HttpPositionHandler.java | 57 +++++++++++++
.../web/service/verify/VerifyBizService.java | 1 +
.../eventmesh/common/remote/JobState.java | 51 ++++++++++++
.../remote/request/ReportVerifyRequest.java | 2 +
...e.eventmesh.common.remote.payload.IPayload | 1 +
.../offsetmgmt/admin/AdminOffsetService.java | 3 +
19 files changed, 370 insertions(+), 53 deletions(-)
create mode 100644 eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportJobRequestHandler.java
create mode 100644 eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/position/impl/HttpPositionHandler.java
create mode 100644 eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/JobState.java
diff --git a/eventmesh-admin-server/bin/start-admin.sh b/eventmesh-admin-server/bin/start-admin.sh
index 93c3644397..1633036617 100644
--- a/eventmesh-admin-server/bin/start-admin.sh
+++ b/eventmesh-admin-server/bin/start-admin.sh
@@ -56,34 +56,34 @@ function extract_java_version {
#}
function get_pid {
- local ppid=""
- if [ -f ${EVENTMESH_ADMIN_HOME}/bin/pid-admin.file ]; then
- ppid=$(cat ${EVENTMESH_ADMIN_HOME}/bin/pid-admin.file)
- # If the process does not exist, it indicates that the previous process terminated abnormally.
+ local ppid=""
+ if [ -f ${EVENTMESH_ADMIN_HOME}/bin/pid-admin.file ]; then
+ ppid=$(cat ${EVENTMESH_ADMIN_HOME}/bin/pid-admin.file)
+ # If the process does not exist, it indicates that the previous process terminated abnormally.
if [ ! -d /proc/$ppid ]; then
# Remove the residual file.
rm ${EVENTMESH_ADMIN_HOME}/bin/pid-admin.file
echo -e "ERROR\t EventMesh process had already terminated unexpectedly before, please check log output."
ppid=""
fi
- else
- if [[ $OS =~ Msys ]]; then
- # There is a Bug on Msys that may not be able to kill the identified process
- ppid=`jps -v | grep -i "org.apache.eventmesh.admin.server.ExampleAdminServer" | grep java | grep -v grep | awk -F ' ' {'print $1'}`
- elif [[ $OS =~ Darwin ]]; then
- # Known problem: grep Java may not be able to accurately identify Java processes
- ppid=$(/bin/ps -o user,pid,command | grep "java" | grep -i "org.apache.eventmesh.admin.server.ExampleAdminServer" | grep -Ev "^root" |awk -F ' ' {'print $2'})
- else
- if [ $DOCKER ]; then
- # No need to exclude root user in Docker containers.
- ppid=$(ps -C java -o user,pid,command --cols 99999 | grep -w $EVENTMESH_ADMIN_HOME | grep -i "org.apache.eventmesh.admin.server.ExampleAdminServer" | awk -F ' ' {'print $2'})
- else
+ else
+ if [[ $OS =~ Msys ]]; then
+ # There is a bug on Msys that may prevent killing the identified process
+ ppid=`jps -v | grep -i "org.apache.eventmesh.admin.server.ExampleAdminServer" | grep java | grep -v grep | awk -F ' ' {'print $1'}`
+ elif [[ $OS =~ Darwin ]]; then
+ # Known problem: grep Java may not be able to accurately identify Java processes
+ ppid=$(/bin/ps -o user,pid,command | grep "java" | grep -i "org.apache.eventmesh.admin.server.ExampleAdminServer" | grep -Ev "^root" |awk -F ' ' {'print $2'})
+ else
+ if [ $DOCKER ]; then
+ # No need to exclude root user in Docker containers.
+ ppid=$(ps -C java -o user,pid,command --cols 99999 | grep -w $EVENTMESH_ADMIN_HOME | grep -i "org.apache.eventmesh.admin.server.ExampleAdminServer" | awk -F ' ' {'print $2'})
+ else
# It is required to identify the process as accurately as possible on Linux.
ppid=$(ps -C java -o user,pid,command --cols 99999 | grep -w $EVENTMESH_ADMIN_HOME | grep -i "org.apache.eventmesh.admin.server.ExampleAdminServer" | grep -Ev "^root" | awk -F ' ' {'print $2'})
fi
- fi
- fi
- echo "$ppid";
+ fi
+ fi
+ echo "$ppid";
}
#===========================================================================================
@@ -136,8 +136,7 @@ export JAVA_HOME
GC_LOG_FILE="${EVENTMESH_ADMIN_LOG_HOME}/eventmesh_admin_gc_%p.log"
-#JAVA_OPT="${JAVA_OPT} -server -Xms2048M -Xmx4096M -Xmn2048m -XX:SurvivorRatio=4"
-JAVA_OPT=`cat ${EVENTMESH_ADMIN_HOME}/conf/server.env | grep APP_START_JVM_OPTION::: | awk -F ':::' {'print $2'}`
+JAVA_OPT="${JAVA_OPT} -server -Xms1g -Xmx1g"
JAVA_OPT="${JAVA_OPT} -XX:+UseG1GC -XX:G1HeapRegionSize=16m -XX:G1ReservePercent=25 -XX:InitiatingHeapOccupancyPercent=30 -XX:SoftRefLRUPolicyMSPerMB=0 -XX:SurvivorRatio=8 -XX:MaxGCPauseMillis=50"
JAVA_OPT="${JAVA_OPT} -verbose:gc"
if [[ "$JAVA_VERSION" == "8" ]]; then
@@ -172,7 +171,7 @@ JAVA_OPT="${JAVA_OPT} -DeventMeshPluginDir=${EVENTMESH_ADMIN_HOME}/plugin"
# echo "proxy is running already"
# exit 9;
# else
-# echo "err pid$pid, rm pid.file"
+# echo "err pid$pid, rm pid.file"
# rm pid.file
# fi
#fi
@@ -183,8 +182,8 @@ if [[ $pid == "ERROR"* ]]; then
exit 9
fi
if [ -n "$pid" ]; then
- echo -e "ERROR\t The server is already running (pid=$pid), there is no need to execute start.sh again."
- exit 9
+ echo -e "ERROR\t The server is already running (pid=$pid), there is no need to execute start.sh again."
+ exit 9
fi
make_logs_dir
@@ -193,9 +192,9 @@ echo "Using Java version: $JAVA_VERSION, path: $JAVA" >> ${EVENTMESH_ADMIN_LOG_H
EVENTMESH_ADMIN_MAIN=org.apache.eventmesh.admin.server.ExampleAdminServer
if [ $DOCKER ]; then
- $JAVA $JAVA_OPT -classpath ${EVENTMESH_ADMIN_HOME}/conf:${EVENTMESH_ADMIN_HOME}/apps/*:${EVENTMESH_ADMIN_HOME}/lib/* $EVENTMESH_ADMIN_MAIN >> ${EVENTMESH_ADMIN_LOG_HOME}/eventmesh-admin.out
+ $JAVA $JAVA_OPT -classpath ${EVENTMESH_ADMIN_HOME}/conf:${EVENTMESH_ADMIN_HOME}/apps/*:${EVENTMESH_ADMIN_HOME}/lib/* $EVENTMESH_ADMIN_MAIN >> ${EVENTMESH_ADMIN_LOG_HOME}/eventmesh-admin.out
else
- $JAVA $JAVA_OPT -classpath ${EVENTMESH_ADMIN_HOME}/conf:${EVENTMESH_ADMIN_HOME}/apps/*:${EVENTMESH_ADMIN_HOME}/lib/* $EVENTMESH_ADMIN_MAIN >> ${EVENTMESH_ADMIN_LOG_HOME}/eventmesh-admin.out 2>&1 &
+ $JAVA $JAVA_OPT -classpath ${EVENTMESH_ADMIN_HOME}/conf:${EVENTMESH_ADMIN_HOME}/apps/*:${EVENTMESH_ADMIN_HOME}/lib/* $EVENTMESH_ADMIN_MAIN >> ${EVENTMESH_ADMIN_LOG_HOME}/eventmesh-admin.out 2>&1 &
echo $!>${EVENTMESH_ADMIN_HOME}/bin/pid-admin.file
fi
exit 0
diff --git a/eventmesh-admin-server/build.gradle b/eventmesh-admin-server/build.gradle
index 1fec2c7c52..95c8fa1372 100644
--- a/eventmesh-admin-server/build.gradle
+++ b/eventmesh-admin-server/build.gradle
@@ -38,6 +38,8 @@ dependencies {
implementation "com.alibaba:druid-spring-boot-starter"
compileOnly 'com.mysql:mysql-connector-j'
compileOnly 'org.projectlombok:lombok'
+ testImplementation 'junit:junit:4.12'
+ testImplementation 'org.projectlombok:lombok'
annotationProcessor 'org.projectlombok:lombok'
}
diff --git a/eventmesh-admin-server/conf/application.yaml b/eventmesh-admin-server/conf/application.yaml
index 274196db60..3d702e579e 100644
--- a/eventmesh-admin-server/conf/application.yaml
+++ b/eventmesh-admin-server/conf/application.yaml
@@ -35,8 +35,8 @@ event-mesh:
# grpc server port
port: 8081
adminServerList:
- region1:
+ R1:
- http://localhost:8082
- region2:
- - http://localhost:8083
- region: region1
\ No newline at end of file
+ R2:
+ - http://localhost:8082
+ region: R1
\ No newline at end of file
diff --git a/eventmesh-admin-server/conf/eventmesh.sql b/eventmesh-admin-server/conf/eventmesh.sql
index 986320570a..6e28daca8a 100644
--- a/eventmesh-admin-server/conf/eventmesh.sql
+++ b/eventmesh-admin-server/conf/eventmesh.sql
@@ -102,7 +102,6 @@ CREATE TABLE IF NOT EXISTS `event_mesh_runtime_heartbeat` (
`updateTime` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`createTime` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
- UNIQUE KEY `runtimeAddr` (`runtimeAddr`),
KEY `jobID` (`jobID`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
@@ -137,6 +136,7 @@ CREATE TABLE IF NOT EXISTS `event_mesh_task_info` (
CREATE TABLE IF NOT EXISTS `event_mesh_verify` (
`id` int unsigned NOT NULL AUTO_INCREMENT,
`taskID` varchar(50) COLLATE utf8_bin DEFAULT NULL,
+ `jobID` varchar(50) COLLATE utf8_bin DEFAULT NULL,
`recordID` varchar(50) COLLATE utf8_bin DEFAULT NULL,
`recordSig` varchar(50) COLLATE utf8_bin DEFAULT NULL,
`connectorName` varchar(200) COLLATE utf8_bin DEFAULT NULL,
diff --git a/eventmesh-admin-server/conf/mapper/EventMeshVerifyMapper.xml b/eventmesh-admin-server/conf/mapper/EventMeshVerifyMapper.xml
index b7b042145a..45727498cc 100644
--- a/eventmesh-admin-server/conf/mapper/EventMeshVerifyMapper.xml
+++ b/eventmesh-admin-server/conf/mapper/EventMeshVerifyMapper.xml
@@ -26,6 +26,7 @@
+
@@ -35,8 +36,8 @@
- id,taskID,recordID,
- recordSig,connectorName,connectorStage,
+ id,taskID,jobID,recordID,
+ recordSig,connectorName,connectorStage,
position,createTime
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/HttpServer.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/HttpServer.java
index 12afb3a3d4..8350802f75 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/HttpServer.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/HttpServer.java
@@ -17,8 +17,11 @@
package org.apache.eventmesh.admin.server.web;
+import lombok.extern.slf4j.Slf4j;
import org.apache.eventmesh.admin.server.web.service.task.TaskBizService;
+import org.apache.eventmesh.admin.server.web.service.verify.VerifyBizService;
import org.apache.eventmesh.common.remote.request.CreateTaskRequest;
+import org.apache.eventmesh.common.remote.request.ReportVerifyRequest;
import org.apache.eventmesh.common.remote.response.CreateTaskResponse;
import org.apache.eventmesh.common.utils.JsonUtils;
@@ -31,17 +34,36 @@
@RestController
@RequestMapping("/eventmesh/admin")
+@Slf4j
public class HttpServer {
@Autowired
private TaskBizService taskService;
+ @Autowired
+ private VerifyBizService verifyService;
+
@RequestMapping(value = "/createTask", method = RequestMethod.POST)
public ResponseEntity createOrUpdateTask(@RequestBody CreateTaskRequest task) {
+ log.info("receive http proto create task:{}",task);
CreateTaskResponse createTaskResponse = taskService.createTask(task);
+ log.info("receive http proto create task result:{}",createTaskResponse);
return ResponseEntity.ok(JsonUtils.toJSONString(Response.success(createTaskResponse)));
}
+
+ @RequestMapping(value = "/reportVerify", method = RequestMethod.POST)
+ public ResponseEntity reportVerify(@RequestBody ReportVerifyRequest request) {
+ log.info("receive http proto report verify request:{}", request);
+ boolean result = verifyService.reportVerifyRecord(request);
+ log.info("receive http proto report verify result:{}", result);
+ if (result) {
+ return ResponseEntity.ok("report verify success.request:" + JsonUtils.toJSONString(request));
+ } else {
+ return ResponseEntity.internalServerError().body("report verify failed.request:" + JsonUtils.toJSONString(request));
+ }
+ }
+
public boolean deleteTask(Long id) {
return false;
}
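For reference, a minimal client-side sketch of the new reportVerify endpoint (not part of the patch; URL, port, and field values are illustrative, and field types are assumed from ReportVerifyRequest as used in VerifyBizService):

    import org.apache.eventmesh.common.remote.request.ReportVerifyRequest;
    import org.springframework.http.ResponseEntity;
    import org.springframework.web.client.RestTemplate;

    // Illustrative values only; a 2xx response means reportVerifyRecord(request) returned true.
    ReportVerifyRequest request = new ReportVerifyRequest();
    request.setTaskID("task-1");
    request.setJobID("job-1");
    request.setRecordID("record-1");
    request.setRecordSig("sig-abc");
    request.setConnectorName("connector-a");
    request.setConnectorStage("SOURCE");   // stage value assumed
    request.setPosition("0");              // position assumed to be a String here
    RestTemplate restTemplate = new RestTemplate();
    ResponseEntity<String> response = restTemplate.postForEntity(
        "http://localhost:8082/eventmesh/admin/reportVerify", request, String.class);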
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/DBThreadPool.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/DBThreadPool.java
index f1de764967..124eca4261 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/DBThreadPool.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/DBThreadPool.java
@@ -20,6 +20,7 @@
import org.apache.eventmesh.common.EventMeshThreadFactory;
import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
@@ -39,17 +40,34 @@ public class DBThreadPool {
new LinkedBlockingQueue<>(1000), new EventMeshThreadFactory("admin-server-db"),
new ThreadPoolExecutor.DiscardOldestPolicy());
+
+ private final ScheduledThreadPoolExecutor checkScheduledExecutor =
+ new ScheduledThreadPoolExecutor(Runtime.getRuntime().availableProcessors(), new EventMeshThreadFactory("admin-server-check-scheduled"),
+ new ThreadPoolExecutor.DiscardOldestPolicy());
+
@PreDestroy
private void destroy() {
if (!executor.isShutdown()) {
try {
executor.shutdown();
if (!executor.awaitTermination(30, TimeUnit.SECONDS)) {
- log.info("wait heart beat handler thread pool shutdown timeout, it will shutdown immediately");
+ log.info("wait handler thread pool shutdown timeout, it will shutdown immediately");
executor.shutdownNow();
}
} catch (InterruptedException e) {
- log.warn("wait heart beat handler thread pool shutdown fail");
+ log.warn("wait handler thread pool shutdown fail");
+ }
+ }
+
+ if (!checkScheduledExecutor.isShutdown()) {
+ try {
+ checkScheduledExecutor.shutdown();
+ if (!checkScheduledExecutor.awaitTermination(30, TimeUnit.SECONDS)) {
+ log.info("wait scheduled thread pool shutdown timeout, it will shutdown immediately");
+ checkScheduledExecutor.shutdownNow();
+ }
+ } catch (InterruptedException e) {
+ log.warn("wait scheduled thread pool shutdown fail");
}
}
}
@@ -57,4 +75,8 @@ private void destroy() {
public ThreadPoolExecutor getExecutors() {
return executor;
}
+
+ public ScheduledThreadPoolExecutor getCheckExecutor() {
+ return checkScheduledExecutor;
+ }
}
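The new getCheckExecutor() is consumed later in this patch by JobInfoBizService.init(); a condensed sketch of that usage pattern:

    // Schedule a recurring check on the shared scheduled pool (mirrors JobInfoBizService.init()).
    executor.getCheckExecutor().scheduleAtFixedRate(this::checkJobInfo, 10, 10, TimeUnit.SECONDS);

One caveat: ScheduledThreadPoolExecutor runs on an unbounded internal delay queue, so the DiscardOldestPolicy passed to it only takes effect for tasks submitted after shutdown.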
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/entity/EventMeshVerify.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/entity/EventMeshVerify.java
index 5425c5c57b..9d3e817ff9 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/entity/EventMeshVerify.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/entity/EventMeshVerify.java
@@ -37,6 +37,8 @@ public class EventMeshVerify implements Serializable {
private String taskID;
+ private String jobID;
+
private String recordID;
private String recordSig;
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/FetchJobRequestHandler.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/FetchJobRequestHandler.java
index b377bcddd8..3392084c28 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/FetchJobRequestHandler.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/FetchJobRequestHandler.java
@@ -56,7 +56,7 @@ public FetchJobResponse handler(FetchJobRequest request, Metadata metadata) {
config.setSourceConnectorConfig(JsonUtils.objectToMap(detail.getSourceDataSource().getConf()));
config.setSourceConnectorDesc(detail.getSourceConnectorDesc());
config.setSinkConnectorConfig(JsonUtils.objectToMap(detail.getSinkDataSource().getConf()));
- config.setSourceConnectorDesc(detail.getSinkConnectorDesc());
+ config.setSinkConnectorDesc(detail.getSinkConnectorDesc());
response.setConnectorConfig(config);
response.setTransportType(detail.getTransportType());
response.setState(detail.getState());
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportJobRequestHandler.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportJobRequestHandler.java
new file mode 100644
index 0000000000..defec3f8ee
--- /dev/null
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportJobRequestHandler.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.eventmesh.admin.server.web.handler.impl;
+
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.eventmesh.admin.server.web.db.entity.EventMeshJobInfo;
+import org.apache.eventmesh.admin.server.web.handler.BaseRequestHandler;
+import org.apache.eventmesh.admin.server.web.service.job.JobInfoBizService;
+import org.apache.eventmesh.common.protocol.grpc.adminserver.Metadata;
+import org.apache.eventmesh.common.remote.exception.ErrorCode;
+import org.apache.eventmesh.common.remote.request.ReportJobRequest;
+import org.apache.eventmesh.common.remote.response.SimpleResponse;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@Component
+@Slf4j
+public class ReportJobRequestHandler extends BaseRequestHandler {
+
+ @Autowired
+ JobInfoBizService jobInfoBizService;
+
+ @Override
+ public SimpleResponse handler(ReportJobRequest request, Metadata metadata) {
+ log.info("receive report job request:{}", request);
+ if (StringUtils.isBlank(request.getJobID())) {
+ return SimpleResponse.fail(ErrorCode.BAD_REQUEST, "illegal job id, it's empty");
+ }
+ EventMeshJobInfo jobInfo = jobInfoBizService.getJobInfo(request.getJobID());
+ if (jobInfo == null) {
+ return SimpleResponse.fail(ErrorCode.BAD_REQUEST, "illegal job id, not exist target job,jobID:" + request.getJobID());
+ }
+ boolean result = jobInfoBizService.updateJobState(jobInfo.getJobID(), request.getState());
+ if (result) {
+ return SimpleResponse.success();
+ } else {
+ return SimpleResponse.fail(ErrorCode.INTERNAL_ERR, "update job failed.");
+ }
+ }
+}
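A test-style sketch of driving this handler directly (names are illustrative; the Metadata construction follows the pattern used in AdminOffsetService):

    ReportJobRequest request = new ReportJobRequest();
    request.setJobID("job-1");            // must reference an existing job
    request.setState(JobState.RUNNING);
    Metadata metadata = Metadata.newBuilder()
        .setType(ReportJobRequest.class.getSimpleName())
        .build();
    SimpleResponse response = reportJobRequestHandler.handler(request, metadata);
    // fails with BAD_REQUEST for a blank or unknown jobID, succeeds once the state update persists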
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportPositionHandler.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportPositionHandler.java
index 5e2a968262..78335d419a 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportPositionHandler.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportPositionHandler.java
@@ -48,6 +48,7 @@ public class ReportPositionHandler extends BaseRequestHandler {
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportVerifyHandler.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportVerifyHandler.java
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportVerifyHandler.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportVerifyHandler.java
+ List adminServerList = properties.getAdminServerList().get(fromRegion);
+ if (adminServerList == null || adminServerList.isEmpty()) {
+ throw new RuntimeException("No admin server available for region: " + fromRegion);
+ }
+ String targetUrl = adminServerList.get(new Random().nextInt(adminServerList.size())) + "/eventmesh/admin/reportVerify";
+ RestTemplate restTemplate = new RestTemplate();
+ ResponseEntity response = restTemplate.postForEntity(targetUrl, request, String.class);
+ if (!response.getStatusCode().is2xxSuccessful()) {
+ return SimpleResponse.fail(ErrorCode.INTERNAL_ERR, "save verify request fail,code:" + response.getStatusCode() + ",msg:" + response.getBody());
+ }
+ return SimpleResponse.success();
}
- return verifyService.reportVerifyRecord(request) ? SimpleResponse.success() : SimpleResponse.fail(ErrorCode.INTERNAL_ERR, "save verify "
- + "request fail");
}
}
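The forwarding logic above reads the region routing table from AdminServerProperties; a sketch of the shape implied by the getAdminServerList()/getRegion() calls (the actual class is not shown in this patch, so everything beyond those two accessors is an assumption):

    public class AdminServerProperties {
        // region name -> admin server base URLs, e.g. R1 -> [http://localhost:8082],
        // bound from the event-mesh section of application.yaml (see the conf change above)
        private Map<String, List<String>> adminServerList;
        // the region this admin server itself serves, e.g. R1
        private String region;
        // getters/setters omitted
    }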
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/job/JobInfoBizService.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/job/JobInfoBizService.java
index 0657383e23..70abececb1 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/job/JobInfoBizService.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/job/JobInfoBizService.java
@@ -19,15 +19,19 @@
import org.apache.eventmesh.admin.server.AdminServerProperties;
import org.apache.eventmesh.admin.server.AdminServerRuntimeException;
+import org.apache.eventmesh.admin.server.web.db.DBThreadPool;
import org.apache.eventmesh.admin.server.web.db.entity.EventMeshDataSource;
import org.apache.eventmesh.admin.server.web.db.entity.EventMeshJobInfo;
+import org.apache.eventmesh.admin.server.web.db.entity.EventMeshRuntimeHeartbeat;
import org.apache.eventmesh.admin.server.web.db.service.EventMeshDataSourceService;
import org.apache.eventmesh.admin.server.web.db.service.EventMeshJobInfoExtService;
import org.apache.eventmesh.admin.server.web.db.service.EventMeshJobInfoService;
+import org.apache.eventmesh.admin.server.web.db.service.EventMeshRuntimeHeartbeatService;
import org.apache.eventmesh.admin.server.web.pojo.JobDetail;
import org.apache.eventmesh.admin.server.web.service.datasource.DataSourceBizService;
import org.apache.eventmesh.admin.server.web.service.position.PositionBizService;
import org.apache.eventmesh.common.config.connector.Config;
+import org.apache.eventmesh.common.remote.JobState;
import org.apache.eventmesh.common.remote.TaskState;
import org.apache.eventmesh.common.remote.TransportType;
import org.apache.eventmesh.common.remote.datasource.DataSource;
@@ -35,20 +39,18 @@
import org.apache.eventmesh.common.remote.exception.ErrorCode;
import org.apache.eventmesh.common.remote.request.CreateOrUpdateDataSourceReq;
import org.apache.eventmesh.common.utils.JsonUtils;
-
import org.apache.commons.lang3.StringUtils;
-
+import java.time.Duration;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
-
+import java.util.concurrent.TimeUnit;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
-
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
-
import lombok.extern.slf4j.Slf4j;
+import javax.annotation.PostConstruct;
/**
* for table 'event_mesh_job_info' db operation
@@ -75,13 +77,41 @@ public class JobInfoBizService {
@Autowired
private AdminServerProperties properties;
+ @Autowired
+ EventMeshRuntimeHeartbeatService heartbeatService;
+
+ private final long heartBeatPeriod = Duration.ofMillis(5000).toMillis();
+
+ @Autowired
+ DBThreadPool executor;
+
+ @PostConstruct
+ public void init() {
+ log.info("init check job info scheduled task.");
+ executor.getCheckExecutor().scheduleAtFixedRate(new Runnable() {
+ @Override
+ public void run() {
+ checkJobInfo();
+ }
+ }, 10, 10, TimeUnit.SECONDS);
+ }
+
public boolean updateJobState(String jobID, TaskState state) {
if (jobID == null || state == null) {
return false;
}
EventMeshJobInfo jobInfo = new EventMeshJobInfo();
jobInfo.setJobState(state.name());
- return jobInfoService.update(jobInfo, Wrappers.update().eq("jobID", jobID).ne("state", TaskState.DELETE.name()));
+ return jobInfoService.update(jobInfo, Wrappers.update().eq("jobID", jobID).ne("jobState", JobState.DELETE.name()));
+ }
+
+ public boolean updateJobState(String jobID, JobState state) {
+ if (jobID == null || state == null) {
+ return false;
+ }
+ EventMeshJobInfo jobInfo = new EventMeshJobInfo();
+ jobInfo.setJobState(state.name());
+ return jobInfoService.update(jobInfo, Wrappers.update().eq("jobID", jobID).ne("jobState", JobState.DELETE.name()));
}
@Transactional
@@ -114,7 +144,8 @@ public List createJobs(List jobs) {
source.setOperator(job.getCreateUid());
source.setRegion(job.getSourceDataSource().getRegion());
source.setDesc(job.getSourceConnectorDesc());
- source.setConfig(job.getSourceDataSource().getConf());
+ Config sourceConfig = job.getSourceDataSource().getConf();
+ source.setConfig(sourceConfig);
source.setConfigClass(job.getSourceDataSource().getConfClazz().getName());
EventMeshDataSource createdSource = dataSourceBizService.createDataSource(source);
entity.setSourceData(createdSource.getId());
@@ -124,7 +155,8 @@ public List createJobs(List jobs) {
sink.setOperator(job.getCreateUid());
sink.setRegion(job.getSinkDataSource().getRegion());
sink.setDesc(job.getSinkConnectorDesc());
- sink.setConfig(job.getSinkDataSource().getConf());
+ Config sinkConfig = job.getSinkDataSource().getConf();
+ sink.setConfig(sinkConfig);
sink.setConfigClass(job.getSinkDataSource().getConfClazz().getName());
EventMeshDataSource createdSink = dataSourceBizService.createDataSource(sink);
entity.setTargetData(createdSink.getId());
@@ -134,7 +166,7 @@ public List createJobs(List jobs) {
int changed = jobInfoExtService.batchSave(entityList);
if (changed != jobs.size()) {
throw new AdminServerRuntimeException(ErrorCode.INTERNAL_ERR, String.format("create [%d] jobs of not match expect [%d]",
- changed, jobs.size()));
+ changed, jobs.size()));
}
return entityList;
}
@@ -168,7 +200,7 @@ public JobDetail getJobDetail(String jobID) {
detail.setSourceConnectorDesc(source.getDescription());
if (source.getDataType() != null) {
detail.setPositions(positionBizService.getPositionByJobID(job.getJobID(),
- DataSourceType.getDataSourceType(source.getDataType())));
+ DataSourceType.getDataSourceType(source.getDataType())));
}
}
@@ -195,6 +227,35 @@ public JobDetail getJobDetail(String jobID) {
detail.setTransportType(TransportType.getTransportType(job.getTransportType()));
return detail;
}
+
+ public EventMeshJobInfo getJobInfo(String jobID) {
+ if (jobID == null) {
+ return null;
+ }
+ EventMeshJobInfo job = jobInfoService.getOne(Wrappers.query().eq("jobID", jobID));
+ return job;
+ }
+
+ public void checkJobInfo() {
+ List eventMeshJobInfoList = jobInfoService.list(Wrappers.query().eq("jobState", JobState.RUNNING.name()));
+ log.info("start check job info.to check job size:{}", eventMeshJobInfoList.size());
+ for (EventMeshJobInfo jobInfo : eventMeshJobInfoList) {
+ String jobID = jobInfo.getJobID();
+ if (StringUtils.isEmpty(jobID)) {
+ continue;
+ }
+ EventMeshRuntimeHeartbeat heartbeat = heartbeatService.getOne(Wrappers.query().eq("jobID", jobID));
+ if (heartbeat == null) {
+ continue;
+ }
+ // if the last heartbeat update is delayed by more than three periods, log a heartbeat delay warning
+ long currentTimeStamp = System.currentTimeMillis();
+ if (currentTimeStamp - heartbeat.getUpdateTime().getTime() > 3 * heartBeatPeriod) {
+ log.warn("current job heartbeat has delay.jobID:{},currentTimeStamp:{},last update time:{}", jobID, currentTimeStamp, heartbeat.getUpdateTime());
+ }
+ }
+ }
+
}
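The staleness rule in checkJobInfo() reduces to simple arithmetic: with heartBeatPeriod fixed at 5000 ms, a RUNNING job is flagged once its heartbeat row has gone unchanged for more than 15 seconds:

    long threshold = 3 * Duration.ofMillis(5000).toMillis();  // 15000 ms
    boolean delayed = System.currentTimeMillis() - heartbeat.getUpdateTime().getTime() > threshold;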
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/position/impl/HttpPositionHandler.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/position/impl/HttpPositionHandler.java
new file mode 100644
index 0000000000..b0f89ec03d
--- /dev/null
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/position/impl/HttpPositionHandler.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.eventmesh.admin.server.web.service.position.impl;
+
+import lombok.extern.slf4j.Slf4j;
+import org.apache.eventmesh.admin.server.web.db.service.EventMeshPositionReporterHistoryService;
+import org.apache.eventmesh.admin.server.web.service.position.PositionHandler;
+import org.apache.eventmesh.common.protocol.grpc.adminserver.Metadata;
+import org.apache.eventmesh.common.remote.datasource.DataSourceType;
+import org.apache.eventmesh.common.remote.offset.RecordPosition;
+import org.apache.eventmesh.common.remote.request.FetchPositionRequest;
+import org.apache.eventmesh.common.remote.request.ReportPositionRequest;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+import java.util.ArrayList;
+import java.util.List;
+
+@Component
+@Slf4j
+public class HttpPositionHandler extends PositionHandler {
+ @Autowired
+ EventMeshPositionReporterHistoryService historyService;
+
+ @Override
+ protected DataSourceType getSourceType() {
+ return DataSourceType.HTTP;
+ }
+
+ @Override
+ public boolean handler(ReportPositionRequest request, Metadata metadata) {
+ log.info("receive http position report request:{}", request);
+ // mock http position report store
+ return true;
+ }
+
+ @Override
+ public List handler(FetchPositionRequest request, Metadata metadata) {
+ // mock http position fetch request
+ List recordPositionList = new ArrayList<>();
+ return recordPositionList;
+ }
+}
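PositionBizService presumably routes requests to this handler via getSourceType(); that wiring is not part of this patch, so the registry below is purely a hypothetical illustration:

    // Hypothetical dispatch table keyed by DataSourceType; assumes getSourceType() is visible to the registry.
    Map<DataSourceType, PositionHandler> handlers = new EnumMap<>(DataSourceType.class);
    injectedHandlers.forEach(h -> handlers.put(h.getSourceType(), h));  // e.g. a Spring-injected List<PositionHandler>
    PositionHandler httpHandler = handlers.get(DataSourceType.HTTP);    // resolves to HttpPositionHandler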
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/verify/VerifyBizService.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/verify/VerifyBizService.java
index 74f208b199..9d648e0a72 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/verify/VerifyBizService.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/verify/VerifyBizService.java
@@ -35,6 +35,7 @@ public boolean reportVerifyRecord(ReportVerifyRequest request) {
verify.setRecordSig(request.getRecordSig());
verify.setPosition(request.getPosition());
verify.setTaskID(request.getTaskID());
+ verify.setJobID(request.getJobID());
verify.setConnectorName(request.getConnectorName());
verify.setConnectorStage(request.getConnectorStage());
return verifyService.save(verify);
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/JobState.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/JobState.java
new file mode 100644
index 0000000000..53d20f2ace
--- /dev/null
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/JobState.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.eventmesh.common.remote;
+
+import lombok.ToString;
+
+import java.util.HashMap;
+import java.util.Map;
+
+@ToString
+public enum JobState {
+ INIT, RUNNING, COMPLETE, DELETE, FAIL;
+ private static final JobState[] STATES_NUM_INDEX = JobState.values();
+ private static final Map STATES_NAME_INDEX = new HashMap<>();
+ static {
+ for (JobState jobState : STATES_NUM_INDEX) {
+ STATES_NAME_INDEX.put(jobState.name(), jobState);
+ }
+ }
+
+ public static JobState fromIndex(Integer index) {
+ if (index == null || index < 0 || index >= STATES_NUM_INDEX.length) {
+ return null;
+ }
+
+ return STATES_NUM_INDEX[index];
+ }
+
+ public static JobState fromIndex(String index) {
+ if (index == null || index.isEmpty()) {
+ return null;
+ }
+
+ return STATES_NAME_INDEX.get(index);
+ }
+}
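Both lookups return null for unknown input rather than throwing; a short usage sketch:

    JobState byOrdinal = JobState.fromIndex(1);        // RUNNING (declaration order: INIT, RUNNING, COMPLETE, DELETE, FAIL)
    JobState byName = JobState.fromIndex("COMPLETE");  // COMPLETE
    JobState unknown = JobState.fromIndex("PAUSED");   // null, so callers must handle missing states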
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/request/ReportVerifyRequest.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/request/ReportVerifyRequest.java
index cd541949f4..bd38881c3d 100644
--- a/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/request/ReportVerifyRequest.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/request/ReportVerifyRequest.java
@@ -28,6 +28,8 @@ public class ReportVerifyRequest extends BaseRemoteRequest {
private String taskID;
+ private String jobID;
+
private String recordID;
private String recordSig;
diff --git a/eventmesh-common/src/main/resources/META-INF/services/org.apache.eventmesh.common.remote.payload.IPayload b/eventmesh-common/src/main/resources/META-INF/services/org.apache.eventmesh.common.remote.payload.IPayload
index 82d5c94dd3..433cf57ed1 100644
--- a/eventmesh-common/src/main/resources/META-INF/services/org.apache.eventmesh.common.remote.payload.IPayload
+++ b/eventmesh-common/src/main/resources/META-INF/services/org.apache.eventmesh.common.remote.payload.IPayload
@@ -16,6 +16,7 @@
org.apache.eventmesh.common.remote.request.FetchJobRequest
org.apache.eventmesh.common.remote.response.FetchJobResponse
org.apache.eventmesh.common.remote.request.ReportPositionRequest
+org.apache.eventmesh.common.remote.request.ReportJobRequest
org.apache.eventmesh.common.remote.request.ReportVerifyRequest
org.apache.eventmesh.common.remote.request.ReportHeartBeatRequest
org.apache.eventmesh.common.remote.request.FetchPositionRequest
diff --git a/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/src/main/java/org/apache/eventmesh/openconnect/offsetmgmt/admin/AdminOffsetService.java b/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/src/main/java/org/apache/eventmesh/openconnect/offsetmgmt/admin/AdminOffsetService.java
index 977661b134..993352a979 100644
--- a/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/src/main/java/org/apache/eventmesh/openconnect/offsetmgmt/admin/AdminOffsetService.java
+++ b/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/src/main/java/org/apache/eventmesh/openconnect/offsetmgmt/admin/AdminOffsetService.java
@@ -112,6 +112,8 @@ public void persist() {
reportPositionRequest.setRecordPositionList(recordToSyncList);
+ log.debug("start report position request: {}", JsonUtils.toJSONString(reportPositionRequest));
+
Metadata metadata = Metadata.newBuilder()
.setType(ReportPositionRequest.class.getSimpleName())
.build();
@@ -121,6 +123,7 @@ public void persist() {
.build())
.build();
requestObserver.onNext(payload);
+ log.debug("end report position request: {}", JsonUtils.toJSONString(reportPositionRequest));
for (Map.Entry entry : recordMap.entrySet()) {
positionStore.remove(entry.getKey());
From 1b5b52611372918fb129e566675dc5593fffa482 Mon Sep 17 00:00:00 2001
From: xwm1992
Date: Tue, 20 Aug 2024 15:32:28 +0800
Subject: [PATCH 2/6] fix checkstyle errors
---
.../admin/server/web/HttpServer.java | 7 ++--
.../admin/server/web/db/DBThreadPool.java | 4 +--
.../server/web/db/entity/EventMeshVerify.java | 1 +
.../handler/impl/ReportJobRequestHandler.java | 7 ++--
.../handler/impl/ReportPositionHandler.java | 1 +
.../web/handler/impl/ReportVerifyHandler.java | 21 +++++++----
.../web/service/job/JobInfoBizService.java | 15 +++++---
.../position/impl/HttpPositionHandler.java | 10 ++++--
.../web/service/verify/VerifyBizService.java | 1 +
.../eventmesh/common/remote/JobState.java | 5 +--
.../common/remote/TransportType.java | 1 +
.../remote/request/ReportJobRequest.java | 36 +++++++++++++++++++
12 files changed, 86 insertions(+), 23 deletions(-)
create mode 100644 eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/request/ReportJobRequest.java
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/HttpServer.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/HttpServer.java
index 8350802f75..2454e9f02c 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/HttpServer.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/HttpServer.java
@@ -17,7 +17,6 @@
package org.apache.eventmesh.admin.server.web;
-import lombok.extern.slf4j.Slf4j;
import org.apache.eventmesh.admin.server.web.service.task.TaskBizService;
import org.apache.eventmesh.admin.server.web.service.verify.VerifyBizService;
import org.apache.eventmesh.common.remote.request.CreateTaskRequest;
@@ -32,6 +31,8 @@
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;
+import lombok.extern.slf4j.Slf4j;
+
@RestController
@RequestMapping("/eventmesh/admin")
@Slf4j
@@ -45,9 +46,9 @@ public class HttpServer {
@RequestMapping(value = "/createTask", method = RequestMethod.POST)
public ResponseEntity createOrUpdateTask(@RequestBody CreateTaskRequest task) {
- log.info("receive http proto create task:{}",task);
+ log.info("receive http proto create task:{}", task);
CreateTaskResponse createTaskResponse = taskService.createTask(task);
- log.info("receive http proto create task result:{}",createTaskResponse);
+ log.info("receive http proto create task result:{}", createTaskResponse);
return ResponseEntity.ok(JsonUtils.toJSONString(Response.success(createTaskResponse)));
}
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/DBThreadPool.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/DBThreadPool.java
index 124eca4261..277ea66656 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/DBThreadPool.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/DBThreadPool.java
@@ -42,8 +42,8 @@ public class DBThreadPool {
private final ScheduledThreadPoolExecutor checkScheduledExecutor =
- new ScheduledThreadPoolExecutor(Runtime.getRuntime().availableProcessors(), new EventMeshThreadFactory("admin-server-check-scheduled"),
- new ThreadPoolExecutor.DiscardOldestPolicy());
+ new ScheduledThreadPoolExecutor(Runtime.getRuntime().availableProcessors(), new EventMeshThreadFactory("admin-server-check-scheduled"),
+ new ThreadPoolExecutor.DiscardOldestPolicy());
@PreDestroy
private void destroy() {
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/entity/EventMeshVerify.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/entity/EventMeshVerify.java
index 9d3e817ff9..c5a6c35f8d 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/entity/EventMeshVerify.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/db/entity/EventMeshVerify.java
@@ -32,6 +32,7 @@
@TableName(value = "event_mesh_verify")
@Data
public class EventMeshVerify implements Serializable {
+
@TableId(type = IdType.AUTO)
private Integer id;
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportJobRequestHandler.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportJobRequestHandler.java
index defec3f8ee..ea836ce7aa 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportJobRequestHandler.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportJobRequestHandler.java
@@ -17,8 +17,6 @@
package org.apache.eventmesh.admin.server.web.handler.impl;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
import org.apache.eventmesh.admin.server.web.db.entity.EventMeshJobInfo;
import org.apache.eventmesh.admin.server.web.handler.BaseRequestHandler;
import org.apache.eventmesh.admin.server.web.service.job.JobInfoBizService;
@@ -26,9 +24,14 @@
import org.apache.eventmesh.common.remote.exception.ErrorCode;
import org.apache.eventmesh.common.remote.request.ReportJobRequest;
import org.apache.eventmesh.common.remote.response.SimpleResponse;
+
+import org.apache.commons.lang3.StringUtils;
+
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
+import lombok.extern.slf4j.Slf4j;
+
@Component
@Slf4j
public class ReportJobRequestHandler extends BaseRequestHandler {
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportPositionHandler.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportPositionHandler.java
index 78335d419a..7a30bef80a 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportPositionHandler.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportPositionHandler.java
@@ -37,6 +37,7 @@
@Component
@Slf4j
public class ReportPositionHandler extends BaseRequestHandler {
+
@Autowired
private JobInfoBizService jobInfoBizService;
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportVerifyHandler.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportVerifyHandler.java
index 99defbe7c4..9844f47c6a 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportVerifyHandler.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/handler/impl/ReportVerifyHandler.java
@@ -26,18 +26,23 @@
import org.apache.eventmesh.common.remote.exception.ErrorCode;
import org.apache.eventmesh.common.remote.request.ReportVerifyRequest;
import org.apache.eventmesh.common.remote.response.SimpleResponse;
+
import org.apache.commons.lang3.StringUtils;
+
+import java.util.List;
+import java.util.Random;
+
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.ResponseEntity;
import org.springframework.stereotype.Component;
-import lombok.extern.slf4j.Slf4j;
import org.springframework.web.client.RestTemplate;
-import java.util.List;
-import java.util.Random;
+
+import lombok.extern.slf4j.Slf4j;
@Component
@Slf4j
public class ReportVerifyHandler extends BaseRequestHandler {
+
@Autowired
private VerifyBizService verifyService;
@@ -49,7 +54,8 @@ public class ReportVerifyHandler extends BaseRequestHandler {
List adminServerList = properties.getAdminServerList().get(fromRegion);
@@ -77,7 +83,8 @@ protected SimpleResponse handler(ReportVerifyRequest request, Metadata metadata)
RestTemplate restTemplate = new RestTemplate();
ResponseEntity response = restTemplate.postForEntity(targetUrl, request, String.class);
if (!response.getStatusCode().is2xxSuccessful()) {
- return SimpleResponse.fail(ErrorCode.INTERNAL_ERR, "save verify request fail,code:" + response.getStatusCode() + ",msg:" + response.getBody());
+ return SimpleResponse.fail(ErrorCode.INTERNAL_ERR,
+ "save verify request fail,code:" + response.getStatusCode() + ",msg:" + response.getBody());
}
return SimpleResponse.success();
}
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/job/JobInfoBizService.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/job/JobInfoBizService.java
index 70abececb1..a8b469d8b7 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/job/JobInfoBizService.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/job/JobInfoBizService.java
@@ -39,18 +39,24 @@
import org.apache.eventmesh.common.remote.exception.ErrorCode;
import org.apache.eventmesh.common.remote.request.CreateOrUpdateDataSourceReq;
import org.apache.eventmesh.common.utils.JsonUtils;
+
import org.apache.commons.lang3.StringUtils;
+
import java.time.Duration;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
+
+import javax.annotation.PostConstruct;
+
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
+
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
+
import lombok.extern.slf4j.Slf4j;
-import javax.annotation.PostConstruct;
/**
* for table 'event_mesh_job_info' db operation
@@ -166,7 +172,7 @@ public List createJobs(List jobs) {
int changed = jobInfoExtService.batchSave(entityList);
if (changed != jobs.size()) {
throw new AdminServerRuntimeException(ErrorCode.INTERNAL_ERR, String.format("create [%d] jobs of not match expect [%d]",
- changed, jobs.size()));
+ changed, jobs.size()));
}
return entityList;
}
@@ -200,7 +206,7 @@ public JobDetail getJobDetail(String jobID) {
detail.setSourceConnectorDesc(source.getDescription());
if (source.getDataType() != null) {
detail.setPositions(positionBizService.getPositionByJobID(job.getJobID(),
- DataSourceType.getDataSourceType(source.getDataType())));
+ DataSourceType.getDataSourceType(source.getDataType())));
}
}
@@ -251,7 +257,8 @@ public void checkJobInfo() {
// if the last heartbeat update is delayed by more than three periods, log a heartbeat delay warning
long currentTimeStamp = System.currentTimeMillis();
if (currentTimeStamp - heartbeat.getUpdateTime().getTime() > 3 * heartBeatPeriod) {
- log.warn("current job heart heart has delay.jobID:{},currentTimeStamp:{},last update time:{}", jobID, currentTimeStamp, heartbeat.getUpdateTime());
+ log.warn("current job heart heart has delay.jobID:{},currentTimeStamp:{},last update time:{}", jobID, currentTimeStamp,
+ heartbeat.getUpdateTime());
}
}
}
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/position/impl/HttpPositionHandler.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/position/impl/HttpPositionHandler.java
index b0f89ec03d..b8d536f388 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/position/impl/HttpPositionHandler.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/position/impl/HttpPositionHandler.java
@@ -17,7 +17,6 @@
package org.apache.eventmesh.admin.server.web.service.position.impl;
-import lombok.extern.slf4j.Slf4j;
import org.apache.eventmesh.admin.server.web.db.service.EventMeshPositionReporterHistoryService;
import org.apache.eventmesh.admin.server.web.service.position.PositionHandler;
import org.apache.eventmesh.common.protocol.grpc.adminserver.Metadata;
@@ -25,14 +24,19 @@
import org.apache.eventmesh.common.remote.offset.RecordPosition;
import org.apache.eventmesh.common.remote.request.FetchPositionRequest;
import org.apache.eventmesh.common.remote.request.ReportPositionRequest;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.stereotype.Component;
+
import java.util.ArrayList;
import java.util.List;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+import lombok.extern.slf4j.Slf4j;
+
@Component
@Slf4j
public class HttpPositionHandler extends PositionHandler {
+
@Autowired
EventMeshPositionReporterHistoryService historyService;
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/verify/VerifyBizService.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/verify/VerifyBizService.java
index 9d648e0a72..e4f08b30cc 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/verify/VerifyBizService.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/verify/VerifyBizService.java
@@ -26,6 +26,7 @@
@Service
public class VerifyBizService {
+
@Autowired
private EventMeshVerifyService verifyService;
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/JobState.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/JobState.java
index 53d20f2ace..da9daffe9c 100644
--- a/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/JobState.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/JobState.java
@@ -17,16 +17,17 @@
package org.apache.eventmesh.common.remote;
-import lombok.ToString;
-
import java.util.HashMap;
import java.util.Map;
+import lombok.ToString;
+
@ToString
public enum JobState {
INIT, RUNNING, COMPLETE, DELETE, FAIL;
private static final JobState[] STATES_NUM_INDEX = JobState.values();
private static final Map STATES_NAME_INDEX = new HashMap<>();
+
static {
for (JobState jobState : STATES_NUM_INDEX) {
STATES_NAME_INDEX.put(jobState.name(), jobState);
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/TransportType.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/TransportType.java
index 82e7bc021d..6b43598398 100644
--- a/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/TransportType.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/TransportType.java
@@ -35,6 +35,7 @@ public enum TransportType {
HTTP_REDIS(DataSourceType.HTTP, DataSourceType.REDIS),
HTTP_ROCKETMQ(DataSourceType.HTTP, DataSourceType.ROCKETMQ),
REDIS_MQ(DataSourceType.REDIS, DataSourceType.ROCKETMQ),
+ HTTP_HTTP(DataSourceType.HTTP, DataSourceType.HTTP),
;
private static final Map INDEX_TYPES = new HashMap<>();
private static final TransportType[] TYPES = TransportType.values();
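Assuming getTransportType(String) resolves by enum name, as its use in JobInfoBizService.getJobDetail suggests, the new member is reachable as:

    TransportType type = TransportType.getTransportType("HTTP_HTTP");  // HTTP source to HTTP sink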
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/request/ReportJobRequest.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/request/ReportJobRequest.java
new file mode 100644
index 0000000000..9e7444459f
--- /dev/null
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/request/ReportJobRequest.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.eventmesh.common.remote.request;
+
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+import lombok.ToString;
+import org.apache.eventmesh.common.remote.JobState;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+@ToString
+public class ReportJobRequest extends BaseRemoteRequest {
+
+ private String jobID;
+
+ private JobState state;
+
+ private String address;
+
+}
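A sketch of the payload a runtime might report (serialized via JsonUtils as elsewhere in the codebase; the address value is illustrative, and BaseRemoteRequest may contribute inherited fields):

    ReportJobRequest request = new ReportJobRequest();
    request.setJobID("job-1");
    request.setState(JobState.RUNNING);
    request.setAddress("10.0.0.1:8081");  // reporting runtime's address, value illustrative
    String json = JsonUtils.toJSONString(request);
    // roughly {"jobID":"job-1","state":"RUNNING","address":"10.0.0.1:8081"} plus any inherited fields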
From 9ddeb877c41558276fda9021321611e6dc382b79 Mon Sep 17 00:00:00 2001
From: xwm1992
Date: Tue, 20 Aug 2024 15:39:36 +0800
Subject: [PATCH 3/6] fix checkstyle errors
---
.../eventmesh/common/remote/request/ReportJobRequest.java | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/request/ReportJobRequest.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/request/ReportJobRequest.java
index 9e7444459f..aec33e4616 100644
--- a/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/request/ReportJobRequest.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/remote/request/ReportJobRequest.java
@@ -17,10 +17,11 @@
package org.apache.eventmesh.common.remote.request;
+import org.apache.eventmesh.common.remote.JobState;
+
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.ToString;
-import org.apache.eventmesh.common.remote.JobState;
@Data
@EqualsAndHashCode(callSuper = true)
From 39edcb03479dc72e29ad2c47182a892a51253f7f Mon Sep 17 00:00:00 2001
From: xwm1992
Date: Wed, 21 Aug 2024 18:55:16 +0800
Subject: [PATCH 4/6] [ISSUE #5081] Enhancement update for connectors &
admin-server
---
.../web/service/job/JobInfoBizService.java | 3 +
.../common/config/ConfigService.java | 11 +-
.../connector/http}/HttpRetryConfig.java | 2 +-
.../connector/http}/HttpSinkConfig.java | 2 +-
.../connector/http}/HttpWebhookConfig.java | 2 +-
.../connector/http}/SinkConnectorConfig.java | 11 +-
.../connector/http/SourceConnectorConfig.java | 6 +-
.../connector/rdb/canal/CanalSinkConfig.java | 25 +-
.../rdb/canal/CanalSinkFullConfig.java | 2 +-
.../rdb/canal/CanalSinkIncrementConfig.java | 50 +
.../rdb/canal/CanalSourceConfig.java | 58 +-
.../rdb/canal/CanalSourceFullConfig.java | 2 +-
.../rdb/canal/CanalSourceIncrementConfig.java | 86 ++
.../eventmesh/common/utils/JsonUtils.java | 11 +
.../SqlBuilderLoadInterceptor.java | 16 +-
.../sink/connector/CanalSinkConnector.java | 778 +---------------
.../connector/CanalSinkFullConnector.java | 43 +-
.../CanalSinkIncrementConnector.java | 865 ++++++++++++++++++
.../connector/canal/source/EntryParser.java | 18 +-
.../source/connector/CanalFullProducer.java | 8 +-
.../connector/CanalSourceConnector.java | 319 +------
.../connector/CanalSourceFullConnector.java | 26 +-
.../CanalSourceIncrementConnector.java | 383 ++++++++
.../source/position/CanalFullPositionMgr.java | 4 +-
.../http/sink/HttpSinkConnector.java | 4 +-
.../http/sink/data/HttpConnectRecord.java | 40 +-
.../sink/handler/AbstractHttpSinkHandler.java | 4 +-
.../http/sink/handler/HttpSinkHandler.java | 10 +-
.../handler/impl/CommonHttpSinkHandler.java | 61 +-
.../impl/HttpSinkHandlerRetryWrapper.java | 10 +-
.../handler/impl/WebhookHttpSinkHandler.java | 13 +-
.../http/source/data/WebhookRequest.java | 4 +
.../source/protocol/impl/CommonProtocol.java | 31 +-
.../source/protocol/impl/GitHubProtocol.java | 2 +-
...esh.openconnect.api.ConnectorCreateService | 20 +
.../http/sink/HttpSinkConnectorTest.java | 4 +-
.../api/connector/SinkConnectorContext.java | 7 +
.../api/connector/SourceConnectorContext.java | 3 +
.../offsetmgmt/api/data/DefaultKeyValue.java | 5 +
eventmesh-runtime-v2/build.gradle | 1 +
.../runtime/connector/ConnectorRuntime.java | 101 +-
.../connector/ConnectorRuntimeConfig.java | 3 +
42 files changed, 1777 insertions(+), 1277 deletions(-)
rename {eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config => eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http}/HttpRetryConfig.java (95%)
rename {eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config => eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http}/HttpSinkConfig.java (94%)
rename {eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config => eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http}/HttpWebhookConfig.java (95%)
rename {eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config => eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http}/SinkConnectorConfig.java (84%)
create mode 100644 eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSinkIncrementConfig.java
create mode 100644 eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSourceIncrementConfig.java
create mode 100644 eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkIncrementConnector.java
create mode 100644 eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceIncrementConnector.java
create mode 100644 eventmesh-connectors/eventmesh-connector-http/src/main/resources/META-INF/eventmesh/org.apache.eventmesh.openconnect.api.ConnectorCreateService
diff --git a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/job/JobInfoBizService.java b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/job/JobInfoBizService.java
index a8b469d8b7..76df629e69 100644
--- a/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/job/JobInfoBizService.java
+++ b/eventmesh-admin-server/src/main/java/org/apache/eventmesh/admin/server/web/service/job/JobInfoBizService.java
@@ -37,6 +37,7 @@
import org.apache.eventmesh.common.remote.datasource.DataSource;
import org.apache.eventmesh.common.remote.datasource.DataSourceType;
import org.apache.eventmesh.common.remote.exception.ErrorCode;
+import org.apache.eventmesh.common.remote.job.JobType;
import org.apache.eventmesh.common.remote.request.CreateOrUpdateDataSourceReq;
import org.apache.eventmesh.common.utils.JsonUtils;
@@ -231,6 +232,8 @@ public JobDetail getJobDetail(String jobID) {
}
detail.setState(state);
detail.setTransportType(TransportType.getTransportType(job.getTransportType()));
+ detail.setJobType(JobType.fromIndex(job.getJobType()));
+ detail.setJobDesc(job.getJobDesc());
return detail;
}
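With the job type and description now populated on JobDetail, a runtime can branch on the type when it wires up connectors. A minimal consumer sketch (the describe helper is hypothetical; FULL/INCREASE/CHECK are the JobType constants this patch dispatches on in the canal sink below):

    import org.apache.eventmesh.common.remote.job.JobType;

    public class JobDetailUsageSketch {
        // Hypothetical helper: label a job by the type/desc now carried on JobDetail.
        static String describe(JobType jobType, String jobDesc) {
            switch (jobType) {
                case FULL:
                    return "full sync: " + jobDesc;
                case INCREASE:
                    return "increment sync: " + jobDesc;
                case CHECK:
                    return "data check: " + jobDesc;
                default:
                    return "unsupported job type: " + jobDesc;
            }
        }
    }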
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/ConfigService.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/ConfigService.java
index 939c9d8d67..3f3f609a1f 100644
--- a/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/ConfigService.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/ConfigService.java
@@ -131,7 +131,7 @@ public <T> T getConfig(ConfigInfo configInfo) throws IOException {
} else {
filePath = path.startsWith(FILE_PATH_PREFIX) ? path.substring(FILE_PATH_PREFIX.length()) : this.configPath + path;
}
-
+ filePath = normalizeFilePath(filePath);
if (filePath.contains(".jar")) {
try (final InputStream inputStream = getClass().getResourceAsStream(Objects.requireNonNull(resourceUrl))) {
if (inputStream == null) {
@@ -152,6 +152,15 @@ public <T> T getConfig(ConfigInfo configInfo) throws IOException {
return (T) object;
}
+ private String normalizeFilePath(String filePath) {
+ if (System.getProperty("os.name").toLowerCase().contains("win")) {
+ if (filePath.startsWith("/")) {
+ filePath = filePath.substring(1);
+ }
+ }
+ return filePath;
+ }
+
private void populateConfig(Object object, Class<?> clazz, Config config)
throws NoSuchFieldException, IOException, IllegalAccessException {
ConfigInfo configInfo = new ConfigInfo();
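The new normalizeFilePath guards a Windows-specific quirk: a path derived from a classpath URL arrives as /C:/..., which fails to resolve as a file. A standalone sketch of the same rule, with the OS name passed in so it can run anywhere (the real method reads os.name from system properties):

    public class PathNormalizationSketch {
        // On Windows, strip the leading slash from a classpath-derived path;
        // leave the path untouched on other platforms.
        static String normalize(String filePath, String osName) {
            if (osName.toLowerCase().contains("win") && filePath.startsWith("/")) {
                return filePath.substring(1);
            }
            return filePath;
        }

        public static void main(String[] args) {
            // prints C:/eventmesh/conf/application.yaml
            System.out.println(normalize("/C:/eventmesh/conf/application.yaml", "Windows 11"));
            // prints /data/eventmesh/conf/application.yaml (unchanged)
            System.out.println(normalize("/data/eventmesh/conf/application.yaml", "Linux"));
        }
    }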
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config/HttpRetryConfig.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/HttpRetryConfig.java
similarity index 95%
rename from eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config/HttpRetryConfig.java
rename to eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/HttpRetryConfig.java
index 08c3a323e7..319732a875 100644
--- a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config/HttpRetryConfig.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/HttpRetryConfig.java
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-package org.apache.eventmesh.connector.http.sink.config;
+package org.apache.eventmesh.common.config.connector.http;
import lombok.Data;
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config/HttpSinkConfig.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/HttpSinkConfig.java
similarity index 94%
rename from eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config/HttpSinkConfig.java
rename to eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/HttpSinkConfig.java
index 5997b90b7d..3c429f3355 100644
--- a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config/HttpSinkConfig.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/HttpSinkConfig.java
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-package org.apache.eventmesh.connector.http.sink.config;
+package org.apache.eventmesh.common.config.connector.http;
import org.apache.eventmesh.common.config.connector.SinkConfig;
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config/HttpWebhookConfig.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/HttpWebhookConfig.java
similarity index 95%
rename from eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config/HttpWebhookConfig.java
rename to eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/HttpWebhookConfig.java
index f15bac4568..96b9e09826 100644
--- a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config/HttpWebhookConfig.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/HttpWebhookConfig.java
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-package org.apache.eventmesh.connector.http.sink.config;
+package org.apache.eventmesh.common.config.connector.http;
import lombok.Data;
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config/SinkConnectorConfig.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/SinkConnectorConfig.java
similarity index 84%
rename from eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config/SinkConnectorConfig.java
rename to eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/SinkConnectorConfig.java
index 9bb338cceb..ccebe5a998 100644
--- a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/config/SinkConnectorConfig.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/SinkConnectorConfig.java
@@ -15,9 +15,8 @@
* limitations under the License.
*/
-package org.apache.eventmesh.connector.http.sink.config;
+package org.apache.eventmesh.common.config.connector.http;
-import io.vertx.core.http.HttpClientOptions;
import lombok.Data;
@@ -29,19 +28,19 @@ public class SinkConnectorConfig {
private String[] urls;
// keepAlive, default true
- private boolean keepAlive = HttpClientOptions.DEFAULT_KEEP_ALIVE;
+ private boolean keepAlive = true;
// timeunit: ms, default 60000ms
- private int keepAliveTimeout = HttpClientOptions.DEFAULT_KEEP_ALIVE_TIMEOUT * 1000; // Keep units consistent
+ private int keepAliveTimeout = 60 * 1000; // Keep units consistent
// timeunit: ms, default 5000ms, recommended scope: 5000ms - 10000ms
private int connectionTimeout = 5000;
// timeunit: ms, default 5000ms
- private int idleTimeout;
+ private int idleTimeout = 5000;
// maximum number of HTTP/1 connections a client will pool, default 5
- private int maxConnectionPoolSize = HttpClientOptions.DEFAULT_MAX_POOL_SIZE;
+ private int maxConnectionPoolSize = 5;
// retry config
private HttpRetryConfig retryConfig = new HttpRetryConfig();
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/SourceConnectorConfig.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/SourceConnectorConfig.java
index b7f075e6d3..58d910bf2d 100644
--- a/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/SourceConnectorConfig.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/http/SourceConnectorConfig.java
@@ -27,7 +27,7 @@ public class SourceConnectorConfig {
private String connectorName;
- private String path;
+ private String path = "/";
private int port;
@@ -51,11 +51,11 @@ public class SourceConnectorConfig {
private int batchSize = 10;
// protocol, default CloudEvent
- private String protocol = "CloudEvent";
+ private String protocol = "Common";
// extra config, e.g. GitHub secret
private Map<String, String> extraConfig = new HashMap<>();
// data consistency enabled, default true
- private boolean dataConsistencyEnabled = true;
+ private boolean dataConsistencyEnabled = false;
}
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSinkConfig.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSinkConfig.java
index 026f33f4fc..c535c7f52a 100644
--- a/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSinkConfig.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSinkConfig.java
@@ -18,7 +18,8 @@
package org.apache.eventmesh.common.config.connector.rdb.canal;
import org.apache.eventmesh.common.config.connector.SinkConfig;
-import org.apache.eventmesh.common.remote.job.SyncMode;
+
+import java.util.Map;
import lombok.Data;
import lombok.EqualsAndHashCode;
@@ -27,25 +28,7 @@
@EqualsAndHashCode(callSuper = true)
public class CanalSinkConfig extends SinkConfig {
- // batchSize
- private Integer batchSize = 50;
-
- // enable batch
- private Boolean useBatch = true;
-
- // sink thread size for single channel
- private Integer poolSize = 5;
-
- // sync mode: field/row
- private SyncMode syncMode;
-
- private boolean isGTIDMode = true;
-
- private boolean isMariaDB = true;
-
- // skip sink process exception
- private Boolean skipException = false;
-
- public SinkConnectorConfig sinkConnectorConfig;
+ // used to convert canal full/increment/check connector config
+ private Map<String, Object> sinkConfig;
}
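CanalSinkConfig is deliberately reduced to an untyped map: the admin server ships one payload, and each concrete connector re-parses it into its own typed config. A minimal sketch of that conversion, using the ConfigUtil.parse(Object, Class) call this patch makes in CanalSinkFullConnector.init below:

    import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSinkConfig;
    import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSinkFullConfig;
    import org.apache.eventmesh.openconnect.util.ConfigUtil;

    public class SinkConfigConversionSketch {
        // Re-parse the untyped sinkConfig map into the full-sync config;
        // the increment and check connectors do the same with their own types.
        static CanalSinkFullConfig toFullConfig(CanalSinkConfig canalSinkConfig) {
            return ConfigUtil.parse(canalSinkConfig.getSinkConfig(), CanalSinkFullConfig.class);
        }
    }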
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSinkFullConfig.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSinkFullConfig.java
index c2b881df6c..f1d78a65dc 100644
--- a/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSinkFullConfig.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSinkFullConfig.java
@@ -26,6 +26,6 @@
@Data
@EqualsAndHashCode(callSuper = true)
public class CanalSinkFullConfig extends SinkConfig {
- private SinkConnectorConfig sinkConfig;
+ private SinkConnectorConfig sinkConnectorConfig;
private String zeroDate;
}
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSinkIncrementConfig.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSinkIncrementConfig.java
new file mode 100644
index 0000000000..32112a769b
--- /dev/null
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSinkIncrementConfig.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.eventmesh.common.config.connector.rdb.canal;
+
+import org.apache.eventmesh.common.remote.job.SyncMode;
+
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class CanalSinkIncrementConfig extends CanalSinkConfig {
+
+ // batchSize
+ private Integer batchSize = 50;
+
+ // enable batch
+ private Boolean useBatch = true;
+
+ // sink thread size for single channel
+ private Integer poolSize = 5;
+
+ // sync mode: field/row
+ private SyncMode syncMode;
+
+ private boolean isGTIDMode = true;
+
+ private boolean isMariaDB = true;
+
+ // skip sink process exception
+ private Boolean skipException = false;
+
+ public SinkConnectorConfig sinkConnectorConfig;
+
+}
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSourceConfig.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSourceConfig.java
index 8331d32cb7..db17fbe75d 100644
--- a/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSourceConfig.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSourceConfig.java
@@ -18,11 +18,8 @@
package org.apache.eventmesh.common.config.connector.rdb.canal;
import org.apache.eventmesh.common.config.connector.SourceConfig;
-import org.apache.eventmesh.common.remote.job.SyncConsistency;
-import org.apache.eventmesh.common.remote.job.SyncMode;
-import org.apache.eventmesh.common.remote.offset.RecordPosition;
-import java.util.List;
+import java.util.Map;
import lombok.Data;
import lombok.EqualsAndHashCode;
@@ -31,56 +28,7 @@
@EqualsAndHashCode(callSuper = true)
public class CanalSourceConfig extends SourceConfig {
- private String destination;
+ // used to convert canal full/increment/check connector config
+ private Map<String, Object> sourceConfig;
- private Long canalInstanceId;
-
- private String desc;
-
- private boolean ddlSync = true;
-
- private boolean filterTableError = false;
-
- private Long slaveId;
-
- private Short clientId;
-
- private String serverUUID;
-
- private boolean isMariaDB = true;
-
- private boolean isGTIDMode = true;
-
- private Integer batchSize = 10000;
-
- private Long batchTimeout = -1L;
-
- private String tableFilter;
-
- private String fieldFilter;
-
- private List<RecordPosition> recordPositions;
-
- // ================================= channel parameter
- // ================================
-
- // enable remedy
- private Boolean enableRemedy = false;
-
- // sync mode: field/row
- private SyncMode syncMode;
-
- // sync consistency
- private SyncConsistency syncConsistency;
-
- // ================================= system parameter
- // ================================
-
- // Column name of the bidirectional synchronization mark
- private String needSyncMarkTableColumnName = "needSync";
-
- // Column value of the bidirectional synchronization mark
- private String needSyncMarkTableColumnValue = "needSync";
-
- private SourceConnectorConfig sourceConnectorConfig;
}
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSourceFullConfig.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSourceFullConfig.java
index a2ab8ba31d..15398b303a 100644
--- a/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSourceFullConfig.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSourceFullConfig.java
@@ -28,7 +28,7 @@
@Data
@EqualsAndHashCode(callSuper = true)
public class CanalSourceFullConfig extends SourceConfig {
- private SourceConnectorConfig connectorConfig;
+ private SourceConnectorConfig sourceConnectorConfig;
private List<RecordPosition> startPosition;
private int parallel;
private int flushSize;
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSourceIncrementConfig.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSourceIncrementConfig.java
new file mode 100644
index 0000000000..94fe007b5f
--- /dev/null
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/config/connector/rdb/canal/CanalSourceIncrementConfig.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.eventmesh.common.config.connector.rdb.canal;
+
+import org.apache.eventmesh.common.remote.job.SyncConsistency;
+import org.apache.eventmesh.common.remote.job.SyncMode;
+import org.apache.eventmesh.common.remote.offset.RecordPosition;
+
+import java.util.List;
+
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class CanalSourceIncrementConfig extends CanalSourceConfig {
+
+ private String destination;
+
+ private Long canalInstanceId;
+
+ private String desc;
+
+ private boolean ddlSync = true;
+
+ private boolean filterTableError = false;
+
+ private Long slaveId;
+
+ private Short clientId;
+
+ private String serverUUID;
+
+ private boolean isMariaDB = true;
+
+ private boolean isGTIDMode = true;
+
+ private Integer batchSize = 10000;
+
+ private Long batchTimeout = -1L;
+
+ private String tableFilter;
+
+ private String fieldFilter;
+
+ private List<RecordPosition> recordPositions;
+
+ // ================================= channel parameter
+ // ================================
+
+ // enable remedy
+ private Boolean enableRemedy = false;
+
+ // sync mode: field/row
+ private SyncMode syncMode;
+
+ // sync consistency
+ private SyncConsistency syncConsistency;
+
+ // ================================= system parameter
+ // ================================
+
+ // Column name of the bidirectional synchronization mark
+ private String needSyncMarkTableColumnName = "needSync";
+
+ // Column value of the bidirectional synchronization mark
+ private String needSyncMarkTableColumnValue = "needSync";
+
+ private SourceConnectorConfig sourceConnectorConfig;
+
+}
diff --git a/eventmesh-common/src/main/java/org/apache/eventmesh/common/utils/JsonUtils.java b/eventmesh-common/src/main/java/org/apache/eventmesh/common/utils/JsonUtils.java
index 9e9cea304d..f2328541c4 100644
--- a/eventmesh-common/src/main/java/org/apache/eventmesh/common/utils/JsonUtils.java
+++ b/eventmesh-common/src/main/java/org/apache/eventmesh/common/utils/JsonUtils.java
@@ -58,6 +58,10 @@ public static <T> T convertValue(Object fromValue, Class<T> toValueType) {
return OBJECT_MAPPER.convertValue(fromValue, toValueType);
}
+ public static <T> T convertValue(Object fromValue, TypeReference<T> toValueTypeRef) {
+ return OBJECT_MAPPER.convertValue(fromValue, toValueTypeRef);
+ }
+
public static <T> T mapToObject(Map<String, Object> map, Class<T> beanClass) {
if (map == null) {
return null;
@@ -177,6 +181,13 @@ public static <T> T parseTypeReferenceObject(String text, TypeReference<T> typeReference) {
}
}
+ public static <T> T parseTypeReferenceObject(Object object, TypeReference<T> typeReference) {
+ if (object == null) {
+ return null;
+ }
+ return convertValue(object, typeReference);
+ }
+
public static <T> T parseTypeReferenceObject(byte[] text, TypeReference<T> typeReference) {
try {
return OBJECT_MAPPER.readValue(text, typeReference);
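The two overloads above give JsonUtils a TypeReference-aware conversion path, so a caller can turn an untyped value (for example the Map carried by the reworked canal configs) into a generic target without serializing to a String first. A usage sketch:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.eventmesh.common.utils.JsonUtils;

    import com.fasterxml.jackson.core.type.TypeReference;

    public class JsonUtilsUsageSketch {
        public static void main(String[] args) {
            Map<String, Object> raw = new HashMap<>();
            raw.put("urls", Arrays.asList("http://localhost:8080"));
            // New overload: converts an untyped object to a generic target,
            // returning null for null input instead of throwing.
            Map<String, Object> copy = JsonUtils.parseTypeReferenceObject(
                raw, new TypeReference<Map<String, Object>>() {
                });
            System.out.println(copy);
        }
    }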
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/interceptor/SqlBuilderLoadInterceptor.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/interceptor/SqlBuilderLoadInterceptor.java
index 0ad07577f9..7d83bd4f3f 100644
--- a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/interceptor/SqlBuilderLoadInterceptor.java
+++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/interceptor/SqlBuilderLoadInterceptor.java
@@ -17,7 +17,7 @@
package org.apache.eventmesh.connector.canal.interceptor;
-import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSinkConfig;
+import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSinkIncrementConfig;
import org.apache.eventmesh.connector.canal.CanalConnectRecord;
import org.apache.eventmesh.connector.canal.dialect.DbDialect;
import org.apache.eventmesh.connector.canal.model.EventColumn;
@@ -40,7 +40,7 @@ public class SqlBuilderLoadInterceptor {
@Setter
private DbDialect dbDialect;
- public boolean before(CanalSinkConfig sinkConfig, CanalConnectRecord record) {
+ public boolean before(CanalSinkIncrementConfig sinkConfig, CanalConnectRecord record) {
// build sql
SqlTemplate sqlTemplate = dbDialect.getSqlTemplate();
EventType type = record.getEventType();
@@ -52,12 +52,12 @@ public boolean before(CanalSinkConfig sinkConfig, CanalConnectRecord record) {
if (type.isInsert()) {
sql = sqlTemplate.getMergeSql(schemaName,
- record.getTableName(),
- buildColumnNames(record.getKeys()),
- buildColumnNames(record.getColumns()),
- new String[] {},
- true,
- shardColumns);
+ record.getTableName(),
+ buildColumnNames(record.getKeys()),
+ buildColumnNames(record.getColumns()),
+ new String[] {},
+ true,
+ shardColumns);
} else if (type.isUpdate()) {
boolean existOldKeys = !CollectionUtils.isEmpty(record.getOldKeys());
boolean rowMode = sinkConfig.getSyncMode().isRow();
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkConnector.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkConnector.java
index 49fb10dd35..b03df2dfff 100644
--- a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkConnector.java
+++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkConnector.java
@@ -19,62 +19,14 @@
import org.apache.eventmesh.common.config.connector.Config;
import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSinkConfig;
-import org.apache.eventmesh.connector.canal.CanalConnectRecord;
-import org.apache.eventmesh.connector.canal.DatabaseConnection;
-import org.apache.eventmesh.connector.canal.SqlUtils;
-import org.apache.eventmesh.connector.canal.dialect.DbDialect;
-import org.apache.eventmesh.connector.canal.dialect.MysqlDialect;
-import org.apache.eventmesh.connector.canal.interceptor.SqlBuilderLoadInterceptor;
-import org.apache.eventmesh.connector.canal.model.EventColumn;
-import org.apache.eventmesh.connector.canal.model.EventType;
-import org.apache.eventmesh.connector.canal.sink.DbLoadContext;
-import org.apache.eventmesh.connector.canal.sink.DbLoadData;
-import org.apache.eventmesh.connector.canal.sink.DbLoadData.TableLoadData;
-import org.apache.eventmesh.connector.canal.sink.DbLoadMerger;
-import org.apache.eventmesh.connector.canal.sink.GtidBatch;
-import org.apache.eventmesh.connector.canal.sink.GtidBatchManager;
-import org.apache.eventmesh.connector.canal.source.table.RdbTableMgr;
+import org.apache.eventmesh.common.remote.job.JobType;
import org.apache.eventmesh.openconnect.api.ConnectorCreateService;
import org.apache.eventmesh.openconnect.api.connector.ConnectorContext;
import org.apache.eventmesh.openconnect.api.connector.SinkConnectorContext;
import org.apache.eventmesh.openconnect.api.sink.Sink;
-import org.apache.eventmesh.openconnect.offsetmgmt.api.callback.SendExceptionContext;
-import org.apache.eventmesh.openconnect.offsetmgmt.api.callback.SendResult;
import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.commons.lang3.SerializationUtils;
-
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.sql.Types;
-import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import org.springframework.dao.DataAccessException;
-import org.springframework.dao.DeadlockLoserDataAccessException;
-import org.springframework.jdbc.core.BatchPreparedStatementSetter;
-import org.springframework.jdbc.core.JdbcTemplate;
-import org.springframework.jdbc.core.PreparedStatementSetter;
-import org.springframework.jdbc.core.StatementCallback;
-import org.springframework.jdbc.core.StatementCreatorUtils;
-import org.springframework.jdbc.support.lob.DefaultLobHandler;
-import org.springframework.jdbc.support.lob.LobCreator;
-import org.springframework.transaction.support.TransactionCallback;
-import org.springframework.util.CollectionUtils;
-
-import com.alibaba.otter.canal.common.utils.NamedThreadFactory;
import lombok.extern.slf4j.Slf4j;
@@ -83,21 +35,7 @@ public class CanalSinkConnector implements Sink, ConnectorCreateService<Sink> {
private CanalSinkConfig sinkConfig;
- private JdbcTemplate jdbcTemplate;
-
- private SqlBuilderLoadInterceptor interceptor;
-
- private DbDialect dbDialect;
-
- private ExecutorService executor;
-
- private ExecutorService gtidSingleExecutor;
-
- private int batchSize = 50;
-
- private boolean useBatch = true;
-
- private RdbTableMgr tableMgr;
+ private Sink sink;
@Override
public Class<? extends Config> configClass() {
@@ -114,77 +52,46 @@ public void init(Config config) throws Exception {
public void init(ConnectorContext connectorContext) throws Exception {
// init config for canal source connector
SinkConnectorContext sinkConnectorContext = (SinkConnectorContext) connectorContext;
- this.sinkConfig = (CanalSinkConfig) sinkConnectorContext.getSinkConfig();
- this.batchSize = sinkConfig.getBatchSize();
- this.useBatch = sinkConfig.getUseBatch();
- DatabaseConnection.sinkConfig = this.sinkConfig.getSinkConnectorConfig();
- DatabaseConnection.initSinkConnection();
- jdbcTemplate = new JdbcTemplate(DatabaseConnection.sinkDataSource);
- dbDialect = new MysqlDialect(jdbcTemplate, new DefaultLobHandler());
- interceptor = new SqlBuilderLoadInterceptor();
- interceptor.setDbDialect(dbDialect);
- tableMgr = new RdbTableMgr(sinkConfig.getSinkConnectorConfig(), DatabaseConnection.sinkDataSource);
- executor = new ThreadPoolExecutor(sinkConfig.getPoolSize(),
- sinkConfig.getPoolSize(),
- 0L,
- TimeUnit.MILLISECONDS,
- new ArrayBlockingQueue<>(sinkConfig.getPoolSize() * 4),
- new NamedThreadFactory("canalSink"),
- new ThreadPoolExecutor.CallerRunsPolicy());
- gtidSingleExecutor = Executors.newSingleThreadExecutor(r -> new Thread(r, "gtidSingleExecutor"));
+ if (sinkConnectorContext.getJobType().equals(JobType.FULL)) {
+ this.sink = new CanalSinkFullConnector();
+ } else if (sinkConnectorContext.getJobType().equals(JobType.INCREASE)) {
+ this.sink = new CanalSinkIncrementConnector();
+ } else if (sinkConnectorContext.getJobType().equals(JobType.CHECK)) {
+ this.sink = new CanalSinkCheckConnector();
+ } else {
+ throw new RuntimeException("unsupported job type " + sinkConnectorContext.getJobType());
+ }
+ this.sink.init(sinkConnectorContext);
}
@Override
public void start() throws Exception {
- tableMgr.start();
+ this.sink.start();
}
@Override
public void commit(ConnectRecord record) {
-
+ this.sink.commit(record);
}
@Override
public String name() {
- return this.sinkConfig.getSinkConnectorConfig().getConnectorName();
+ return this.sink.name();
}
@Override
public void onException(ConnectRecord record) {
-
+ this.sink.onException(record);
}
@Override
- public void stop() {
- executor.shutdown();
- gtidSingleExecutor.shutdown();
+ public void stop() throws Exception {
+ this.sink.stop();
}
@Override
public void put(List<ConnectRecord> sinkRecords) {
- DbLoadContext context = new DbLoadContext();
- for (ConnectRecord connectRecord : sinkRecords) {
- List<CanalConnectRecord> canalConnectRecordList = new ArrayList<>();
- // deep copy connectRecord data
- for (CanalConnectRecord record : (List<CanalConnectRecord>) connectRecord.getData()) {
- canalConnectRecordList.add(SerializationUtils.clone(record));
- }
- canalConnectRecordList = filterRecord(canalConnectRecordList);
- if (isDdlDatas(canalConnectRecordList)) {
- doDdl(context, canalConnectRecordList, connectRecord);
- } else if (sinkConfig.isGTIDMode()) {
- doLoadWithGtid(context, sinkConfig, connectRecord);
- } else {
- canalConnectRecordList = DbLoadMerger.merge(canalConnectRecordList);
-
- DbLoadData loadData = new DbLoadData();
- doBefore(canalConnectRecordList, loadData);
-
- doLoad(context, sinkConfig, loadData, connectRecord);
-
- }
-
- }
+ this.sink.put(sinkRecords);
}
@Override
@@ -192,651 +99,4 @@ public Sink create() {
return new CanalSinkConnector();
}
- private boolean isDdlDatas(List<CanalConnectRecord> canalConnectRecordList) {
- boolean result = false;
- for (CanalConnectRecord canalConnectRecord : canalConnectRecordList) {
- result |= canalConnectRecord.getEventType().isDdl();
- if (result && !canalConnectRecord.getEventType().isDdl()) {
- throw new RuntimeException("ddl/dml can't be in one batch, it's may be a bug , pls submit issues.");
- }
- }
- return result;
- }
-
- private List<CanalConnectRecord> filterRecord(List<CanalConnectRecord> canalConnectRecordList) {
- return canalConnectRecordList.stream()
- .filter(record -> tableMgr.getTable(record.getSchemaName(), record.getTableName()) != null)
- .collect(Collectors.toList());
- }
-
- private void doDdl(DbLoadContext context, List<CanalConnectRecord> canalConnectRecordList, ConnectRecord connectRecord) {
- for (final CanalConnectRecord record : canalConnectRecordList) {
- try {
- Boolean result = jdbcTemplate.execute(new StatementCallback<Boolean>() {
-
- public Boolean doInStatement(Statement stmt) throws SQLException, DataAccessException {
- boolean result = true;
- if (StringUtils.isNotEmpty(record.getDdlSchemaName())) {
- result &= stmt.execute("use `" + record.getDdlSchemaName() + "`");
- }
- result &= stmt.execute(record.getSql());
- return result;
- }
- });
- if (Boolean.TRUE.equals(result)) {
- context.getProcessedRecords().add(record);
- } else {
- context.getFailedRecords().add(record);
- }
- } catch (Throwable e) {
- connectRecord.getCallback().onException(buildSendExceptionContext(connectRecord, e));
- throw new RuntimeException(e);
- }
- }
- connectRecord.getCallback().onSuccess(convertToSendResult(connectRecord));
- }
-
- private SendExceptionContext buildSendExceptionContext(ConnectRecord record, Throwable e) {
- SendExceptionContext sendExceptionContext = new SendExceptionContext();
- sendExceptionContext.setMessageId(record.getRecordId());
- sendExceptionContext.setCause(e);
- if (org.apache.commons.lang3.StringUtils.isNotEmpty(record.getExtension("topic"))) {
- sendExceptionContext.setTopic(record.getExtension("topic"));
- }
- return sendExceptionContext;
- }
-
- private SendResult convertToSendResult(ConnectRecord record) {
- SendResult result = new SendResult();
- result.setMessageId(record.getRecordId());
- if (org.apache.commons.lang3.StringUtils.isNotEmpty(record.getExtension("topic"))) {
- result.setTopic(record.getExtension("topic"));
- }
- return result;
- }
-
- private void doBefore(List<CanalConnectRecord> canalConnectRecordList, final DbLoadData loadData) {
- for (final CanalConnectRecord record : canalConnectRecordList) {
- boolean filter = interceptor.before(sinkConfig, record);
- if (!filter) {
- loadData.merge(record);
- }
- }
- }
-
- private void doLoad(DbLoadContext context, CanalSinkConfig sinkConfig, DbLoadData loadData, ConnectRecord connectRecord) {
- List<List<CanalConnectRecord>> batchDatas = new ArrayList<>();
- for (TableLoadData tableData : loadData.getTables()) {
- if (useBatch) {
- batchDatas.addAll(split(tableData.getDeleteDatas()));
- } else {
- for (CanalConnectRecord data : tableData.getDeleteDatas()) {
- batchDatas.add(Arrays.asList(data));
- }
- }
- }
-
- doTwoPhase(context, sinkConfig, batchDatas, true, connectRecord);
-
- batchDatas.clear();
-
- for (TableLoadData tableData : loadData.getTables()) {
- if (useBatch) {
- batchDatas.addAll(split(tableData.getInsertDatas()));
- batchDatas.addAll(split(tableData.getUpdateDatas()));
- } else {
- for (CanalConnectRecord data : tableData.getInsertDatas()) {
- batchDatas.add(Arrays.asList(data));
- }
- for (CanalConnectRecord data : tableData.getUpdateDatas()) {
- batchDatas.add(Arrays.asList(data));
- }
- }
- }
-
- doTwoPhase(context, sinkConfig, batchDatas, true, connectRecord);
-
- batchDatas.clear();
- }
-
- private void doLoadWithGtid(DbLoadContext context, CanalSinkConfig sinkConfig, ConnectRecord connectRecord) {
- int batchIndex = connectRecord.getExtension("batchIndex", Integer.class);
- int totalBatches = connectRecord.getExtension("totalBatches", Integer.class);
- List<CanalConnectRecord> canalConnectRecordList = (List<CanalConnectRecord>) connectRecord.getData();
- String gtid = canalConnectRecordList.get(0).getCurrentGtid();
- GtidBatchManager.addBatch(gtid, batchIndex, totalBatches, canalConnectRecordList);
- // check whether the batch is complete
- if (GtidBatchManager.isComplete(gtid)) {
- GtidBatch batch = GtidBatchManager.getGtidBatch(gtid);
- List<List<CanalConnectRecord>> totalRows = batch.getBatches();
- List<CanalConnectRecord> filteredRows = new ArrayList<>();
- for (List<CanalConnectRecord> canalConnectRecords : totalRows) {
- canalConnectRecords = filterRecord(canalConnectRecords);
- if (!CollectionUtils.isEmpty(canalConnectRecords)) {
- for (final CanalConnectRecord record : canalConnectRecords) {
- boolean filter = interceptor.before(sinkConfig, record);
- filteredRows.add(record);
- }
- }
- }
- context.setGtid(gtid);
- Future<Exception> result = gtidSingleExecutor.submit(new DbLoadWorker(context, filteredRows, dbDialect, false, sinkConfig));
- Exception ex = null;
- try {
- ex = result.get();
- if (ex == null) {
- connectRecord.getCallback().onSuccess(convertToSendResult(connectRecord));
- }
- } catch (Exception e) {
- ex = e;
- }
- Boolean skipException = sinkConfig.getSkipException();
- if (skipException != null && skipException) {
- if (ex != null) {
- // do skip
- log.warn("skip exception will ack data : {} , caused by {}",
- filteredRows,
- ExceptionUtils.getFullStackTrace(ex));
- GtidBatchManager.removeGtidBatch(gtid);
- connectRecord.getCallback().onSuccess(convertToSendResult(connectRecord));
- }
- } else {
- if (ex != null) {
- log.error("sink connector will shutdown by " + ex.getMessage(), ExceptionUtils.getFullStackTrace(ex));
- connectRecord.getCallback().onException(buildSendExceptionContext(connectRecord, ex));
- gtidSingleExecutor.shutdown();
- System.exit(1);
- } else {
- GtidBatchManager.removeGtidBatch(gtid);
- }
- }
- } else {
- log.info("Batch received, waiting for other batches.");
- // ack this record
- connectRecord.getCallback().onSuccess(convertToSendResult(connectRecord));
- }
- }
-
- private List<List<CanalConnectRecord>> split(List<CanalConnectRecord> records) {
- List<List<CanalConnectRecord>> result = new ArrayList<>();
- if (records == null || records.isEmpty()) {
- return result;
- } else {
- int[] bits = new int[records.size()];
- for (int i = 0; i < bits.length; i++) {
- while (i < bits.length && bits[i] == 1) {
- i++;
- }
-
- if (i >= bits.length) {
- break;
- }
-
- List<CanalConnectRecord> batch = new ArrayList<>();
- bits[i] = 1;
- batch.add(records.get(i));
- for (int j = i + 1; j < bits.length && batch.size() < batchSize; j++) {
- if (bits[j] == 0 && canBatch(records.get(i), records.get(j))) {
- batch.add(records.get(j));
- bits[j] = 1;
- }
- }
- result.add(batch);
- }
-
- return result;
- }
- }
-
- private boolean canBatch(CanalConnectRecord source, CanalConnectRecord target) {
- return StringUtils.equals(source.getSchemaName(),
- target.getSchemaName())
- && StringUtils.equals(source.getTableName(), target.getTableName())
- && StringUtils.equals(source.getSql(), target.getSql());
- }
-
- private void doTwoPhase(DbLoadContext context, CanalSinkConfig sinkConfig, List<List<CanalConnectRecord>> totalRows, boolean canBatch,
- ConnectRecord connectRecord) {
- List<Future<Exception>> results = new ArrayList<>();
- for (List<CanalConnectRecord> rows : totalRows) {
- if (CollectionUtils.isEmpty(rows)) {
- continue;
- }
- results.add(executor.submit(new DbLoadWorker(context, rows, dbDialect, canBatch, sinkConfig)));
- }
-
- boolean partFailed = false;
- for (Future<Exception> result : results) {
- Exception ex = null;
- try {
- ex = result.get();
- if (ex == null) {
- connectRecord.getCallback().onSuccess(convertToSendResult(connectRecord));
- }
- } catch (Exception e) {
- ex = e;
- }
-
- if (ex != null) {
- log.warn("##load phase one failed!", ex);
- partFailed = true;
- }
- }
-
- if (partFailed) {
- List<CanalConnectRecord> retryRecords = new ArrayList<>();
- for (List<CanalConnectRecord> rows : totalRows) {
- retryRecords.addAll(rows);
- }
-
- context.getFailedRecords().clear();
-
- Boolean skipException = sinkConfig.getSkipException();
- if (skipException != null && skipException) {
- for (CanalConnectRecord retryRecord : retryRecords) {
- DbLoadWorker worker = new DbLoadWorker(context, Arrays.asList(retryRecord), dbDialect, false, sinkConfig);
- try {
- Exception ex = worker.call();
- if (ex != null) {
- // do skip
- log.warn("skip exception for data : {} , caused by {}",
- retryRecord,
- ExceptionUtils.getFullStackTrace(ex));
- connectRecord.getCallback().onSuccess(convertToSendResult(connectRecord));
- }
- } catch (Exception ex) {
- // do skip
- log.warn("skip exception for data : {} , caused by {}",
- retryRecord,
- ExceptionUtils.getFullStackTrace(ex));
- connectRecord.getCallback().onSuccess(convertToSendResult(connectRecord));
- }
- }
- } else {
- DbLoadWorker worker = new DbLoadWorker(context, retryRecords, dbDialect, false, sinkConfig);
- try {
- Exception ex = worker.call();
- if (ex != null) {
- throw ex;
- }
- } catch (Exception ex) {
- log.error("##load phase two failed!", ex);
- log.error("sink connector will shutdown by " + ex.getMessage(), ex);
- connectRecord.getCallback().onException(buildSendExceptionContext(connectRecord, ex));
- executor.shutdown();
- System.exit(1);
- }
- }
- }
- }
-
- enum ExecuteResult {
- SUCCESS, ERROR, RETRY
- }
-
- class DbLoadWorker implements Callable<Exception> {
-
- private final DbLoadContext context;
- private final DbDialect dbDialect;
- private final List<CanalConnectRecord> records;
- private final boolean canBatch;
-
- private final CanalSinkConfig sinkConfig;
-
- private final List<CanalConnectRecord> allFailedRecords = new ArrayList<>();
- private final List<CanalConnectRecord> allProcessedRecords = new ArrayList<>();
- private final List<CanalConnectRecord> processedRecords = new ArrayList<>();
- private final List<CanalConnectRecord> failedRecords = new ArrayList<>();
-
- public DbLoadWorker(DbLoadContext context, List<CanalConnectRecord> records, DbDialect dbDialect, boolean canBatch,
- CanalSinkConfig sinkConfig) {
- this.context = context;
- this.records = records;
- this.canBatch = canBatch;
- this.dbDialect = dbDialect;
- this.sinkConfig = sinkConfig;
- }
-
- public Exception call() throws Exception {
- try {
- return doCall();
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
-
- private Exception doCall() {
- RuntimeException error = null;
- ExecuteResult exeResult = null;
-
- if (sinkConfig.isGTIDMode()) {
- int retryCount = 0;
- final List<CanalConnectRecord> toExecuteRecords = new ArrayList<>();
- try {
- if (!CollectionUtils.isEmpty(failedRecords)) {
- // if failedRecords not empty, make it retry
- toExecuteRecords.addAll(failedRecords);
- } else {
- toExecuteRecords.addAll(records);
- // add to failed record first, maybe get lob or datasource error
- failedRecords.addAll(toExecuteRecords);
- }
- JdbcTemplate template = dbDialect.getJdbcTemplate();
- String sourceGtid = context.getGtid();
- if (StringUtils.isNotEmpty(sourceGtid) && !sinkConfig.isMariaDB()) {
- String setMySQLGtid = "SET @@session.gtid_next = '" + sourceGtid + "';";
- template.execute(setMySQLGtid);
- } else if (StringUtils.isNotEmpty(sourceGtid) && sinkConfig.isMariaDB()) {
- throw new RuntimeException("unsupport gtid mode for mariaDB");
- } else {
- log.error("gtid is empty in gtid mode");
- throw new RuntimeException("gtid is empty in gtid mode");
- }
-
- final LobCreator lobCreator = dbDialect.getLobHandler().getLobCreator();
- int affect = (Integer) dbDialect.getTransactionTemplate().execute((TransactionCallback) status -> {
- try {
- failedRecords.clear();
- processedRecords.clear();
- int affect1 = 0;
- for (CanalConnectRecord record : toExecuteRecords) {
- int affects = template.update(record.getSql(), new PreparedStatementSetter() {
- public void setValues(PreparedStatement ps) throws SQLException {
- doPreparedStatement(ps, dbDialect, lobCreator, record);
- }
- });
- affect1 = affect1 + affects;
- processStat(record, affects, false);
- }
- return affect1;
- } catch (Exception e) {
- // rollback
- status.setRollbackOnly();
- throw new RuntimeException("Failed to executed", e);
- } finally {
- lobCreator.close();
- }
- });
-
- // reset gtid
- if (sinkConfig.isMariaDB()) {
- throw new RuntimeException("unsupport gtid mode for mariaDB");
- } else {
- String resetMySQLGtid = "SET @@session.gtid_next = 'AUTOMATIC';";
- dbDialect.getJdbcTemplate().execute(resetMySQLGtid);
- }
-
- error = null;
- exeResult = ExecuteResult.SUCCESS;
- } catch (DeadlockLoserDataAccessException ex) {
- error = new RuntimeException(ExceptionUtils.getFullStackTrace(ex));
- exeResult = ExecuteResult.RETRY;
- } catch (Throwable ex) {
- error = new RuntimeException(ExceptionUtils.getFullStackTrace(ex));
- exeResult = ExecuteResult.ERROR;
- }
-
- if (ExecuteResult.SUCCESS == exeResult) {
- allFailedRecords.addAll(failedRecords);
- allProcessedRecords.addAll(processedRecords);
- failedRecords.clear();
- processedRecords.clear();
- } else if (ExecuteResult.RETRY == exeResult) {
- retryCount = retryCount + 1;
- processedRecords.clear();
- failedRecords.clear();
- failedRecords.addAll(toExecuteRecords);
- int retry = 3;
- if (retryCount >= retry) {
- processFailedDatas(toExecuteRecords.size());
- throw new RuntimeException(String.format("execute retry %s times failed", retryCount), error);
- } else {
- try {
- int retryWait = 3000;
- int wait = retryCount * retryWait;
- wait = Math.max(wait, retryWait);
- Thread.sleep(wait);
- } catch (InterruptedException ex) {
- Thread.interrupted();
- processFailedDatas(toExecuteRecords.size());
- throw new RuntimeException(ex);
- }
- }
- } else {
- processedRecords.clear();
- failedRecords.clear();
- failedRecords.addAll(toExecuteRecords);
- processFailedDatas(toExecuteRecords.size());
- throw error;
- }
- } else {
- int index = 0;
- while (index < records.size()) {
- final List<CanalConnectRecord> toExecuteRecords = new ArrayList<>();
- if (useBatch && canBatch) {
- int end = Math.min(index + batchSize, records.size());
- toExecuteRecords.addAll(records.subList(index, end));
- index = end;
- } else {
- toExecuteRecords.add(records.get(index));
- index = index + 1;
- }
-
- int retryCount = 0;
- while (true) {
- try {
- if (!CollectionUtils.isEmpty(failedRecords)) {
- toExecuteRecords.clear();
- toExecuteRecords.addAll(failedRecords);
- } else {
- failedRecords.addAll(toExecuteRecords);
- }
-
- final LobCreator lobCreator = dbDialect.getLobHandler().getLobCreator();
- if (useBatch && canBatch) {
- JdbcTemplate template = dbDialect.getJdbcTemplate();
- final String sql = toExecuteRecords.get(0).getSql();
-
- int[] affects = new int[toExecuteRecords.size()];
-
- affects = (int[]) dbDialect.getTransactionTemplate().execute((TransactionCallback) status -> {
- try {
- failedRecords.clear();
- processedRecords.clear();
- int[] affects1 = template.batchUpdate(sql, new BatchPreparedStatementSetter() {
-
- public void setValues(PreparedStatement ps, int idx) throws SQLException {
- doPreparedStatement(ps, dbDialect, lobCreator, toExecuteRecords.get(idx));
- }
-
- public int getBatchSize() {
- return toExecuteRecords.size();
- }
- });
- return affects1;
- } catch (Exception e) {
- // rollback
- status.setRollbackOnly();
- throw new RuntimeException("Failed to execute batch with GTID", e);
- } finally {
- lobCreator.close();
- }
- });
-
- for (int i = 0; i < toExecuteRecords.size(); i++) {
- assert affects != null;
- processStat(toExecuteRecords.get(i), affects[i], true);
- }
- } else {
- final CanalConnectRecord record = toExecuteRecords.get(0);
- JdbcTemplate template = dbDialect.getJdbcTemplate();
- int affect = 0;
- affect = (Integer) dbDialect.getTransactionTemplate().execute((TransactionCallback) status -> {
- try {
- failedRecords.clear();
- processedRecords.clear();
- int affect1 = template.update(record.getSql(), new PreparedStatementSetter() {
-
- public void setValues(PreparedStatement ps) throws SQLException {
- doPreparedStatement(ps, dbDialect, lobCreator, record);
- }
- });
- return affect1;
- } catch (Exception e) {
- // rollback
- status.setRollbackOnly();
- throw new RuntimeException("Failed to executed", e);
- } finally {
- lobCreator.close();
- }
- });
- processStat(record, affect, false);
- }
-
- error = null;
- exeResult = ExecuteResult.SUCCESS;
- } catch (DeadlockLoserDataAccessException ex) {
- error = new RuntimeException(ExceptionUtils.getFullStackTrace(ex));
- exeResult = ExecuteResult.RETRY;
- } catch (Throwable ex) {
- error = new RuntimeException(ExceptionUtils.getFullStackTrace(ex));
- exeResult = ExecuteResult.ERROR;
- }
-
- if (ExecuteResult.SUCCESS == exeResult) {
- allFailedRecords.addAll(failedRecords);
- allProcessedRecords.addAll(processedRecords);
- failedRecords.clear();
- processedRecords.clear();
- break; // do next eventData
- } else if (ExecuteResult.RETRY == exeResult) {
- retryCount = retryCount + 1;
- processedRecords.clear();
- failedRecords.clear();
- failedRecords.addAll(toExecuteRecords);
- int retry = 3;
- if (retryCount >= retry) {
- processFailedDatas(index);
- throw new RuntimeException(String.format("execute retry %s times failed", retryCount), error);
- } else {
- try {
- int retryWait = 3000;
- int wait = retryCount * retryWait;
- wait = Math.max(wait, retryWait);
- Thread.sleep(wait);
- } catch (InterruptedException ex) {
- Thread.interrupted();
- processFailedDatas(index);
- throw new RuntimeException(ex);
- }
- }
- } else {
- processedRecords.clear();
- failedRecords.clear();
- failedRecords.addAll(toExecuteRecords);
- processFailedDatas(index);
- throw error;
- }
- }
- }
- }
-
- context.getFailedRecords().addAll(allFailedRecords);
- context.getProcessedRecords().addAll(allProcessedRecords);
- return null;
- }
-
- private void doPreparedStatement(PreparedStatement ps, DbDialect dbDialect, LobCreator lobCreator,
- CanalConnectRecord record) throws SQLException {
- EventType type = record.getEventType();
- List<EventColumn> columns = new ArrayList<EventColumn>();
- if (type.isInsert()) {
- columns.addAll(record.getColumns());
- columns.addAll(record.getKeys());
- } else if (type.isDelete()) {
- columns.addAll(record.getKeys());
- } else if (type.isUpdate()) {
- boolean existOldKeys = !CollectionUtils.isEmpty(record.getOldKeys());
- columns.addAll(record.getUpdatedColumns());
- columns.addAll(record.getKeys());
- if (existOldKeys) {
- columns.addAll(record.getOldKeys());
- }
- }
-
- for (int i = 0; i < columns.size(); i++) {
- int paramIndex = i + 1;
- EventColumn column = columns.get(i);
- int sqlType = column.getColumnType();
-
- Object param = null;
- if (dbDialect instanceof MysqlDialect
- && (sqlType == Types.TIME || sqlType == Types.TIMESTAMP || sqlType == Types.DATE)) {
- param = column.getColumnValue();
- } else {
- param = SqlUtils.stringToSqlValue(column.getColumnValue(),
- sqlType,
- false,
- dbDialect.isEmptyStringNulled());
- }
-
- try {
- switch (sqlType) {
- case Types.CLOB:
- lobCreator.setClobAsString(ps, paramIndex, (String) param);
- break;
-
- case Types.BLOB:
- lobCreator.setBlobAsBytes(ps, paramIndex, (byte[]) param);
- break;
- case Types.TIME:
- case Types.TIMESTAMP:
- case Types.DATE:
- if (dbDialect instanceof MysqlDialect) {
- ps.setObject(paramIndex, param);
- } else {
- StatementCreatorUtils.setParameterValue(ps, paramIndex, sqlType, null, param);
- }
- break;
- case Types.BIT:
- if (dbDialect instanceof MysqlDialect) {
- StatementCreatorUtils.setParameterValue(ps, paramIndex, Types.DECIMAL, null, param);
- } else {
- StatementCreatorUtils.setParameterValue(ps, paramIndex, sqlType, null, param);
- }
- break;
- default:
- StatementCreatorUtils.setParameterValue(ps, paramIndex, sqlType, null, param);
- break;
- }
- } catch (SQLException ex) {
- log.error("## SetParam error , [pairId={}, sqltype={}, value={}]",
- record.getPairId(), sqlType, param);
- throw ex;
- }
- }
- }
-
- private void processStat(CanalConnectRecord record, int affect, boolean batch) {
- if (batch && (affect < 1 && affect != Statement.SUCCESS_NO_INFO)) {
- failedRecords.add(record);
- } else if (!batch && affect < 1) {
- failedRecords.add(record);
- } else {
- processedRecords.add(record);
- // this.processStat(record, context);
- }
- }
-
- private void processFailedDatas(int index) {
- allFailedRecords.addAll(failedRecords);
- context.getFailedRecords().addAll(allFailedRecords);
- for (; index < records.size(); index++) {
- context.getFailedRecords().add(records.get(index));
- }
- allProcessedRecords.addAll(processedRecords);
- context.getProcessedRecords().addAll(allProcessedRecords);
- }
- }
-
}
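After this rewrite CanalSinkConnector is a thin facade: init() selects the concrete sink by job type and every Sink method delegates to it. The dispatch reduces to the sketch below (CanalSinkCheckConnector is referenced by the patch; getJobType() is the accessor added to SinkConnectorContext in this change set; assume the sketch sits in the same package as the connectors):

    import org.apache.eventmesh.openconnect.api.connector.SinkConnectorContext;
    import org.apache.eventmesh.openconnect.api.sink.Sink;

    public class CanalSinkDispatchSketch {
        // The facade's dispatch, written as a switch over the same three
        // job types the patch compares with equals().
        static Sink createDelegate(SinkConnectorContext ctx) {
            switch (ctx.getJobType()) {
                case FULL:
                    return new CanalSinkFullConnector();
                case INCREASE:
                    return new CanalSinkIncrementConnector();
                case CHECK:
                    return new CanalSinkCheckConnector();
                default:
                    throw new RuntimeException("unsupported job type " + ctx.getJobType());
            }
        }
    }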
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkFullConnector.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkFullConnector.java
index 2b4c9d7a94..4137123922 100644
--- a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkFullConnector.java
+++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkFullConnector.java
@@ -18,12 +18,14 @@
package org.apache.eventmesh.connector.canal.sink.connector;
import org.apache.eventmesh.common.config.connector.Config;
+import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSinkConfig;
import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSinkFullConfig;
import org.apache.eventmesh.common.config.connector.rdb.canal.mysql.Constants;
import org.apache.eventmesh.common.config.connector.rdb.canal.mysql.MySQLColumnDef;
import org.apache.eventmesh.common.config.connector.rdb.canal.mysql.MySQLTableDef;
import org.apache.eventmesh.common.exception.EventMeshException;
import org.apache.eventmesh.common.remote.offset.canal.CanalFullRecordOffset;
+import org.apache.eventmesh.common.utils.JsonUtils;
import org.apache.eventmesh.connector.canal.DatabaseConnection;
import org.apache.eventmesh.connector.canal.SqlUtils;
import org.apache.eventmesh.connector.canal.source.table.RdbTableMgr;
@@ -31,7 +33,10 @@
import org.apache.eventmesh.openconnect.api.connector.ConnectorContext;
import org.apache.eventmesh.openconnect.api.connector.SinkConnectorContext;
import org.apache.eventmesh.openconnect.api.sink.Sink;
+import org.apache.eventmesh.openconnect.offsetmgmt.api.callback.SendExceptionContext;
+import org.apache.eventmesh.openconnect.offsetmgmt.api.callback.SendResult;
import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord;
+import org.apache.eventmesh.openconnect.util.ConfigUtil;
import org.apache.commons.lang3.StringUtils;
@@ -47,11 +52,13 @@
import java.util.concurrent.locks.LockSupport;
import com.alibaba.druid.pool.DruidPooledConnection;
+import com.fasterxml.jackson.core.type.TypeReference;
import lombok.extern.slf4j.Slf4j;
@Slf4j
public class CanalSinkFullConnector implements Sink, ConnectorCreateService<Sink> {
+
private CanalSinkFullConfig config;
private RdbTableMgr tableMgr;
private final DateTimeFormatter dataTimePattern = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSSSSSSSS");
@@ -84,19 +91,21 @@ public void init(Config config) throws Exception {
@Override
public void init(ConnectorContext connectorContext) throws Exception {
- this.config = (CanalSinkFullConfig) ((SinkConnectorContext) connectorContext).getSinkConfig();
+ SinkConnectorContext sinkConnectorContext = (SinkConnectorContext) connectorContext;
+ CanalSinkConfig canalSinkConfig = (CanalSinkConfig) sinkConnectorContext.getSinkConfig();
+ this.config = ConfigUtil.parse(canalSinkConfig.getSinkConfig(), CanalSinkFullConfig.class);
init();
}
private void init() {
- if (config.getSinkConfig() == null) {
+ if (config.getSinkConnectorConfig() == null) {
throw new EventMeshException(String.format("[%s] sink config is null", this.getClass()));
}
- DatabaseConnection.sinkConfig = this.config.getSinkConfig();
+ DatabaseConnection.sinkConfig = this.config.getSinkConnectorConfig();
DatabaseConnection.initSinkConnection();
DatabaseConnection.sinkDataSource.setDefaultAutoCommit(false);
- tableMgr = new RdbTableMgr(this.config.getSinkConfig(), DatabaseConnection.sinkDataSource);
+ tableMgr = new RdbTableMgr(this.config.getSinkConnectorConfig(), DatabaseConnection.sinkDataSource);
}
@Override
@@ -123,7 +132,9 @@ public void put(List<ConnectRecord> sinkRecords) {
return;
}
ConnectRecord record = sinkRecords.get(0);
- List<Map<String, Object>> data = (List<Map<String, Object>>) record.getData();
+ List<Map<String, Object>> data =
+ JsonUtils.parseTypeReferenceObject((byte[]) record.getData(), new TypeReference<List<Map<String, Object>>>() {
+ });
if (data == null || data.isEmpty()) {
if (log.isDebugEnabled()) {
log.debug("[{}] got rows data is none", this.getClass());
@@ -159,13 +170,16 @@ public void put(List<ConnectRecord> sinkRecords) {
}
statement.executeBatch();
connection.commit();
+ record.getCallback().onSuccess(convertToSendResult(record));
} catch (SQLException e) {
log.warn("full sink process schema [{}] table [{}] connector write fail", tableDefinition.getSchemaName(), tableDefinition.getTableName(),
e);
LockSupport.parkNanos(3000 * 1000L);
+ record.getCallback().onException(buildSendExceptionContext(record, e));
} catch (Exception e) {
log.error("full sink process schema [{}] table [{}] catch unknown exception", tableDefinition.getSchemaName(),
tableDefinition.getTableName(), e);
+ record.getCallback().onException(buildSendExceptionContext(record, e));
try {
if (connection != null && !connection.isClosed()) {
connection.rollback();
@@ -193,6 +207,25 @@ public void put(List<ConnectRecord> sinkRecords) {
}
}
+ private SendExceptionContext buildSendExceptionContext(ConnectRecord record, Throwable e) {
+ SendExceptionContext sendExceptionContext = new SendExceptionContext();
+ sendExceptionContext.setMessageId(record.getRecordId());
+ sendExceptionContext.setCause(e);
+ if (org.apache.commons.lang3.StringUtils.isNotEmpty(record.getExtension("topic"))) {
+ sendExceptionContext.setTopic(record.getExtension("topic"));
+ }
+ return sendExceptionContext;
+ }
+
+ private SendResult convertToSendResult(ConnectRecord record) {
+ SendResult result = new SendResult();
+ result.setMessageId(record.getRecordId());
+ if (org.apache.commons.lang3.StringUtils.isNotEmpty(record.getExtension("topic"))) {
+ result.setTopic(record.getExtension("topic"));
+ }
+ return result;
+ }
+
private void setPrepareParams(PreparedStatement preparedStatement, Map<String, Object> col, List<MySQLColumnDef> columnDefs) throws Exception {
for (int i = 0; i < columnDefs.size(); i++) {
writeColumn(preparedStatement, i + 1, columnDefs.get(i), col.get(columnDefs.get(i).getName()));
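The full-sync sink now completes each record's callback explicitly: onSuccess with a SendResult once the batch commits, onException with a SendExceptionContext on failure, so the runtime can ack or retry per record. The acknowledgement pattern in isolation, as a sketch of a method that would live inside CanalSinkFullConnector next to the two helpers added above:

    // Sketch only: convertToSendResult and buildSendExceptionContext are
    // the private helpers introduced in this diff.
    private void ack(ConnectRecord record, Throwable error) {
        if (error == null) {
            record.getCallback().onSuccess(convertToSendResult(record));
        } else {
            record.getCallback().onException(buildSendExceptionContext(record, error));
        }
    }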
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkIncrementConnector.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkIncrementConnector.java
new file mode 100644
index 0000000000..e165a5ffe6
--- /dev/null
+++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkIncrementConnector.java
@@ -0,0 +1,865 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.eventmesh.connector.canal.sink.connector;
+
+import org.apache.eventmesh.common.config.connector.Config;
+import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSinkConfig;
+import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSinkIncrementConfig;
+import org.apache.eventmesh.common.utils.JsonUtils;
+import org.apache.eventmesh.connector.canal.CanalConnectRecord;
+import org.apache.eventmesh.connector.canal.DatabaseConnection;
+import org.apache.eventmesh.connector.canal.SqlUtils;
+import org.apache.eventmesh.connector.canal.dialect.DbDialect;
+import org.apache.eventmesh.connector.canal.dialect.MysqlDialect;
+import org.apache.eventmesh.connector.canal.interceptor.SqlBuilderLoadInterceptor;
+import org.apache.eventmesh.connector.canal.model.EventColumn;
+import org.apache.eventmesh.connector.canal.model.EventType;
+import org.apache.eventmesh.connector.canal.sink.DbLoadContext;
+import org.apache.eventmesh.connector.canal.sink.DbLoadData;
+import org.apache.eventmesh.connector.canal.sink.DbLoadData.TableLoadData;
+import org.apache.eventmesh.connector.canal.sink.DbLoadMerger;
+import org.apache.eventmesh.connector.canal.sink.GtidBatch;
+import org.apache.eventmesh.connector.canal.sink.GtidBatchManager;
+import org.apache.eventmesh.connector.canal.source.table.RdbTableMgr;
+import org.apache.eventmesh.openconnect.api.ConnectorCreateService;
+import org.apache.eventmesh.openconnect.api.connector.ConnectorContext;
+import org.apache.eventmesh.openconnect.api.connector.SinkConnectorContext;
+import org.apache.eventmesh.openconnect.api.sink.Sink;
+import org.apache.eventmesh.openconnect.offsetmgmt.api.callback.SendExceptionContext;
+import org.apache.eventmesh.openconnect.offsetmgmt.api.callback.SendResult;
+import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord;
+import org.apache.eventmesh.openconnect.util.ConfigUtil;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.SerializationUtils;
+
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+import org.springframework.dao.DataAccessException;
+import org.springframework.dao.DeadlockLoserDataAccessException;
+import org.springframework.jdbc.core.BatchPreparedStatementSetter;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.core.PreparedStatementSetter;
+import org.springframework.jdbc.core.StatementCallback;
+import org.springframework.jdbc.core.StatementCreatorUtils;
+import org.springframework.jdbc.support.lob.DefaultLobHandler;
+import org.springframework.jdbc.support.lob.LobCreator;
+import org.springframework.transaction.support.TransactionCallback;
+import org.springframework.util.CollectionUtils;
+
+import com.alibaba.otter.canal.common.utils.NamedThreadFactory;
+import com.fasterxml.jackson.core.type.TypeReference;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class CanalSinkIncrementConnector implements Sink, ConnectorCreateService<Sink> {
+
+ private CanalSinkIncrementConfig sinkConfig;
+
+ private JdbcTemplate jdbcTemplate;
+
+ private SqlBuilderLoadInterceptor interceptor;
+
+ private DbDialect dbDialect;
+
+ private ExecutorService executor;
+
+ private ExecutorService gtidSingleExecutor;
+
+ private int batchSize = 50;
+
+ private boolean useBatch = true;
+
+ private RdbTableMgr tableMgr;
+
+ @Override
+ public Class<? extends Config> configClass() {
+ return CanalSinkIncrementConfig.class;
+ }
+
+ @Override
+ public void init(Config config) throws Exception {
+ // init config for canal sink connector
+ this.sinkConfig = (CanalSinkIncrementConfig) config;
+ }
+
+ @Override
+ public void init(ConnectorContext connectorContext) throws Exception {
+ // init config for canal sink connector
+ SinkConnectorContext sinkConnectorContext = (SinkConnectorContext) connectorContext;
+ CanalSinkConfig canalSinkConfig = (CanalSinkConfig) sinkConnectorContext.getSinkConfig();
+ this.sinkConfig = ConfigUtil.parse(canalSinkConfig.getSinkConfig(), CanalSinkIncrementConfig.class);
+ this.batchSize = sinkConfig.getBatchSize();
+ this.useBatch = sinkConfig.getUseBatch();
+ DatabaseConnection.sinkConfig = this.sinkConfig.getSinkConnectorConfig();
+ DatabaseConnection.initSinkConnection();
+ jdbcTemplate = new JdbcTemplate(DatabaseConnection.sinkDataSource);
+ dbDialect = new MysqlDialect(jdbcTemplate, new DefaultLobHandler());
+ interceptor = new SqlBuilderLoadInterceptor();
+ interceptor.setDbDialect(dbDialect);
+ tableMgr = new RdbTableMgr(sinkConfig.getSinkConnectorConfig(), DatabaseConnection.sinkDataSource);
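+ // worker pool sized by config, with a bounded queue (4x pool size) and CallerRunsPolicy for backpressure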
+ executor = new ThreadPoolExecutor(sinkConfig.getPoolSize(),
+ sinkConfig.getPoolSize(),
+ 0L,
+ TimeUnit.MILLISECONDS,
+ new ArrayBlockingQueue<>(sinkConfig.getPoolSize() * 4),
+ new NamedThreadFactory("canalSink"),
+ new ThreadPoolExecutor.CallerRunsPolicy());
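+ // GTID batches go through a dedicated single-threaded executor so they apply in source order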
+ gtidSingleExecutor = Executors.newSingleThreadExecutor(r -> new Thread(r, "gtidSingleExecutor"));
+ }
+
+ @Override
+ public void start() throws Exception {
+ tableMgr.start();
+ }
+
+ @Override
+ public void commit(ConnectRecord record) {
+
+ }
+
+ @Override
+ public String name() {
+ return this.sinkConfig.getSinkConnectorConfig().getConnectorName();
+ }
+
+ @Override
+ public void onException(ConnectRecord record) {
+
+ }
+
+ @Override
+ public void stop() {
+ executor.shutdown();
+ gtidSingleExecutor.shutdown();
+ }
+
+ @Override
+ public void put(List<ConnectRecord> sinkRecords) {
+ DbLoadContext context = new DbLoadContext();
+ for (ConnectRecord connectRecord : sinkRecords) {
+ List<CanalConnectRecord> canalConnectRecordList = new ArrayList<>();
+
+ List<CanalConnectRecord> canalConnectRecords = convertToCanalConnectRecord(connectRecord);
+
+ // deep copy connectRecord data
+ for (CanalConnectRecord record : canalConnectRecords) {
+ canalConnectRecordList.add(SerializationUtils.clone(record));
+ }
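+ // drop records for tables this sink does not manage, then route the batch: DDL, GTID-ordered load, or merged two-phase load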
+ canalConnectRecordList = filterRecord(canalConnectRecordList);
+ if (isDdlDatas(canalConnectRecordList)) {
+ doDdl(context, canalConnectRecordList, connectRecord);
+ } else if (sinkConfig.isGTIDMode()) {
+ doLoadWithGtid(context, sinkConfig, connectRecord);
+ } else {
+ canalConnectRecordList = DbLoadMerger.merge(canalConnectRecordList);
+
+ DbLoadData loadData = new DbLoadData();
+ doBefore(canalConnectRecordList, loadData);
+
+ doLoad(context, sinkConfig, loadData, connectRecord);
+
+ }
+
+ }
+ }
+
+ @Override
+ public Sink create() {
+ return new CanalSinkIncrementConnector();
+ }
+
+ private boolean isDdlDatas(List<CanalConnectRecord> canalConnectRecordList) {
+ boolean result = false;
+ for (CanalConnectRecord canalConnectRecord : canalConnectRecordList) {
+ result |= canalConnectRecord.getEventType().isDdl();
+ if (result && !canalConnectRecord.getEventType().isDdl()) {
+ throw new RuntimeException("ddl/dml can't be in one batch, it's may be a bug , pls submit issues.");
+ }
+ }
+ return result;
+ }
+
+ private List<CanalConnectRecord> filterRecord(List<CanalConnectRecord> canalConnectRecordList) {
+ return canalConnectRecordList.stream()
+ .filter(record -> tableMgr.getTable(record.getSchemaName(), record.getTableName()) != null)
+ .collect(Collectors.toList());
+ }
+
+ private void doDdl(DbLoadContext context, List<CanalConnectRecord> canalConnectRecordList, ConnectRecord connectRecord) {
+ for (final CanalConnectRecord record : canalConnectRecordList) {
+ try {
+ Boolean result = jdbcTemplate.execute(new StatementCallback<Boolean>() {
+
+ public Boolean doInStatement(Statement stmt) throws SQLException, DataAccessException {
+ boolean result = true;
+ if (StringUtils.isNotEmpty(record.getDdlSchemaName())) {
+ result &= stmt.execute("use `" + record.getDdlSchemaName() + "`");
+ }
+ result &= stmt.execute(record.getSql());
+ return result;
+ }
+ });
+ if (Boolean.TRUE.equals(result)) {
+ context.getProcessedRecords().add(record);
+ } else {
+ context.getFailedRecords().add(record);
+ }
+ } catch (Throwable e) {
+ connectRecord.getCallback().onException(buildSendExceptionContext(connectRecord, e));
+ throw new RuntimeException(e);
+ }
+ }
+ connectRecord.getCallback().onSuccess(convertToSendResult(connectRecord));
+ }
+
+ private SendExceptionContext buildSendExceptionContext(ConnectRecord record, Throwable e) {
+ SendExceptionContext sendExceptionContext = new SendExceptionContext();
+ sendExceptionContext.setMessageId(record.getRecordId());
+ sendExceptionContext.setCause(e);
+ if (org.apache.commons.lang3.StringUtils.isNotEmpty(record.getExtension("topic"))) {
+ sendExceptionContext.setTopic(record.getExtension("topic"));
+ }
+ return sendExceptionContext;
+ }
+
+ private SendResult convertToSendResult(ConnectRecord record) {
+ SendResult result = new SendResult();
+ result.setMessageId(record.getRecordId());
+ if (org.apache.commons.lang3.StringUtils.isNotEmpty(record.getExtension("topic"))) {
+ result.setTopic(record.getExtension("topic"));
+ }
+ return result;
+ }
+
+ private void doBefore(List<CanalConnectRecord> canalConnectRecordList, final DbLoadData loadData) {
+ for (final CanalConnectRecord record : canalConnectRecordList) {
+ boolean filter = interceptor.before(sinkConfig, record);
+ if (!filter) {
+ loadData.merge(record);
+ }
+ }
+ }
+
+ private void doLoad(DbLoadContext context, CanalSinkIncrementConfig sinkConfig, DbLoadData loadData, ConnectRecord connectRecord) {
+ List<List<CanalConnectRecord>> batchDatas = new ArrayList<>();
+ for (TableLoadData tableData : loadData.getTables()) {
+ if (useBatch) {
+ batchDatas.addAll(split(tableData.getDeleteDatas()));
+ } else {
+ for (CanalConnectRecord data : tableData.getDeleteDatas()) {
+ batchDatas.add(Arrays.asList(data));
+ }
+ }
+ }
+
+ doTwoPhase(context, sinkConfig, batchDatas, true, connectRecord);
+
+ batchDatas.clear();
+
+ for (TableLoadData tableData : loadData.getTables()) {
+ if (useBatch) {
+ batchDatas.addAll(split(tableData.getInsertDatas()));
+ batchDatas.addAll(split(tableData.getUpdateDatas()));
+ } else {
+ for (CanalConnectRecord data : tableData.getInsertDatas()) {
+ batchDatas.add(Arrays.asList(data));
+ }
+ for (CanalConnectRecord data : tableData.getUpdateDatas()) {
+ batchDatas.add(Arrays.asList(data));
+ }
+ }
+ }
+
+ doTwoPhase(context, sinkConfig, batchDatas, true, connectRecord);
+
+ batchDatas.clear();
+ }
+
+ private void doLoadWithGtid(DbLoadContext context, CanalSinkIncrementConfig sinkConfig, ConnectRecord connectRecord) {
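+ // a source transaction may arrive split into several ConnectRecords; collect them by GTID until the batch is complete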
+ int batchIndex = connectRecord.getExtension("batchIndex", Integer.class);
+ int totalBatches = connectRecord.getExtension("totalBatches", Integer.class);
+ List<CanalConnectRecord> canalConnectRecordList = convertToCanalConnectRecord(connectRecord);
+
+ String gtid = canalConnectRecordList.get(0).getCurrentGtid();
+ GtidBatchManager.addBatch(gtid, batchIndex, totalBatches, canalConnectRecordList);
+ // check whether the batch is complete
+ if (GtidBatchManager.isComplete(gtid)) {
+ GtidBatch batch = GtidBatchManager.getGtidBatch(gtid);
+ List<List<CanalConnectRecord>> totalRows = batch.getBatches();
+ List<CanalConnectRecord> filteredRows = new ArrayList<>();
+ for (List<CanalConnectRecord> canalConnectRecords : totalRows) {
+ canalConnectRecords = filterRecord(canalConnectRecords);
+ if (!CollectionUtils.isEmpty(canalConnectRecords)) {
+ for (final CanalConnectRecord record : canalConnectRecords) {
+ boolean filter = interceptor.before(sinkConfig, record);
+ filteredRows.add(record);
+ }
+ }
+ }
+ context.setGtid(gtid);
+ Future<Exception> result = gtidSingleExecutor.submit(new DbLoadWorker(context, filteredRows, dbDialect, false, sinkConfig));
+ Exception ex = null;
+ try {
+ ex = result.get();
+ if (ex == null) {
+ connectRecord.getCallback().onSuccess(convertToSendResult(connectRecord));
+ }
+ } catch (Exception e) {
+ ex = e;
+ }
+ Boolean skipException = sinkConfig.getSkipException();
+ if (skipException != null && skipException) {
+ if (ex != null) {
+ // do skip
+ log.warn("skip exception will ack data : {} , caused by {}",
+ filteredRows,
+ ExceptionUtils.getFullStackTrace(ex));
+ GtidBatchManager.removeGtidBatch(gtid);
+ connectRecord.getCallback().onSuccess(convertToSendResult(connectRecord));
+ }
+ } else {
+ if (ex != null) {
+ log.error("sink connector will shutdown by " + ex.getMessage(), ExceptionUtils.getFullStackTrace(ex));
+ connectRecord.getCallback().onException(buildSendExceptionContext(connectRecord, ex));
+ gtidSingleExecutor.shutdown();
+ System.exit(1);
+ } else {
+ GtidBatchManager.removeGtidBatch(gtid);
+ }
+ }
+ } else {
+ log.info("Batch received, waiting for other batches.");
+ // ack this record
+ connectRecord.getCallback().onSuccess(convertToSendResult(connectRecord));
+ }
+ }
+
+ private List<CanalConnectRecord> convertToCanalConnectRecord(ConnectRecord connectRecord) {
+ List<CanalConnectRecord> canalConnectRecordList;
+ try {
+ canalConnectRecordList =
+ JsonUtils.parseTypeReferenceObject((byte[]) connectRecord.getData(), new TypeReference<List<CanalConnectRecord>>() {
+ });
+ } catch (Exception e) {
+ log.error("Failed to parse the canalConnectRecords.", e);
+ connectRecord.getCallback().onException(buildSendExceptionContext(connectRecord, e));
+ throw new RuntimeException("Failed to parse the canalConnectRecords.", e);
+ }
+ return canalConnectRecordList;
+ }
+
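+ // greedy batching: group records that share schema, table and SQL into batches of
+ // at most batchSize; the bits array marks records already assigned to a batch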
+ private List<List<CanalConnectRecord>> split(List<CanalConnectRecord> records) {
+ List<List<CanalConnectRecord>> result = new ArrayList<>();
+ if (records == null || records.isEmpty()) {
+ return result;
+ } else {
+ int[] bits = new int[records.size()];
+ for (int i = 0; i < bits.length; i++) {
+ while (i < bits.length && bits[i] == 1) {
+ i++;
+ }
+
+ if (i >= bits.length) {
+ break;
+ }
+
+ List<CanalConnectRecord> batch = new ArrayList<>();
+ bits[i] = 1;
+ batch.add(records.get(i));
+ for (int j = i + 1; j < bits.length && batch.size() < batchSize; j++) {
+ if (bits[j] == 0 && canBatch(records.get(i), records.get(j))) {
+ batch.add(records.get(j));
+ bits[j] = 1;
+ }
+ }
+ result.add(batch);
+ }
+
+ return result;
+ }
+ }
+
+ private boolean canBatch(CanalConnectRecord source, CanalConnectRecord target) {
+ return StringUtils.equals(source.getSchemaName(),
+ target.getSchemaName())
+ && StringUtils.equals(source.getTableName(), target.getTableName())
+ && StringUtils.equals(source.getSql(), target.getSql());
+ }
+
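+ // phase one: execute batches in parallel; on partial failure, phase two replays the
+ // records serially one by one (or skips them when skipException is enabled)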
+ private void doTwoPhase(DbLoadContext context, CanalSinkIncrementConfig sinkConfig, List<List<CanalConnectRecord>> totalRows, boolean canBatch,
+ ConnectRecord connectRecord) {
+ List<Future<Exception>> results = new ArrayList<>();
+ for (List<CanalConnectRecord> rows : totalRows) {
+ if (CollectionUtils.isEmpty(rows)) {
+ continue;
+ }
+ results.add(executor.submit(new DbLoadWorker(context, rows, dbDialect, canBatch, sinkConfig)));
+ }
+
+ boolean partFailed = false;
+ for (Future<Exception> result : results) {
+ Exception ex = null;
+ try {
+ ex = result.get();
+ if (ex == null) {
+ connectRecord.getCallback().onSuccess(convertToSendResult(connectRecord));
+ }
+ } catch (Exception e) {
+ ex = e;
+ }
+
+ if (ex != null) {
+ log.warn("##load phase one failed!", ex);
+ partFailed = true;
+ }
+ }
+
+ if (partFailed) {
+ List<CanalConnectRecord> retryRecords = new ArrayList<>();
+ for (List<CanalConnectRecord> rows : totalRows) {
+ retryRecords.addAll(rows);
+ }
+
+ context.getFailedRecords().clear();
+
+ Boolean skipException = sinkConfig.getSkipException();
+ if (skipException != null && skipException) {
+ for (CanalConnectRecord retryRecord : retryRecords) {
+ DbLoadWorker worker = new DbLoadWorker(context, Arrays.asList(retryRecord), dbDialect, false, sinkConfig);
+ try {
+ Exception ex = worker.call();
+ if (ex != null) {
+ // do skip
+ log.warn("skip exception for data : {} , caused by {}",
+ retryRecord,
+ ExceptionUtils.getFullStackTrace(ex));
+ connectRecord.getCallback().onSuccess(convertToSendResult(connectRecord));
+ }
+ } catch (Exception ex) {
+ // do skip
+ log.warn("skip exception for data : {} , caused by {}",
+ retryRecord,
+ ExceptionUtils.getFullStackTrace(ex));
+ connectRecord.getCallback().onSuccess(convertToSendResult(connectRecord));
+ }
+ }
+ } else {
+ DbLoadWorker worker = new DbLoadWorker(context, retryRecords, dbDialect, false, sinkConfig);
+ try {
+ Exception ex = worker.call();
+ if (ex != null) {
+ throw ex;
+ }
+ } catch (Exception ex) {
+ log.error("##load phase two failed!", ex);
+ log.error("sink connector will shutdown by " + ex.getMessage(), ex);
+ connectRecord.getCallback().onException(buildSendExceptionContext(connectRecord, ex));
+ executor.shutdown();
+ System.exit(1);
+ }
+ }
+ }
+ }
+
+ enum ExecuteResult {
+ SUCCESS, ERROR, RETRY
+ }
+
+ class DbLoadWorker implements Callable<Exception> {
+
+ private final DbLoadContext context;
+ private final DbDialect dbDialect;
+ private final List<CanalConnectRecord> records;
+ private final boolean canBatch;
+
+ private final CanalSinkIncrementConfig sinkConfig;
+
+ private final List<CanalConnectRecord> allFailedRecords = new ArrayList<>();
+ private final List<CanalConnectRecord> allProcessedRecords = new ArrayList<>();
+ private final List<CanalConnectRecord> processedRecords = new ArrayList<>();
+ private final List<CanalConnectRecord> failedRecords = new ArrayList<>();
+
+ public DbLoadWorker(DbLoadContext context, List<CanalConnectRecord> records, DbDialect dbDialect, boolean canBatch,
+ CanalSinkIncrementConfig sinkConfig) {
+ this.context = context;
+ this.records = records;
+ this.canBatch = canBatch;
+ this.dbDialect = dbDialect;
+ this.sinkConfig = sinkConfig;
+ }
+
+ public Exception call() throws Exception {
+ try {
+ return doCall();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private Exception doCall() {
+ RuntimeException error = null;
+ ExecuteResult exeResult = null;
+
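+ // GTID mode: apply the whole batch in one transaction with session gtid_next pinned to the source GTID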
+ if (sinkConfig.isGTIDMode()) {
+ int retryCount = 0;
+ final List<CanalConnectRecord> toExecuteRecords = new ArrayList<>();
+ try {
+ if (!CollectionUtils.isEmpty(failedRecords)) {
+ // if failedRecords not empty, make it retry
+ toExecuteRecords.addAll(failedRecords);
+ } else {
+ toExecuteRecords.addAll(records);
+ // add to failed record first, maybe get lob or datasource error
+ failedRecords.addAll(toExecuteRecords);
+ }
+ JdbcTemplate template = dbDialect.getJdbcTemplate();
+ String sourceGtid = context.getGtid();
+ if (StringUtils.isNotEmpty(sourceGtid) && !sinkConfig.isMariaDB()) {
+ String setMySQLGtid = "SET @@session.gtid_next = '" + sourceGtid + "';";
+ template.execute(setMySQLGtid);
+ } else if (StringUtils.isNotEmpty(sourceGtid) && sinkConfig.isMariaDB()) {
+ throw new RuntimeException("unsupport gtid mode for mariaDB");
+ } else {
+ log.error("gtid is empty in gtid mode");
+ throw new RuntimeException("gtid is empty in gtid mode");
+ }
+
+ final LobCreator lobCreator = dbDialect.getLobHandler().getLobCreator();
+ int affect = (Integer) dbDialect.getTransactionTemplate().execute((TransactionCallback) status -> {
+ try {
+ failedRecords.clear();
+ processedRecords.clear();
+ int affect1 = 0;
+ for (CanalConnectRecord record : toExecuteRecords) {
+ int affects = template.update(record.getSql(), new PreparedStatementSetter() {
+ public void setValues(PreparedStatement ps) throws SQLException {
+ doPreparedStatement(ps, dbDialect, lobCreator, record);
+ }
+ });
+ affect1 = affect1 + affects;
+ processStat(record, affects, false);
+ }
+ return affect1;
+ } catch (Exception e) {
+ // rollback
+ status.setRollbackOnly();
+ throw new RuntimeException("Failed to executed", e);
+ } finally {
+ lobCreator.close();
+ }
+ });
+
+ // reset gtid
+ if (sinkConfig.isMariaDB()) {
+ throw new RuntimeException("unsupport gtid mode for mariaDB");
+ } else {
+ String resetMySQLGtid = "SET @@session.gtid_next = 'AUTOMATIC';";
+ dbDialect.getJdbcTemplate().execute(resetMySQLGtid);
+ }
+
+ error = null;
+ exeResult = ExecuteResult.SUCCESS;
+ } catch (DeadlockLoserDataAccessException ex) {
+ error = new RuntimeException(ExceptionUtils.getFullStackTrace(ex));
+ exeResult = ExecuteResult.RETRY;
+ } catch (Throwable ex) {
+ error = new RuntimeException(ExceptionUtils.getFullStackTrace(ex));
+ exeResult = ExecuteResult.ERROR;
+ }
+
+ if (ExecuteResult.SUCCESS == exeResult) {
+ allFailedRecords.addAll(failedRecords);
+ allProcessedRecords.addAll(processedRecords);
+ failedRecords.clear();
+ processedRecords.clear();
+ } else if (ExecuteResult.RETRY == exeResult) {
+ retryCount = retryCount + 1;
+ processedRecords.clear();
+ failedRecords.clear();
+ failedRecords.addAll(toExecuteRecords);
+ int retry = 3;
+ if (retryCount >= retry) {
+ processFailedDatas(toExecuteRecords.size());
+ throw new RuntimeException(String.format("execute retry %s times failed", retryCount), error);
+ } else {
+ try {
+ int retryWait = 3000;
+ int wait = retryCount * retryWait;
+ wait = Math.max(wait, retryWait);
+ Thread.sleep(wait);
+ } catch (InterruptedException ex) {
+ Thread.interrupted();
+ processFailedDatas(toExecuteRecords.size());
+ throw new RuntimeException(ex);
+ }
+ }
+ } else {
+ processedRecords.clear();
+ failedRecords.clear();
+ failedRecords.addAll(toExecuteRecords);
+ processFailedDatas(toExecuteRecords.size());
+ throw error;
+ }
+ } else {
+ int index = 0;
+ while (index < records.size()) {
+ final List<CanalConnectRecord> toExecuteRecords = new ArrayList<>();
+ if (useBatch && canBatch) {
+ int end = Math.min(index + batchSize, records.size());
+ toExecuteRecords.addAll(records.subList(index, end));
+ index = end;
+ } else {
+ toExecuteRecords.add(records.get(index));
+ index = index + 1;
+ }
+
+ int retryCount = 0;
+ while (true) {
+ try {
+ if (!CollectionUtils.isEmpty(failedRecords)) {
+ toExecuteRecords.clear();
+ toExecuteRecords.addAll(failedRecords);
+ } else {
+ failedRecords.addAll(toExecuteRecords);
+ }
+
+ final LobCreator lobCreator = dbDialect.getLobHandler().getLobCreator();
+ if (useBatch && canBatch) {
+ JdbcTemplate template = dbDialect.getJdbcTemplate();
+ final String sql = toExecuteRecords.get(0).getSql();
+
+ int[] affects = new int[toExecuteRecords.size()];
+
+ affects = (int[]) dbDialect.getTransactionTemplate().execute((TransactionCallback) status -> {
+ try {
+ failedRecords.clear();
+ processedRecords.clear();
+ int[] affects1 = template.batchUpdate(sql, new BatchPreparedStatementSetter() {
+
+ public void setValues(PreparedStatement ps, int idx) throws SQLException {
+ doPreparedStatement(ps, dbDialect, lobCreator, toExecuteRecords.get(idx));
+ }
+
+ public int getBatchSize() {
+ return toExecuteRecords.size();
+ }
+ });
+ return affects1;
+ } catch (Exception e) {
+ // rollback
+ status.setRollbackOnly();
+ throw new RuntimeException("Failed to execute batch with GTID", e);
+ } finally {
+ lobCreator.close();
+ }
+ });
+
+ for (int i = 0; i < toExecuteRecords.size(); i++) {
+ assert affects != null;
+ processStat(toExecuteRecords.get(i), affects[i], true);
+ }
+ } else {
+ final CanalConnectRecord record = toExecuteRecords.get(0);
+ JdbcTemplate template = dbDialect.getJdbcTemplate();
+ int affect = 0;
+ affect = (Integer) dbDialect.getTransactionTemplate().execute((TransactionCallback) status -> {
+ try {
+ failedRecords.clear();
+ processedRecords.clear();
+ int affect1 = template.update(record.getSql(), new PreparedStatementSetter() {
+
+ public void setValues(PreparedStatement ps) throws SQLException {
+ doPreparedStatement(ps, dbDialect, lobCreator, record);
+ }
+ });
+ return affect1;
+ } catch (Exception e) {
+ // rollback
+ status.setRollbackOnly();
+ throw new RuntimeException("Failed to executed", e);
+ } finally {
+ lobCreator.close();
+ }
+ });
+ processStat(record, affect, false);
+ }
+
+ error = null;
+ exeResult = ExecuteResult.SUCCESS;
+ } catch (DeadlockLoserDataAccessException ex) {
+ error = new RuntimeException(ExceptionUtils.getFullStackTrace(ex));
+ exeResult = ExecuteResult.RETRY;
+ } catch (Throwable ex) {
+ error = new RuntimeException(ExceptionUtils.getFullStackTrace(ex));
+ exeResult = ExecuteResult.ERROR;
+ }
+
+ if (ExecuteResult.SUCCESS == exeResult) {
+ allFailedRecords.addAll(failedRecords);
+ allProcessedRecords.addAll(processedRecords);
+ failedRecords.clear();
+ processedRecords.clear();
+ break; // do next eventData
+ } else if (ExecuteResult.RETRY == exeResult) {
+ retryCount = retryCount + 1;
+ processedRecords.clear();
+ failedRecords.clear();
+ failedRecords.addAll(toExecuteRecords);
+ int retry = 3;
+ if (retryCount >= retry) {
+ processFailedDatas(index);
+ throw new RuntimeException(String.format("execute retry %s times failed", retryCount), error);
+ } else {
+ try {
+ int retryWait = 3000;
+ int wait = retryCount * retryWait;
+ wait = Math.max(wait, retryWait);
+ Thread.sleep(wait);
+ } catch (InterruptedException ex) {
+ Thread.interrupted();
+ processFailedDatas(index);
+ throw new RuntimeException(ex);
+ }
+ }
+ } else {
+ processedRecords.clear();
+ failedRecords.clear();
+ failedRecords.addAll(toExecuteRecords);
+ processFailedDatas(index);
+ throw error;
+ }
+ }
+ }
+ }
+
+ context.getFailedRecords().addAll(allFailedRecords);
+ context.getProcessedRecords().addAll(allProcessedRecords);
+ return null;
+ }
+
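+ // bind parameters in the order the generated SQL expects:
+ // insert -> columns + keys, delete -> keys, update -> updated columns + keys (+ old keys)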
+ private void doPreparedStatement(PreparedStatement ps, DbDialect dbDialect, LobCreator lobCreator,
+ CanalConnectRecord record) throws SQLException {
+ EventType type = record.getEventType();
+ List<EventColumn> columns = new ArrayList<>();
+ if (type.isInsert()) {
+ columns.addAll(record.getColumns());
+ columns.addAll(record.getKeys());
+ } else if (type.isDelete()) {
+ columns.addAll(record.getKeys());
+ } else if (type.isUpdate()) {
+ boolean existOldKeys = !CollectionUtils.isEmpty(record.getOldKeys());
+ columns.addAll(record.getUpdatedColumns());
+ columns.addAll(record.getKeys());
+ if (existOldKeys) {
+ columns.addAll(record.getOldKeys());
+ }
+ }
+
+ for (int i = 0; i < columns.size(); i++) {
+ int paramIndex = i + 1;
+ EventColumn column = columns.get(i);
+ int sqlType = column.getColumnType();
+
+ Object param = null;
+ if (dbDialect instanceof MysqlDialect
+ && (sqlType == Types.TIME || sqlType == Types.TIMESTAMP || sqlType == Types.DATE)) {
+ param = column.getColumnValue();
+ } else {
+ param = SqlUtils.stringToSqlValue(column.getColumnValue(),
+ sqlType,
+ false,
+ dbDialect.isEmptyStringNulled());
+ }
+
+ try {
+ switch (sqlType) {
+ case Types.CLOB:
+ lobCreator.setClobAsString(ps, paramIndex, (String) param);
+ break;
+
+ case Types.BLOB:
+ lobCreator.setBlobAsBytes(ps, paramIndex, (byte[]) param);
+ break;
+ case Types.TIME:
+ case Types.TIMESTAMP:
+ case Types.DATE:
+ if (dbDialect instanceof MysqlDialect) {
+ ps.setObject(paramIndex, param);
+ } else {
+ StatementCreatorUtils.setParameterValue(ps, paramIndex, sqlType, null, param);
+ }
+ break;
+ case Types.BIT:
+ if (dbDialect instanceof MysqlDialect) {
+ StatementCreatorUtils.setParameterValue(ps, paramIndex, Types.DECIMAL, null, param);
+ } else {
+ StatementCreatorUtils.setParameterValue(ps, paramIndex, sqlType, null, param);
+ }
+ break;
+ default:
+ StatementCreatorUtils.setParameterValue(ps, paramIndex, sqlType, null, param);
+ break;
+ }
+ } catch (SQLException ex) {
+ log.error("## SetParam error , [pairId={}, sqltype={}, value={}]",
+ record.getPairId(), sqlType, param);
+ throw ex;
+ }
+ }
+ }
+
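+ // a JDBC batch may report Statement.SUCCESS_NO_INFO, which still counts as success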
+ private void processStat(CanalConnectRecord record, int affect, boolean batch) {
+ if (batch && (affect < 1 && affect != Statement.SUCCESS_NO_INFO)) {
+ failedRecords.add(record);
+ } else if (!batch && affect < 1) {
+ failedRecords.add(record);
+ } else {
+ processedRecords.add(record);
+ // this.processStat(record, context);
+ }
+ }
+
+ private void processFailedDatas(int index) {
+ allFailedRecords.addAll(failedRecords);
+ context.getFailedRecords().addAll(allFailedRecords);
+ for (; index < records.size(); index++) {
+ context.getFailedRecords().add(records.get(index));
+ }
+ allProcessedRecords.addAll(processedRecords);
+ context.getProcessedRecords().addAll(allProcessedRecords);
+ }
+ }
+
+}
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/EntryParser.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/EntryParser.java
index 75572a5faf..5a6ceb7c3f 100644
--- a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/EntryParser.java
+++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/EntryParser.java
@@ -17,7 +17,7 @@
package org.apache.eventmesh.connector.canal.source;
-import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSourceConfig;
+import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSourceIncrementConfig;
import org.apache.eventmesh.connector.canal.CanalConnectRecord;
import org.apache.eventmesh.connector.canal.model.EventColumn;
import org.apache.eventmesh.connector.canal.model.EventColumnIndexComparable;
@@ -48,7 +48,7 @@
@Slf4j
public class EntryParser {
- public static Map<Long, List<CanalConnectRecord>> parse(CanalSourceConfig sourceConfig, List<Entry> datas,
+ public static Map<Long, List<CanalConnectRecord>> parse(CanalSourceIncrementConfig sourceConfig, List<Entry> datas,
RdbTableMgr tables) {
List<CanalConnectRecord> recordList = new ArrayList<>();
List<Entry> transactionDataBuffer = new ArrayList<>();
@@ -90,12 +90,12 @@ public static Map<Long, List<CanalConnectRecord>> parse(CanalSourceConfig source
return recordMap;
}
- private static boolean checkGtidForEntry(Entry entry, CanalSourceConfig sourceConfig) {
+ private static boolean checkGtidForEntry(Entry entry, CanalSourceIncrementConfig sourceConfig) {
String currentGtid = entry.getHeader().getPropsList().get(0).getValue();
return currentGtid.contains(sourceConfig.getServerUUID());
}
- private static void parseRecordListWithEntryBuffer(CanalSourceConfig sourceConfig,
+ private static void parseRecordListWithEntryBuffer(CanalSourceIncrementConfig sourceConfig,
List<CanalConnectRecord> recordList,
List<Entry> transactionDataBuffer, RdbTableMgr tables) {
for (Entry bufferEntry : transactionDataBuffer) {
@@ -115,13 +115,13 @@ private static void parseRecordListWithEntryBuffer(CanalSourceConfig sourceConfi
}
}
- private static boolean checkNeedSync(CanalSourceConfig sourceConfig, RowChange rowChange) {
+ private static boolean checkNeedSync(CanalSourceIncrementConfig sourceConfig, RowChange rowChange) {
Column markedColumn = null;
CanalEntry.EventType eventType = rowChange.getEventType();
- if (eventType.equals(CanalEntry.EventType.DELETE) || eventType.equals(CanalEntry.EventType.UPDATE)) {
+ if (eventType.equals(CanalEntry.EventType.DELETE)) {
markedColumn = getColumnIgnoreCase(rowChange.getRowDatas(0).getBeforeColumnsList(),
sourceConfig.getNeedSyncMarkTableColumnName());
- } else if (eventType.equals(CanalEntry.EventType.INSERT)) {
+ } else if (eventType.equals(CanalEntry.EventType.INSERT) || eventType.equals(CanalEntry.EventType.UPDATE)) {
markedColumn = getColumnIgnoreCase(rowChange.getRowDatas(0).getAfterColumnsList(),
sourceConfig.getNeedSyncMarkTableColumnName());
}
@@ -141,7 +141,7 @@ private static Column getColumnIgnoreCase(List<Column> columns, String columName
return null;
}
- private static List<CanalConnectRecord> internParse(CanalSourceConfig sourceConfig, Entry entry,
+ private static List<CanalConnectRecord> internParse(CanalSourceIncrementConfig sourceConfig, Entry entry,
RdbTableMgr tableMgr) {
String schemaName = entry.getHeader().getSchemaName();
String tableName = entry.getHeader().getTableName();
@@ -180,7 +180,7 @@ private static List<CanalConnectRecord> internParse(CanalSourceConf
return recordList;
}
- private static CanalConnectRecord internParse(CanalSourceConfig canalSourceConfig, Entry entry,
+ private static CanalConnectRecord internParse(CanalSourceIncrementConfig canalSourceConfig, Entry entry,
RowChange rowChange, RowData rowData) {
CanalConnectRecord canalConnectRecord = new CanalConnectRecord();
canalConnectRecord.setTableName(entry.getHeader().getTableName());
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalFullProducer.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalFullProducer.java
index 062bbb93a8..c0b2063d28 100644
--- a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalFullProducer.java
+++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalFullProducer.java
@@ -32,6 +32,7 @@
import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord;
import java.math.BigDecimal;
+import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
@@ -83,6 +84,7 @@ public CanalFullProducer(BlockingQueue<List<ConnectRecord>> queue, DataSource da
public void choosePrimaryKey() {
for (RdbColumnDefinition col : tableDefinition.getColumnDefinitions().values()) {
if (position.getCurPrimaryKeyCols().get(col.getName()) != null) {
+ // choose the first primary key column of the table
choosePrimaryKey.set(col.getName());
log.info("schema [{}] table [{}] choose primary key [{}]", tableDefinition.getSchemaName(), tableDefinition.getTableName(),
col.getName());
@@ -95,6 +97,7 @@ public void choosePrimaryKey() {
public void start(AtomicBoolean flag) {
choosePrimaryKey();
+ // used for paged querying
boolean isFirstSelect = true;
List<Map<String, Object>> rows = new LinkedList<>();
while (flag.get()) {
@@ -120,6 +123,7 @@ public void start(AtomicBoolean flag) {
continue;
}
refreshPosition(lastCol);
+ // flush a full batch; the scan may not have finished yet
commitConnectRecord(rows);
rows = new LinkedList<>();
}
@@ -127,6 +131,7 @@ public void start(AtomicBoolean flag) {
if (lastCol == null || checkIsScanFinish(lastCol)) {
log.info("full scan db [{}] table [{}] finish", tableDefinition.getSchemaName(),
tableDefinition.getTableName());
+ // commit the last record if rows.size() < flushSize
commitConnectRecord(rows);
return;
}
@@ -164,7 +169,8 @@ private void commitConnectRecord(List<Map<String, Object>> rows) throws Interrup
offset.setPosition(jobRdbFullPosition);
CanalFullRecordPartition partition = new CanalFullRecordPartition();
ArrayList<ConnectRecord> records = new ArrayList<>();
- records.add(new ConnectRecord(partition, offset, System.currentTimeMillis(), rows));
+ byte[] rowsData = JsonUtils.toJSONString(rows).getBytes(StandardCharsets.UTF_8);
+ records.add(new ConnectRecord(partition, offset, System.currentTimeMillis(), rowsData));
queue.put(records);
}
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceConnector.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceConnector.java
index ea5ccdeed0..e24301ae07 100644
--- a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceConnector.java
+++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceConnector.java
@@ -19,53 +19,14 @@
import org.apache.eventmesh.common.config.connector.Config;
import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSourceConfig;
-import org.apache.eventmesh.common.remote.offset.RecordPosition;
-import org.apache.eventmesh.common.remote.offset.canal.CanalRecordOffset;
-import org.apache.eventmesh.common.remote.offset.canal.CanalRecordPartition;
-import org.apache.eventmesh.common.utils.JsonUtils;
-import org.apache.eventmesh.connector.canal.CanalConnectRecord;
-import org.apache.eventmesh.connector.canal.DatabaseConnection;
-import org.apache.eventmesh.connector.canal.source.EntryParser;
-import org.apache.eventmesh.connector.canal.source.table.RdbTableMgr;
+import org.apache.eventmesh.common.remote.job.JobType;
import org.apache.eventmesh.openconnect.api.ConnectorCreateService;
import org.apache.eventmesh.openconnect.api.connector.ConnectorContext;
import org.apache.eventmesh.openconnect.api.connector.SourceConnectorContext;
import org.apache.eventmesh.openconnect.api.source.Source;
import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord;
-import org.apache.commons.lang3.StringUtils;
-
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.LockSupport;
-
-import com.alibaba.otter.canal.instance.core.CanalInstance;
-import com.alibaba.otter.canal.instance.core.CanalInstanceGenerator;
-import com.alibaba.otter.canal.instance.manager.CanalInstanceWithManager;
-import com.alibaba.otter.canal.instance.manager.model.Canal;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.ClusterMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.HAMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.IndexMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.MetaMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.RunMode;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.SourcingType;
-import com.alibaba.otter.canal.instance.manager.model.CanalParameter.StorageMode;
-import com.alibaba.otter.canal.parse.CanalEventParser;
-import com.alibaba.otter.canal.parse.ha.CanalHAController;
-import com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser;
-import com.alibaba.otter.canal.protocol.CanalEntry;
-import com.alibaba.otter.canal.protocol.CanalEntry.Entry;
-import com.alibaba.otter.canal.protocol.ClientIdentity;
-import com.alibaba.otter.canal.server.embedded.CanalServerWithEmbedded;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.InvalidProtocolBufferException;
import lombok.extern.slf4j.Slf4j;
@@ -74,19 +35,7 @@ public class CanalSourceConnector implements Source, ConnectorCreateService<Source> {
public Class<? extends Config> configClass() {
@@ -102,280 +51,48 @@ public void init(Config config) throws Exception {
@Override
public void init(ConnectorContext connectorContext) throws Exception {
SourceConnectorContext sourceConnectorContext = (SourceConnectorContext) connectorContext;
- this.sourceConfig = (CanalSourceConfig) sourceConnectorContext.getSourceConfig();
- if (sourceConnectorContext.getRecordPositionList() != null) {
- this.sourceConfig.setRecordPositions(sourceConnectorContext.getRecordPositionList());
- }
-
- if (StringUtils.isNotEmpty(sourceConfig.getTableFilter())) {
- tableFilter = sourceConfig.getTableFilter();
- }
- if (StringUtils.isNotEmpty(sourceConfig.getFieldFilter())) {
- fieldFilter = sourceConfig.getFieldFilter();
- }
-
- canalServer = CanalServerWithEmbedded.instance();
-
- canalServer.setCanalInstanceGenerator(new CanalInstanceGenerator() {
- @Override
- public CanalInstance generate(String destination) {
- Canal canal = buildCanal(sourceConfig);
-
- CanalInstanceWithManager instance = new CanalInstanceWithManager(canal, tableFilter) {
-
- protected CanalHAController initHaController() {
- return super.initHaController();
- }
-
- protected void startEventParserInternal(CanalEventParser parser, boolean isGroup) {
- super.startEventParserInternal(parser, isGroup);
-
- if (eventParser instanceof MysqlEventParser) {
- // set eventParser support type
- ((MysqlEventParser) eventParser).setSupportBinlogFormats("ROW");
- ((MysqlEventParser) eventParser).setSupportBinlogImages("FULL");
- MysqlEventParser mysqlEventParser = (MysqlEventParser) eventParser;
- mysqlEventParser.setParallel(false);
- if (StringUtils.isNotEmpty(fieldFilter)) {
- mysqlEventParser.setFieldFilter(fieldFilter);
- }
-
- CanalHAController haController = mysqlEventParser.getHaController();
- if (!haController.isStart()) {
- haController.start();
- }
- }
- }
- };
- return instance;
- }
- });
- DatabaseConnection.sourceConfig = sourceConfig.getSourceConnectorConfig();
- DatabaseConnection.initSourceConnection();
- tableMgr = new RdbTableMgr(sourceConfig.getSourceConnectorConfig(), DatabaseConnection.sourceDataSource);
- }
-
- private Canal buildCanal(CanalSourceConfig sourceConfig) {
- long slaveId = 10000;
- if (sourceConfig.getSlaveId() != null) {
- slaveId = sourceConfig.getSlaveId();
- }
-
- Canal canal = new Canal();
- canal.setId(sourceConfig.getCanalInstanceId());
- canal.setName(sourceConfig.getDestination());
- canal.setDesc(sourceConfig.getDesc());
-
- CanalParameter parameter = new CanalParameter();
-
- parameter.setRunMode(RunMode.EMBEDDED);
- parameter.setClusterMode(ClusterMode.STANDALONE);
- parameter.setMetaMode(MetaMode.MEMORY);
- parameter.setHaMode(HAMode.HEARTBEAT);
- parameter.setIndexMode(IndexMode.MEMORY);
- parameter.setStorageMode(StorageMode.MEMORY);
- parameter.setMemoryStorageBufferSize(32 * 1024);
-
- parameter.setSourcingType(SourcingType.MYSQL);
- parameter.setDbAddresses(Collections.singletonList(new InetSocketAddress(sourceConfig.getSourceConnectorConfig().getDbAddress(),
- sourceConfig.getSourceConnectorConfig().getDbPort())));
- parameter.setDbUsername(sourceConfig.getSourceConnectorConfig().getUserName());
- parameter.setDbPassword(sourceConfig.getSourceConnectorConfig().getPassWord());
-
- // set if enabled gtid mode
- parameter.setGtidEnable(sourceConfig.isGTIDMode());
-
- // check positions
- // example: Arrays.asList("{\"journalName\":\"mysql-bin.000001\",\"position\":6163L,\"timestamp\":1322803601000L}",
- // "{\"journalName\":\"mysql-bin.000001\",\"position\":6163L,\"timestamp\":1322803601000L}")
- if (sourceConfig.getRecordPositions() != null && !sourceConfig.getRecordPositions().isEmpty()) {
- List<RecordPosition> recordPositions = sourceConfig.getRecordPositions();
- List<String> positions = new ArrayList<>();
- recordPositions.forEach(recordPosition -> {
- Map<String, Object> recordPositionMap = new HashMap<>();
- CanalRecordPartition canalRecordPartition = (CanalRecordPartition) (recordPosition.getRecordPartition());
- CanalRecordOffset canalRecordOffset = (CanalRecordOffset) (recordPosition.getRecordOffset());
- recordPositionMap.put("journalName", canalRecordPartition.getJournalName());
- recordPositionMap.put("timestamp", canalRecordPartition.getTimeStamp());
- recordPositionMap.put("position", canalRecordOffset.getOffset());
- // for mariaDB not support gtid mode
- if (sourceConfig.isGTIDMode() && !sourceConfig.isMariaDB()) {
- String gtidRange = canalRecordOffset.getGtid();
- if (gtidRange != null) {
- if (canalRecordOffset.getCurrentGtid() != null) {
- gtidRange = EntryParser.replaceGtidRange(canalRecordOffset.getGtid(), canalRecordOffset.getCurrentGtid(),
- sourceConfig.getServerUUID());
- }
- recordPositionMap.put("gtid", gtidRange);
- }
- }
- positions.add(JsonUtils.toJSONString(recordPositionMap));
- });
- parameter.setPositions(positions);
+ if (sourceConnectorContext.getJobType().equals(JobType.FULL)) {
+ this.source = new CanalSourceFullConnector();
+ } else if (sourceConnectorContext.getJobType().equals(JobType.INCREASE)) {
+ this.source = new CanalSourceIncrementConnector();
+ } else if (sourceConnectorContext.getJobType().equals(JobType.CHECK)) {
+ this.source = new CanalSourceCheckConnector();
+ } else {
+ throw new RuntimeException("unsupported job type " + sourceConnectorContext.getJobType());
}
-
- parameter.setSlaveId(slaveId);
-
- parameter.setDefaultConnectionTimeoutInSeconds(30);
- parameter.setConnectionCharset("UTF-8");
- parameter.setConnectionCharsetNumber((byte) 33);
- parameter.setReceiveBufferSize(8 * 1024);
- parameter.setSendBufferSize(8 * 1024);
-
- // heartbeat detect
- parameter.setDetectingEnable(false);
-
- parameter.setDdlIsolation(sourceConfig.isDdlSync());
- parameter.setFilterTableError(sourceConfig.isFilterTableError());
- parameter.setMemoryStorageRawEntry(false);
-
- canal.setCanalParameter(parameter);
- return canal;
+ this.source.init(sourceConnectorContext);
}
@Override
public void start() throws Exception {
- if (running) {
- return;
- }
- tableMgr.start();
- canalServer.start();
-
- canalServer.start(sourceConfig.getDestination());
- this.clientIdentity = new ClientIdentity(sourceConfig.getDestination(), sourceConfig.getClientId(), tableFilter);
- canalServer.subscribe(clientIdentity);
-
- running = true;
+ this.source.start();
}
@Override
public void commit(ConnectRecord record) {
- long batchId = Long.parseLong(record.getExtension("messageId"));
- int batchIndex = record.getExtension("batchIndex", Integer.class);
- int totalBatches = record.getExtension("totalBatches", Integer.class);
- if (batchIndex == totalBatches - 1) {
- log.debug("ack records batchIndex:{}, totalBatches:{}, batchId:{}",
- batchIndex, totalBatches, batchId);
- canalServer.ack(clientIdentity, batchId);
- }
+ this.source.commit(record);
}
@Override
public String name() {
- return this.sourceConfig.getSourceConnectorConfig().getConnectorName();
+ return this.source.name();
}
@Override
public void onException(ConnectRecord record) {
-
+ this.source.onException(record);
}
@Override
- public void stop() {
- if (!running) {
- return;
- }
- running = false;
- canalServer.stop(sourceConfig.getDestination());
- canalServer.stop();
+ public void stop() throws Exception {
+ this.source.stop();
}
@Override
public List<ConnectRecord> poll() {
- int emptyTimes = 0;
- com.alibaba.otter.canal.protocol.Message message = null;
- if (sourceConfig.getBatchTimeout() < 0) {
- while (running) {
- message = canalServer.getWithoutAck(clientIdentity, sourceConfig.getBatchSize());
- if (message == null || message.getId() == -1L) { // empty
- applyWait(emptyTimes++);
- } else {
- break;
- }
- }
- } else { // perform with timeout
- while (running) {
- message =
- canalServer.getWithoutAck(clientIdentity, sourceConfig.getBatchSize(), sourceConfig.getBatchTimeout(), TimeUnit.MILLISECONDS);
- if (message == null || message.getId() == -1L) { // empty
- continue;
- }
- break;
- }
- }
-
- List<Entry> entries;
- assert message != null;
- if (message.isRaw()) {
- entries = new ArrayList<>(message.getRawEntries().size());
- for (ByteString entry : message.getRawEntries()) {
- try {
- entries.add(CanalEntry.Entry.parseFrom(entry));
- } catch (InvalidProtocolBufferException e) {
- throw new RuntimeException(e);
- }
- }
- } else {
- entries = message.getEntries();
- }
-
- List<ConnectRecord> result = new ArrayList<>();
- // key: Xid offset
- Map<Long, List<CanalConnectRecord>> connectorRecordMap = EntryParser.parse(sourceConfig, entries, tableMgr);
-
- if (!connectorRecordMap.isEmpty()) {
- Set<Map.Entry<Long, List<CanalConnectRecord>>> entrySet = connectorRecordMap.entrySet();
- for (Map.Entry<Long, List<CanalConnectRecord>> entry : entrySet) {
- List<CanalConnectRecord> connectRecordList = entry.getValue();
- CanalConnectRecord lastRecord = entry.getValue().get(connectRecordList.size() - 1);
- CanalRecordPartition canalRecordPartition = new CanalRecordPartition();
- canalRecordPartition.setServerUUID(sourceConfig.getServerUUID());
- canalRecordPartition.setJournalName(lastRecord.getJournalName());
- canalRecordPartition.setTimeStamp(lastRecord.getExecuteTime());
- // Xid offset with gtid
- Long binLogOffset = entry.getKey();
- CanalRecordOffset canalRecordOffset = new CanalRecordOffset();
- canalRecordOffset.setOffset(binLogOffset);
- if (StringUtils.isNotEmpty(lastRecord.getGtid()) && StringUtils.isNotEmpty(lastRecord.getCurrentGtid())) {
- canalRecordOffset.setGtid(lastRecord.getGtid());
- canalRecordOffset.setCurrentGtid(lastRecord.getCurrentGtid());
- }
-
- // split record list
- List<List<CanalConnectRecord>> splitLists = new ArrayList<>();
- for (int i = 0; i < connectRecordList.size(); i += sourceConfig.getBatchSize()) {
- int end = Math.min(i + sourceConfig.getBatchSize(), connectRecordList.size());
- List<CanalConnectRecord> subList = connectRecordList.subList(i, end);
- splitLists.add(subList);
- }
-
- for (int i = 0; i < splitLists.size(); i++) {
- ConnectRecord connectRecord = new ConnectRecord(canalRecordPartition, canalRecordOffset, System.currentTimeMillis());
- connectRecord.addExtension("messageId", String.valueOf(message.getId()));
- connectRecord.addExtension("batchIndex", i);
- connectRecord.addExtension("totalBatches", splitLists.size());
- connectRecord.setData(splitLists.get(i));
- result.add(connectRecord);
- }
- }
- } else {
- // for the message has been filtered need ack message
- canalServer.ack(clientIdentity, message.getId());
- }
-
- return result;
- }
-
- // Handle the situation of no data and avoid empty loop death
- private void applyWait(int emptyTimes) {
- int newEmptyTimes = Math.min(emptyTimes, maxEmptyTimes);
- if (emptyTimes <= 3) {
- Thread.yield();
- } else {
- LockSupport.parkNanos(1000 * 1000L * newEmptyTimes);
- }
+ return this.source.poll();
}
@Override
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceFullConnector.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceFullConnector.java
index 97730463b5..c2632ee472 100644
--- a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceFullConnector.java
+++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceFullConnector.java
@@ -20,6 +20,7 @@
import org.apache.eventmesh.common.AbstractComponent;
import org.apache.eventmesh.common.EventMeshThreadFactory;
import org.apache.eventmesh.common.config.connector.Config;
+import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSourceConfig;
import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSourceFullConfig;
import org.apache.eventmesh.common.config.connector.rdb.canal.JobRdbFullPosition;
import org.apache.eventmesh.common.config.connector.rdb.canal.RdbDBDefinition;
@@ -32,11 +33,11 @@
import org.apache.eventmesh.connector.canal.source.position.TableFullPosition;
import org.apache.eventmesh.connector.canal.source.table.RdbSimpleTable;
import org.apache.eventmesh.connector.canal.source.table.RdbTableMgr;
-import org.apache.eventmesh.openconnect.api.ConnectorCreateService;
import org.apache.eventmesh.openconnect.api.connector.ConnectorContext;
import org.apache.eventmesh.openconnect.api.connector.SourceConnectorContext;
import org.apache.eventmesh.openconnect.api.source.Source;
import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord;
+import org.apache.eventmesh.openconnect.util.ConfigUtil;
import java.util.LinkedList;
import java.util.List;
@@ -49,7 +50,8 @@
import lombok.extern.slf4j.Slf4j;
@Slf4j
-public class CanalSourceFullConnector extends AbstractComponent implements Source, ConnectorCreateService<Source> {
+public class CanalSourceFullConnector extends AbstractComponent implements Source {
+
private CanalSourceFullConfig config;
private CanalFullPositionMgr positionMgr;
private RdbTableMgr tableMgr;
@@ -62,14 +64,14 @@ protected void run() throws Exception {
this.tableMgr.start();
this.positionMgr.start();
if (positionMgr.isFinished()) {
- log.info("connector [{}] has finished the job", config.getConnectorConfig().getConnectorName());
+ log.info("connector [{}] has finished the job", config.getSourceConnectorConfig().getConnectorName());
return;
}
executor = new ThreadPoolExecutor(config.getParallel(), config.getParallel(), 0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<>(), new EventMeshThreadFactory("canal-source-full"));
List<CanalFullProducer> producers = new LinkedList<>();
- if (config.getConnectorConfig().getDatabases() != null) {
- for (RdbDBDefinition db : config.getConnectorConfig().getDatabases()) {
+ if (config.getSourceConnectorConfig().getDatabases() != null) {
+ for (RdbDBDefinition db : config.getSourceConnectorConfig().getDatabases()) {
for (RdbTableDefinition table : db.getTables()) {
try {
log.info("it will create producer of db [{}] table [{}]", db.getSchemaName(), table.getTableName());
@@ -119,11 +121,6 @@ protected void shutdown() throws Exception {
}
}
- @Override
- public Source create() {
- return new CanalSourceFullConnector();
- }
-
@Override
public Class<? extends Config> configClass() {
return CanalSourceFullConfig.class;
@@ -136,16 +133,17 @@ public void init(Config config) throws Exception {
}
private void init() {
- DatabaseConnection.sourceConfig = this.config.getConnectorConfig();
+ DatabaseConnection.sourceConfig = this.config.getSourceConnectorConfig();
DatabaseConnection.initSourceConnection();
- this.tableMgr = new RdbTableMgr(config.getConnectorConfig(), DatabaseConnection.sourceDataSource);
+ this.tableMgr = new RdbTableMgr(config.getSourceConnectorConfig(), DatabaseConnection.sourceDataSource);
this.positionMgr = new CanalFullPositionMgr(config, tableMgr);
}
@Override
public void init(ConnectorContext connectorContext) throws Exception {
SourceConnectorContext sourceConnectorContext = (SourceConnectorContext) connectorContext;
- this.config = (CanalSourceFullConfig) sourceConnectorContext.getSourceConfig();
+ CanalSourceConfig canalSourceConfig = (CanalSourceConfig) sourceConnectorContext.getSourceConfig();
+ this.config = ConfigUtil.parse(canalSourceConfig.getSourceConfig(), CanalSourceFullConfig.class);
init();
}
@@ -156,7 +154,7 @@ public void commit(ConnectRecord record) {
@Override
public String name() {
- return this.config.getConnectorConfig().getConnectorName();
+ return this.config.getSourceConnectorConfig().getConnectorName();
}
@Override
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceIncrementConnector.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceIncrementConnector.java
new file mode 100644
index 0000000000..4f7041b478
--- /dev/null
+++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceIncrementConnector.java
@@ -0,0 +1,383 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.eventmesh.connector.canal.source.connector;
+
+import org.apache.eventmesh.common.config.connector.Config;
+import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSourceConfig;
+import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSourceIncrementConfig;
+import org.apache.eventmesh.common.remote.offset.RecordPosition;
+import org.apache.eventmesh.common.remote.offset.canal.CanalRecordOffset;
+import org.apache.eventmesh.common.remote.offset.canal.CanalRecordPartition;
+import org.apache.eventmesh.common.utils.JsonUtils;
+import org.apache.eventmesh.connector.canal.CanalConnectRecord;
+import org.apache.eventmesh.connector.canal.DatabaseConnection;
+import org.apache.eventmesh.connector.canal.source.EntryParser;
+import org.apache.eventmesh.connector.canal.source.table.RdbTableMgr;
+import org.apache.eventmesh.openconnect.api.connector.ConnectorContext;
+import org.apache.eventmesh.openconnect.api.connector.SourceConnectorContext;
+import org.apache.eventmesh.openconnect.api.source.Source;
+import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord;
+import org.apache.eventmesh.openconnect.util.ConfigUtil;
+
+import org.apache.commons.lang3.StringUtils;
+
+import java.net.InetSocketAddress;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.LockSupport;
+
+import com.alibaba.otter.canal.instance.core.CanalInstance;
+import com.alibaba.otter.canal.instance.core.CanalInstanceGenerator;
+import com.alibaba.otter.canal.instance.manager.CanalInstanceWithManager;
+import com.alibaba.otter.canal.instance.manager.model.Canal;
+import com.alibaba.otter.canal.instance.manager.model.CanalParameter;
+import com.alibaba.otter.canal.instance.manager.model.CanalParameter.ClusterMode;
+import com.alibaba.otter.canal.instance.manager.model.CanalParameter.HAMode;
+import com.alibaba.otter.canal.instance.manager.model.CanalParameter.IndexMode;
+import com.alibaba.otter.canal.instance.manager.model.CanalParameter.MetaMode;
+import com.alibaba.otter.canal.instance.manager.model.CanalParameter.RunMode;
+import com.alibaba.otter.canal.instance.manager.model.CanalParameter.SourcingType;
+import com.alibaba.otter.canal.instance.manager.model.CanalParameter.StorageMode;
+import com.alibaba.otter.canal.parse.CanalEventParser;
+import com.alibaba.otter.canal.parse.ha.CanalHAController;
+import com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser;
+import com.alibaba.otter.canal.protocol.CanalEntry.Entry;
+import com.alibaba.otter.canal.protocol.ClientIdentity;
+import com.alibaba.otter.canal.server.embedded.CanalServerWithEmbedded;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class CanalSourceIncrementConnector implements Source {
+
+ private CanalSourceIncrementConfig sourceConfig;
+
+ private CanalServerWithEmbedded canalServer;
+
+ private ClientIdentity clientIdentity;
+
+ private String tableFilter = null;
+
+ private String fieldFilter = null;
+
+ private volatile boolean running = false;
+
+ private static final int maxEmptyTimes = 10;
+
+ private RdbTableMgr tableMgr;
+
+ @Override
+ public Class<? extends Config> configClass() {
+ return CanalSourceConfig.class;
+ }
+
+ @Override
+ public void init(Config config) throws Exception {
+ // init config for canal source connector
+ this.sourceConfig = (CanalSourceIncrementConfig) config;
+ }
+
+ @Override
+ public void init(ConnectorContext connectorContext) throws Exception {
+ SourceConnectorContext sourceConnectorContext = (SourceConnectorContext) connectorContext;
+ CanalSourceConfig canalSourceConfig = (CanalSourceConfig) sourceConnectorContext.getSourceConfig();
+ this.sourceConfig = ConfigUtil.parse(canalSourceConfig.getSourceConfig(), CanalSourceIncrementConfig.class);
+ if (sourceConnectorContext.getRecordPositionList() != null) {
+ this.sourceConfig.setRecordPositions(sourceConnectorContext.getRecordPositionList());
+ }
+
+ if (StringUtils.isNotEmpty(sourceConfig.getTableFilter())) {
+ tableFilter = sourceConfig.getTableFilter();
+ }
+ if (StringUtils.isNotEmpty(sourceConfig.getFieldFilter())) {
+ fieldFilter = sourceConfig.getFieldFilter();
+ }
+
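+ // Run Canal embedded in the connector process; the generator below builds the
+ // single MySQL instance for this job's destination on demand.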
+ canalServer = CanalServerWithEmbedded.instance();
+
+ canalServer.setCanalInstanceGenerator(new CanalInstanceGenerator() {
+ @Override
+ public CanalInstance generate(String destination) {
+ Canal canal = buildCanal(sourceConfig);
+
+ CanalInstanceWithManager instance = new CanalInstanceWithManager(canal, tableFilter) {
+
+ protected CanalHAController initHaController() {
+ return super.initHaController();
+ }
+
+ protected void startEventParserInternal(CanalEventParser parser, boolean isGroup) {
+ super.startEventParserInternal(parser, isGroup);
+
+ if (eventParser instanceof MysqlEventParser) {
+ // set eventParser support type
+ ((MysqlEventParser) eventParser).setSupportBinlogFormats("ROW");
+ ((MysqlEventParser) eventParser).setSupportBinlogImages("FULL");
+ MysqlEventParser mysqlEventParser = (MysqlEventParser) eventParser;
+ mysqlEventParser.setParallel(false);
+ if (StringUtils.isNotEmpty(fieldFilter)) {
+ mysqlEventParser.setFieldFilter(fieldFilter);
+ }
+
+ CanalHAController haController = mysqlEventParser.getHaController();
+ if (!haController.isStart()) {
+ haController.start();
+ }
+ }
+ }
+ };
+ return instance;
+ }
+ });
+ DatabaseConnection.sourceConfig = sourceConfig.getSourceConnectorConfig();
+ DatabaseConnection.initSourceConnection();
+ tableMgr = new RdbTableMgr(sourceConfig.getSourceConnectorConfig(), DatabaseConnection.sourceDataSource);
+ }
+
+ private Canal buildCanal(CanalSourceIncrementConfig sourceConfig) {
+ long slaveId = 10000;
+ if (sourceConfig.getSlaveId() != null) {
+ slaveId = sourceConfig.getSlaveId();
+ }
+
+ Canal canal = new Canal();
+ canal.setId(sourceConfig.getCanalInstanceId());
+ canal.setName(sourceConfig.getDestination());
+ canal.setDesc(sourceConfig.getDesc());
+
+ CanalParameter parameter = new CanalParameter();
+
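+ // Keep meta, index and storage in memory: the embedded instance is single-node,
+ // and start positions are supplied explicitly through the parameters below.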
+ parameter.setRunMode(RunMode.EMBEDDED);
+ parameter.setClusterMode(ClusterMode.STANDALONE);
+ parameter.setMetaMode(MetaMode.MEMORY);
+ parameter.setHaMode(HAMode.HEARTBEAT);
+ parameter.setIndexMode(IndexMode.MEMORY);
+ parameter.setStorageMode(StorageMode.MEMORY);
+ parameter.setMemoryStorageBufferSize(32 * 1024);
+
+ parameter.setSourcingType(SourcingType.MYSQL);
+ parameter.setDbAddresses(Collections.singletonList(new InetSocketAddress(sourceConfig.getSourceConnectorConfig().getDbAddress(),
+ sourceConfig.getSourceConnectorConfig().getDbPort())));
+ parameter.setDbUsername(sourceConfig.getSourceConnectorConfig().getUserName());
+ parameter.setDbPassword(sourceConfig.getSourceConnectorConfig().getPassWord());
+
+ // set if enabled gtid mode
+ parameter.setGtidEnable(sourceConfig.isGTIDMode());
+
+ // check positions
+ // example: Arrays.asList("{\"journalName\":\"mysql-bin.000001\",\"position\":6163L,\"timestamp\":1322803601000L}",
+ // "{\"journalName\":\"mysql-bin.000001\",\"position\":6163L,\"timestamp\":1322803601000L}")
+ if (sourceConfig.getRecordPositions() != null && !sourceConfig.getRecordPositions().isEmpty()) {
+ List<RecordPosition> recordPositions = sourceConfig.getRecordPositions();
+ List<String> positions = new ArrayList<>();
+ recordPositions.forEach(recordPosition -> {
+ Map<String, Object> recordPositionMap = new HashMap<>();
+ CanalRecordPartition canalRecordPartition = (CanalRecordPartition) (recordPosition.getRecordPartition());
+ CanalRecordOffset canalRecordOffset = (CanalRecordOffset) (recordPosition.getRecordOffset());
+ recordPositionMap.put("journalName", canalRecordPartition.getJournalName());
+ recordPositionMap.put("timestamp", canalRecordPartition.getTimeStamp());
+ recordPositionMap.put("position", canalRecordOffset.getOffset());
+ // skip the GTID fields for MariaDB, which does not support this GTID mode
+ if (sourceConfig.isGTIDMode() && !sourceConfig.isMariaDB()) {
+ String gtidRange = canalRecordOffset.getGtid();
+ if (gtidRange != null) {
+ if (canalRecordOffset.getCurrentGtid() != null) {
+ gtidRange = EntryParser.replaceGtidRange(canalRecordOffset.getGtid(), canalRecordOffset.getCurrentGtid(),
+ sourceConfig.getServerUUID());
+ }
+ recordPositionMap.put("gtid", gtidRange);
+ }
+ }
+ positions.add(JsonUtils.toJSONString(recordPositionMap));
+ });
+ parameter.setPositions(positions);
+ }
+
+ parameter.setSlaveId(slaveId);
+
+ parameter.setDefaultConnectionTimeoutInSeconds(30);
+ parameter.setConnectionCharset("UTF-8");
+ parameter.setConnectionCharsetNumber((byte) 33);
+ parameter.setReceiveBufferSize(8 * 1024);
+ parameter.setSendBufferSize(8 * 1024);
+
+ // heartbeat detect
+ parameter.setDetectingEnable(false);
+
+ parameter.setDdlIsolation(sourceConfig.isDdlSync());
+ parameter.setFilterTableError(sourceConfig.isFilterTableError());
+ parameter.setMemoryStorageRawEntry(false);
+
+ canal.setCanalParameter(parameter);
+ return canal;
+ }
+
+
+ @Override
+ public void start() throws Exception {
+ if (running) {
+ return;
+ }
+ tableMgr.start();
+ canalServer.start();
+
+ canalServer.start(sourceConfig.getDestination());
+ this.clientIdentity = new ClientIdentity(sourceConfig.getDestination(), sourceConfig.getClientId(), tableFilter);
+ canalServer.subscribe(clientIdentity);
+
+ running = true;
+ }
+
+
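+ // A polled Canal batch may be split into several ConnectRecords (see poll()),
+ // so the batch is only acked once its last split is committed.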
+ @Override
+ public void commit(ConnectRecord record) {
+ long batchId = Long.parseLong(record.getExtension("messageId"));
+ int batchIndex = record.getExtension("batchIndex", Integer.class);
+ int totalBatches = record.getExtension("totalBatches", Integer.class);
+ if (batchIndex == totalBatches - 1) {
+ log.debug("ack records batchIndex:{}, totalBatches:{}, batchId:{}",
+ batchIndex, totalBatches, batchId);
+ canalServer.ack(clientIdentity, batchId);
+ }
+ }
+
+ @Override
+ public String name() {
+ return this.sourceConfig.getSourceConnectorConfig().getConnectorName();
+ }
+
+ @Override
+ public void onException(ConnectRecord record) {
+
+ }
+
+ @Override
+ public void stop() {
+ if (!running) {
+ return;
+ }
+ running = false;
+ canalServer.stop(sourceConfig.getDestination());
+ canalServer.stop();
+ }
+
+ @Override
+ public List<ConnectRecord> poll() {
+ int emptyTimes = 0;
+ com.alibaba.otter.canal.protocol.Message message = null;
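+ // batchTimeout < 0 means block until data arrives, backing off on empty fetches;
+ // otherwise each getWithoutAck call waits at most batchTimeout milliseconds.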
+ if (sourceConfig.getBatchTimeout() < 0) {
+ while (running) {
+ message = canalServer.getWithoutAck(clientIdentity, sourceConfig.getBatchSize());
+ if (message == null || message.getId() == -1L) { // empty
+ applyWait(emptyTimes++);
+ } else {
+ break;
+ }
+ }
+ } else { // perform with timeout
+ while (running) {
+ message =
+ canalServer.getWithoutAck(clientIdentity, sourceConfig.getBatchSize(), sourceConfig.getBatchTimeout(), TimeUnit.MILLISECONDS);
+ if (message == null || message.getId() == -1L) { // empty
+ continue;
+ }
+ break;
+ }
+ }
+
+ List<Entry> entries;
+ assert message != null;
+ if (message.isRaw()) {
+ entries = new ArrayList<>(message.getRawEntries().size());
+ for (ByteString entry : message.getRawEntries()) {
+ try {
+ entries.add(Entry.parseFrom(entry));
+ } catch (InvalidProtocolBufferException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ } else {
+ entries = message.getEntries();
+ }
+
+ List<ConnectRecord> result = new ArrayList<>();
+ // key: Xid offset
+ Map<Long, List<CanalConnectRecord>> connectorRecordMap = EntryParser.parse(sourceConfig, entries, tableMgr);
+
+ if (!connectorRecordMap.isEmpty()) {
+ Set<Map.Entry<Long, List<CanalConnectRecord>>> entrySet = connectorRecordMap.entrySet();
+ for (Map.Entry<Long, List<CanalConnectRecord>> entry : entrySet) {
+ List<CanalConnectRecord> connectRecordList = entry.getValue();
+ CanalConnectRecord lastRecord = entry.getValue().get(connectRecordList.size() - 1);
+ CanalRecordPartition canalRecordPartition = new CanalRecordPartition();
+ canalRecordPartition.setServerUUID(sourceConfig.getServerUUID());
+ canalRecordPartition.setJournalName(lastRecord.getJournalName());
+ canalRecordPartition.setTimeStamp(lastRecord.getExecuteTime());
+ // Xid offset with gtid
+ Long binLogOffset = entry.getKey();
+ CanalRecordOffset canalRecordOffset = new CanalRecordOffset();
+ canalRecordOffset.setOffset(binLogOffset);
+ if (StringUtils.isNotEmpty(lastRecord.getGtid()) && StringUtils.isNotEmpty(lastRecord.getCurrentGtid())) {
+ canalRecordOffset.setGtid(lastRecord.getGtid());
+ canalRecordOffset.setCurrentGtid(lastRecord.getCurrentGtid());
+ }
+
+ // split record list
+ List<List<CanalConnectRecord>> splitLists = new ArrayList<>();
+ for (int i = 0; i < connectRecordList.size(); i += sourceConfig.getBatchSize()) {
+ int end = Math.min(i + sourceConfig.getBatchSize(), connectRecordList.size());
+ List<CanalConnectRecord> subList = connectRecordList.subList(i, end);
+ splitLists.add(subList);
+ }
+
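+ // Each split shares the batch's partition/offset; the batchIndex and totalBatches
+ // extensions let commit() detect the final split before acking the whole batch.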
+ for (int i = 0; i < splitLists.size(); i++) {
+ ConnectRecord connectRecord = new ConnectRecord(canalRecordPartition, canalRecordOffset, System.currentTimeMillis());
+ connectRecord.addExtension("messageId", String.valueOf(message.getId()));
+ connectRecord.addExtension("batchIndex", i);
+ connectRecord.addExtension("totalBatches", splitLists.size());
+ connectRecord.setData(JsonUtils.toJSONString(splitLists.get(i)).getBytes(StandardCharsets.UTF_8));
+ result.add(connectRecord);
+ }
+ }
+ } else {
+ // the whole message was filtered out, but it still needs to be acked
+ canalServer.ack(clientIdentity, message.getId());
+ }
+
+ return result;
+ }
+
+ // Back off when a poll returns no data, to avoid a busy empty-polling loop
+ private void applyWait(int emptyTimes) {
+ int newEmptyTimes = Math.min(emptyTimes, maxEmptyTimes);
+ if (emptyTimes <= 3) {
+ Thread.yield();
+ } else {
+ LockSupport.parkNanos(1000 * 1000L * newEmptyTimes);
+ }
+ }
+
+}
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/position/CanalFullPositionMgr.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/position/CanalFullPositionMgr.java
index a9d47b4604..0ae1f8f8ff 100644
--- a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/position/CanalFullPositionMgr.java
+++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/position/CanalFullPositionMgr.java
@@ -60,7 +60,7 @@ public CanalFullPositionMgr(CanalSourceFullConfig config, RdbTableMgr tableMgr)
@Override
protected void run() throws Exception {
- if (config == null || config.getConnectorConfig() == null || config.getConnectorConfig().getDatabases() == null) {
+ if (config == null || config.getSourceConnectorConfig() == null || config.getSourceConnectorConfig().getDatabases() == null) {
log.info("config or database is null");
return;
}
@@ -93,7 +93,7 @@ public boolean isFinished() {
}
private void initPositions() {
- for (RdbDBDefinition database : config.getConnectorConfig().getDatabases()) {
+ for (RdbDBDefinition database : config.getSourceConnectorConfig().getDatabases()) {
for (RdbTableDefinition table : database.getTables()) {
try {
RdbSimpleTable simpleTable = new RdbSimpleTable(database.getSchemaName(), table.getTableName());
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/HttpSinkConnector.java b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/HttpSinkConnector.java
index 9b6038bdea..3df110f2e7 100644
--- a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/HttpSinkConnector.java
+++ b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/HttpSinkConnector.java
@@ -18,8 +18,8 @@
package org.apache.eventmesh.connector.http.sink;
import org.apache.eventmesh.common.config.connector.Config;
-import org.apache.eventmesh.connector.http.sink.config.HttpSinkConfig;
-import org.apache.eventmesh.connector.http.sink.config.SinkConnectorConfig;
+import org.apache.eventmesh.common.config.connector.http.HttpSinkConfig;
+import org.apache.eventmesh.common.config.connector.http.SinkConnectorConfig;
import org.apache.eventmesh.connector.http.sink.handler.HttpSinkHandler;
import org.apache.eventmesh.connector.http.sink.handler.impl.CommonHttpSinkHandler;
import org.apache.eventmesh.connector.http.sink.handler.impl.HttpSinkHandlerRetryWrapper;
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/data/HttpConnectRecord.java b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/data/HttpConnectRecord.java
index 95b40afe9e..9c8b1ce673 100644
--- a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/data/HttpConnectRecord.java
+++ b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/data/HttpConnectRecord.java
@@ -19,9 +19,11 @@
import org.apache.eventmesh.common.remote.offset.http.HttpRecordOffset;
import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord;
+import org.apache.eventmesh.openconnect.offsetmgmt.api.data.KeyValue;
import java.io.Serializable;
import java.time.LocalDateTime;
+import java.util.Base64;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
@@ -58,10 +60,9 @@ public class HttpConnectRecord implements Serializable {
*/
private String eventId;
- /**
- * The ConnectRecord to be sent
- */
- private ConnectRecord data;
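+ // data now carries the payload itself (a Base64 string for binary content) rather
+ // than the whole ConnectRecord; extensions keep the record metadata alongside it.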
+ private Object data;
+
+ private KeyValue extensions;
@Override
public String toString() {
@@ -71,6 +72,7 @@ public String toString() {
+ ", type='" + type
+ ", eventId='" + eventId
+ ", data=" + data
+ + ", extensions=" + extensions
+ '}';
}
@@ -83,16 +85,34 @@ public String toString() {
public static HttpConnectRecord convertConnectRecord(ConnectRecord record, String type) {
Map<String, ?> offsetMap = new HashMap<>();
if (record != null && record.getPosition() != null && record.getPosition().getRecordOffset() != null) {
- offsetMap = ((HttpRecordOffset) record.getPosition().getRecordOffset()).getOffsetMap();
+ if (HttpRecordOffset.class.equals(record.getPosition().getRecordOffsetClazz())) {
+ offsetMap = ((HttpRecordOffset) record.getPosition().getRecordOffset()).getOffsetMap();
+ }
}
String offset = "0";
if (!offsetMap.isEmpty()) {
offset = offsetMap.values().iterator().next().toString();
}
- return HttpConnectRecord.builder()
- .type(type)
- .eventId(type + "-" + offset)
- .data(record)
- .build();
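+ // byte[] payloads cannot be serialized as JSON directly, so they are Base64-encoded
+ // and flagged with the "isBase64" extension for the receiving side to decode.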
+ if (record.getData() instanceof byte[]) {
+ String data = Base64.getEncoder().encodeToString((byte[]) record.getData());
+ record.addExtension("isBase64", true);
+ return HttpConnectRecord.builder()
+ .type(type)
+ .createTime(LocalDateTime.now())
+ .eventId(type + "-" + offset)
+ .data(data)
+ .extensions(record.getExtensions())
+ .build();
+ } else {
+ record.addExtension("isBase64", false);
+ return HttpConnectRecord.builder()
+ .type(type)
+ .createTime(LocalDateTime.now())
+ .eventId(type + "-" + offset)
+ .data(record.getData())
+ .extensions(record.getExtensions())
+ .build();
+ }
}
+
}
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/AbstractHttpSinkHandler.java b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/AbstractHttpSinkHandler.java
index 36d01115bb..5c868f4aa9 100644
--- a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/AbstractHttpSinkHandler.java
+++ b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/AbstractHttpSinkHandler.java
@@ -17,7 +17,7 @@
package org.apache.eventmesh.connector.http.sink.handler;
-import org.apache.eventmesh.connector.http.sink.config.SinkConnectorConfig;
+import org.apache.eventmesh.common.config.connector.http.SinkConnectorConfig;
import org.apache.eventmesh.connector.http.sink.data.HttpConnectRecord;
import org.apache.eventmesh.connector.http.sink.data.HttpRetryEvent;
import org.apache.eventmesh.connector.http.sink.data.MultiHttpRequestContext;
@@ -81,7 +81,7 @@ public void handle(ConnectRecord record) {
attributes.put(HttpRetryEvent.PREFIX + httpConnectRecord.getHttpRecordId(), retryEvent);
// deliver the record
- deliver(url, httpConnectRecord, attributes);
+ deliver(url, httpConnectRecord, attributes, record);
}
}
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/HttpSinkHandler.java b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/HttpSinkHandler.java
index 1731809ab9..d5a27940e5 100644
--- a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/HttpSinkHandler.java
+++ b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/HttpSinkHandler.java
@@ -33,14 +33,14 @@
*
* Any class that needs to process ConnectRecords via HTTP or HTTPS should implement this interface.
* Implementing classes must provide implementations for the {@link #start()}, {@link #handle(ConnectRecord)},
- * {@link #deliver(URI, HttpConnectRecord, Map)}, and {@link #stop()} methods.
+ * {@link #deliver(URI, HttpConnectRecord, Map, ConnectRecord)}, and {@link #stop()} methods.
*
* Implementing classes should ensure thread safety and handle HTTP/HTTPS communication efficiently.
* The {@link #start()} method initializes any necessary resources for HTTP/HTTPS communication. The {@link #handle(ConnectRecord)} method processes a
- * ConnectRecord by sending it over HTTP or HTTPS. The {@link #deliver(URI, HttpConnectRecord, Map)} method processes HttpConnectRecord on specified
- * URL while returning its own processing logic {@link #stop()} method releases any resources used for HTTP/HTTPS communication.
+ * ConnectRecord by sending it over HTTP or HTTPS. The {@link #deliver(URI, HttpConnectRecord, Map, ConnectRecord)} method processes an
+ * HttpConnectRecord against the specified URL and returns its result. The {@link #stop()} method releases any resources used for HTTP/HTTPS communication.
*
- * It's recommended to handle exceptions gracefully within the {@link #deliver(URI, HttpConnectRecord, Map)} method
+ * It's recommended to handle exceptions gracefully within the {@link #deliver(URI, HttpConnectRecord, Map, ConnectRecord)} method
* to prevent message loss or processing interruptions.
*/
public interface HttpSinkHandler {
@@ -66,7 +66,7 @@ public interface HttpSinkHandler {
* @param attributes additional attributes to be used in processing
* @return processing chain
*/
- Future<HttpResponse<Buffer>> deliver(URI url, HttpConnectRecord httpConnectRecord, Map<String, Object> attributes);
+ Future<HttpResponse<Buffer>> deliver(URI url, HttpConnectRecord httpConnectRecord, Map<String, Object> attributes, ConnectRecord connectRecord);
/**
* Cleans up and releases resources used by the HTTP/HTTPS handler. This method should be called when the handler is no longer needed.
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/impl/CommonHttpSinkHandler.java b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/impl/CommonHttpSinkHandler.java
index 0907847455..e88707482f 100644
--- a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/impl/CommonHttpSinkHandler.java
+++ b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/impl/CommonHttpSinkHandler.java
@@ -17,8 +17,8 @@
package org.apache.eventmesh.connector.http.sink.handler.impl;
-import org.apache.eventmesh.common.remote.offset.http.HttpRecordOffset;
-import org.apache.eventmesh.connector.http.sink.config.SinkConnectorConfig;
+import org.apache.eventmesh.common.config.connector.http.SinkConnectorConfig;
+import org.apache.eventmesh.common.utils.JsonUtils;
import org.apache.eventmesh.connector.http.sink.data.HttpConnectRecord;
import org.apache.eventmesh.connector.http.sink.data.HttpRetryEvent;
import org.apache.eventmesh.connector.http.sink.data.MultiHttpRequestContext;
@@ -29,8 +29,11 @@
import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord;
import java.net.URI;
+import java.time.ZoneId;
+import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
+import java.util.Set;
import java.util.concurrent.TimeUnit;
import io.netty.handler.codec.http.HttpHeaderNames;
@@ -104,22 +107,25 @@ private void doInitWebClient() {
* @return processing chain
*/
@Override
- public Future<HttpResponse<Buffer>> deliver(URI url, HttpConnectRecord httpConnectRecord, Map<String, Object> attributes) {
+ public Future<HttpResponse<Buffer>> deliver(URI url, HttpConnectRecord httpConnectRecord, Map<String, Object> attributes,
+ ConnectRecord connectRecord) {
// create headers
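+ // forward the record's extensions in a JSON-encoded "extension" header so an
+ // HTTP source on the receiving end can restore them (see CommonProtocol)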
+ Map<String, Object> extensionMap = new HashMap<>();
+ Set<String> extensionKeySet = httpConnectRecord.getExtensions().keySet();
+ for (String extensionKey : extensionKeySet) {
+ Object v = httpConnectRecord.getExtensions().getObject(extensionKey);
+ extensionMap.put(extensionKey, v);
+ }
+
MultiMap headers = HttpHeaders.headers()
.set(HttpHeaderNames.CONTENT_TYPE, "application/json; charset=utf-8")
- .set(HttpHeaderNames.ACCEPT, "application/json; charset=utf-8");
-
+ .set(HttpHeaderNames.ACCEPT, "application/json; charset=utf-8")
+ .set("extension", JsonUtils.toJSONString(extensionMap));
// get timestamp and offset
- Long timestamp = httpConnectRecord.getData().getTimestamp();
- Map<String, ?> offset = null;
- try {
- // May throw NullPointerException.
- offset = ((HttpRecordOffset) httpConnectRecord.getData().getPosition().getRecordOffset()).getOffsetMap();
- } catch (NullPointerException e) {
- // ignore null pointer exception
- }
- final Map<String, ?> finalOffset = offset;
+ Long timestamp = httpConnectRecord.getCreateTime()
+ .atZone(ZoneId.systemDefault())
+ .toInstant()
+ .toEpochMilli();
// send the request
return this.webClient.post(url.getPath())
@@ -127,40 +133,38 @@ public Future<HttpResponse<Buffer>> deliver(URI url, HttpConnectRecord httpConne
.port(url.getPort() == -1 ? (Objects.equals(url.getScheme(), "https") ? 443 : 80) : url.getPort())
.putHeaders(headers)
.ssl(Objects.equals(url.getScheme(), "https"))
- .sendJson(httpConnectRecord)
+ .sendJson(httpConnectRecord.getData())
.onSuccess(res -> {
- log.info("Request sent successfully. Record: timestamp={}, offset={}", timestamp, finalOffset);
+ log.info("Request sent successfully. Record: timestamp={}", timestamp);
Exception e = null;
// log the response
if (HttpUtils.is2xxSuccessful(res.statusCode())) {
if (log.isDebugEnabled()) {
- log.debug("Received successful response: statusCode={}. Record: timestamp={}, offset={}, responseBody={}",
- res.statusCode(), timestamp, finalOffset, res.bodyAsString());
+ log.debug("Received successful response: statusCode={}. Record: timestamp={}, responseBody={}",
+ res.statusCode(), timestamp, res.bodyAsString());
} else {
- log.info("Received successful response: statusCode={}. Record: timestamp={}, offset={}", res.statusCode(), timestamp,
- finalOffset);
+ log.info("Received successful response: statusCode={}. Record: timestamp={}", res.statusCode(), timestamp);
}
} else {
if (log.isDebugEnabled()) {
- log.warn("Received non-2xx response: statusCode={}. Record: timestamp={}, offset={}, responseBody={}",
- res.statusCode(), timestamp, finalOffset, res.bodyAsString());
+ log.warn("Received non-2xx response: statusCode={}. Record: timestamp={}, responseBody={}",
+ res.statusCode(), timestamp, res.bodyAsString());
} else {
- log.warn("Received non-2xx response: statusCode={}. Record: timestamp={}, offset={}", res.statusCode(), timestamp,
- finalOffset);
+ log.warn("Received non-2xx response: statusCode={}. Record: timestamp={}", res.statusCode(), timestamp);
}
e = new RuntimeException("Unexpected HTTP response code: " + res.statusCode());
}
// try callback
- tryCallback(httpConnectRecord, e, attributes);
+ tryCallback(httpConnectRecord, e, attributes, connectRecord);
}).onFailure(err -> {
- log.error("Request failed to send. Record: timestamp={}, offset={}", timestamp, finalOffset, err);
+ log.error("Request failed to send. Record: timestamp={}", timestamp, err);
// try callback
- tryCallback(httpConnectRecord, err, attributes);
+ tryCallback(httpConnectRecord, err, attributes, connectRecord);
});
}
@@ -171,7 +175,7 @@ public Future<HttpResponse<Buffer>> deliver(URI url, HttpConnectRecord httpConne
* @param e the exception thrown during the request, may be null
* @param attributes additional attributes to be used in processing
*/
- private void tryCallback(HttpConnectRecord httpConnectRecord, Throwable e, Map<String, Object> attributes) {
+ private void tryCallback(HttpConnectRecord httpConnectRecord, Throwable e, Map<String, Object> attributes, ConnectRecord record) {
// get the retry event
HttpRetryEvent retryEvent = getAndUpdateRetryEvent(attributes, httpConnectRecord, e);
@@ -180,7 +184,6 @@ private void tryCallback(HttpConnectRecord httpConnectRecord, Throwable e, Map<String, Object> attributes) {
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/impl/HttpSinkHandlerRetryWrapper.java b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/impl/HttpSinkHandlerRetryWrapper.java
--- a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/impl/HttpSinkHandlerRetryWrapper.java
+++ b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/impl/HttpSinkHandlerRetryWrapper.java
- public Future<HttpResponse<Buffer>> deliver(URI url, HttpConnectRecord httpConnectRecord, Map<String, Object> attributes) {
+ public Future<HttpResponse<Buffer>> deliver(URI url, HttpConnectRecord httpConnectRecord, Map<String, Object> attributes,
+ ConnectRecord connectRecord) {
// Build the retry policy
RetryPolicy<HttpResponse<Buffer>> retryPolicy = RetryPolicy.<HttpResponse<Buffer>>builder()
@@ -104,7 +106,7 @@ public Future<HttpResponse<Buffer>> deliver(URI url, HttpConnectRecord httpConne
// Handle the ConnectRecord with retry policy
Failsafe.with(retryPolicy)
- .getStageAsync(() -> sinkHandler.deliver(url, httpConnectRecord, attributes).toCompletionStage());
+ .getStageAsync(() -> sinkHandler.deliver(url, httpConnectRecord, attributes, connectRecord).toCompletionStage());
return null;
}
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/impl/WebhookHttpSinkHandler.java b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/impl/WebhookHttpSinkHandler.java
index ff8f69d45a..7edd84a967 100644
--- a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/impl/WebhookHttpSinkHandler.java
+++ b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/sink/handler/impl/WebhookHttpSinkHandler.java
@@ -17,15 +17,16 @@
package org.apache.eventmesh.connector.http.sink.handler.impl;
+import org.apache.eventmesh.common.config.connector.http.HttpWebhookConfig;
+import org.apache.eventmesh.common.config.connector.http.SinkConnectorConfig;
import org.apache.eventmesh.common.exception.EventMeshException;
import org.apache.eventmesh.connector.http.common.SynchronizedCircularFifoQueue;
-import org.apache.eventmesh.connector.http.sink.config.HttpWebhookConfig;
-import org.apache.eventmesh.connector.http.sink.config.SinkConnectorConfig;
import org.apache.eventmesh.connector.http.sink.data.HttpConnectRecord;
import org.apache.eventmesh.connector.http.sink.data.HttpExportMetadata;
import org.apache.eventmesh.connector.http.sink.data.HttpExportRecord;
import org.apache.eventmesh.connector.http.sink.data.HttpExportRecordPage;
import org.apache.eventmesh.connector.http.sink.data.HttpRetryEvent;
+import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord;
import org.apache.commons.lang3.StringUtils;
@@ -209,9 +210,10 @@ public void start() {
* @return processing chain
*/
@Override
- public Future<HttpResponse<Buffer>> deliver(URI url, HttpConnectRecord httpConnectRecord, Map<String, Object> attributes) {
+ public Future<HttpResponse<Buffer>> deliver(URI url, HttpConnectRecord httpConnectRecord, Map<String, Object> attributes,
+ ConnectRecord connectRecord) {
// send the request
- Future<HttpResponse<Buffer>> responseFuture = super.deliver(url, httpConnectRecord, attributes);
+ Future<HttpResponse<Buffer>> responseFuture = super.deliver(url, httpConnectRecord, attributes, connectRecord);
// store the received data
return responseFuture.onComplete(arr -> {
// get tryEvent from attributes
@@ -260,8 +262,7 @@ private HttpExportMetadata buildHttpExportMetadata(URI url, HttpResponse
.code(response != null ? response.statusCode() : -1)
.message(msg)
.receivedTime(LocalDateTime.now())
- .httpRecordId(httpConnectRecord.getHttpRecordId())
- .recordId(httpConnectRecord.getData().getRecordId())
+ .recordId(httpConnectRecord.getHttpRecordId())
.retryNum(retryEvent.getCurrentRetries())
.build();
}
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/source/data/WebhookRequest.java b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/source/data/WebhookRequest.java
index 2fe7399da2..9e1dcb7b4c 100644
--- a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/source/data/WebhookRequest.java
+++ b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/source/data/WebhookRequest.java
@@ -20,6 +20,8 @@
import java.io.Serializable;
import java.util.Map;
+import io.vertx.ext.web.RoutingContext;
+
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
@@ -42,4 +44,6 @@ public class WebhookRequest implements Serializable {
private Object payload;
+ private RoutingContext routingContext;
+
}
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/source/protocol/impl/CommonProtocol.java b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/source/protocol/impl/CommonProtocol.java
index 738f045237..0761170ac0 100644
--- a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/source/protocol/impl/CommonProtocol.java
+++ b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/source/protocol/impl/CommonProtocol.java
@@ -19,21 +19,23 @@
import org.apache.eventmesh.common.Constants;
import org.apache.eventmesh.common.config.connector.http.SourceConnectorConfig;
+import org.apache.eventmesh.common.utils.JsonUtils;
import org.apache.eventmesh.connector.http.common.SynchronizedCircularFifoQueue;
import org.apache.eventmesh.connector.http.source.data.CommonResponse;
import org.apache.eventmesh.connector.http.source.data.WebhookRequest;
import org.apache.eventmesh.connector.http.source.protocol.Protocol;
import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord;
+import java.util.Base64;
import java.util.Map;
import java.util.stream.Collectors;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.vertx.core.http.HttpMethod;
+import io.vertx.core.json.JsonObject;
import io.vertx.ext.web.Route;
import io.vertx.ext.web.handler.BodyHandler;
-
import lombok.extern.slf4j.Slf4j;
/**
@@ -69,12 +71,13 @@ public void setHandler(Route route, SynchronizedCircularFifoQueue<Object> queue)
.handler(BodyHandler.create())
.handler(ctx -> {
// Get the payload
- String payloadStr = ctx.body().asString(Constants.DEFAULT_CHARSET.toString());
+ Object payload = ctx.body().asString(Constants.DEFAULT_CHARSET.toString());
+ payload = JsonUtils.parseObject(payload.toString(), String.class);
// Create and store the webhook request
Map<String, String> headerMap = ctx.request().headers().entries().stream()
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
- WebhookRequest webhookRequest = new WebhookRequest(PROTOCOL_NAME, ctx.request().absoluteURI(), headerMap, payloadStr);
+ WebhookRequest webhookRequest = new WebhookRequest(PROTOCOL_NAME, ctx.request().absoluteURI(), headerMap, payload, ctx);
if (!queue.offer(webhookRequest)) {
throw new IllegalStateException("Failed to store the request.");
}
@@ -110,7 +113,27 @@ public ConnectRecord convertToConnectRecord(Object message) {
ConnectRecord connectRecord = new ConnectRecord(null, null, System.currentTimeMillis(), request.getPayload());
connectRecord.addExtension("source", request.getProtocolName());
connectRecord.addExtension("url", request.getUrl());
- connectRecord.addExtension("headers", request.getHeaders());
+ request.getHeaders().forEach((k, v) -> {
+ if (k.equalsIgnoreCase("extension")) {
+ JsonObject extension = new JsonObject(v);
+ extension.forEach(e -> connectRecord.addExtension(e.getKey(), e.getValue()));
+ }
+ });
+ // check recordUniqueId
+ if (!connectRecord.getExtensions().containsKey("recordUniqueId")) {
+ connectRecord.addExtension("recordUniqueId", connectRecord.getRecordId());
+ }
+
+ // check data
+ if (connectRecord.getExtensionObj("isBase64") != null) {
+ if (Boolean.parseBoolean(connectRecord.getExtensionObj("isBase64").toString())) {
+ byte[] data = Base64.getDecoder().decode(connectRecord.getData().toString());
+ connectRecord.setData(data);
+ }
+ }
+ if (request.getRoutingContext() != null) {
+ connectRecord.addExtension("routingContext", request.getRoutingContext());
+ }
return connectRecord;
}
}
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/source/protocol/impl/GitHubProtocol.java b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/source/protocol/impl/GitHubProtocol.java
index e86efcbf33..fac8c0d801 100644
--- a/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/source/protocol/impl/GitHubProtocol.java
+++ b/eventmesh-connectors/eventmesh-connector-http/src/main/java/org/apache/eventmesh/connector/http/source/protocol/impl/GitHubProtocol.java
@@ -132,7 +132,7 @@ public void setHandler(Route route, SynchronizedCircularFifoQueue<Object> queue)
// Create and store the webhook request
Map<String, String> headerMap = headers.entries().stream()
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
- WebhookRequest webhookRequest = new WebhookRequest(PROTOCOL_NAME, ctx.request().absoluteURI(), headerMap, payloadStr);
+ WebhookRequest webhookRequest = new WebhookRequest(PROTOCOL_NAME, ctx.request().absoluteURI(), headerMap, payloadStr, ctx);
if (!queue.offer(webhookRequest)) {
throw new IllegalStateException("Failed to store the request.");
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/main/resources/META-INF/eventmesh/org.apache.eventmesh.openconnect.api.ConnectorCreateService b/eventmesh-connectors/eventmesh-connector-http/src/main/resources/META-INF/eventmesh/org.apache.eventmesh.openconnect.api.ConnectorCreateService
new file mode 100644
index 0000000000..d62ff11992
--- /dev/null
+++ b/eventmesh-connectors/eventmesh-connector-http/src/main/resources/META-INF/eventmesh/org.apache.eventmesh.openconnect.api.ConnectorCreateService
@@ -0,0 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+HTTP-Source=org.apache.eventmesh.connector.http.source.HttpSourceConnector
+HTTP-Sink=org.apache.eventmesh.connector.http.sink.HttpSinkConnector
diff --git a/eventmesh-connectors/eventmesh-connector-http/src/test/java/org/apache/eventmesh/connector/http/sink/HttpSinkConnectorTest.java b/eventmesh-connectors/eventmesh-connector-http/src/test/java/org/apache/eventmesh/connector/http/sink/HttpSinkConnectorTest.java
index 7ddba511c4..5f65f0749f 100644
--- a/eventmesh-connectors/eventmesh-connector-http/src/test/java/org/apache/eventmesh/connector/http/sink/HttpSinkConnectorTest.java
+++ b/eventmesh-connectors/eventmesh-connector-http/src/test/java/org/apache/eventmesh/connector/http/sink/HttpSinkConnectorTest.java
@@ -20,8 +20,8 @@
import static org.mockserver.model.HttpRequest.request;
-import org.apache.eventmesh.connector.http.sink.config.HttpSinkConfig;
-import org.apache.eventmesh.connector.http.sink.config.HttpWebhookConfig;
+import org.apache.eventmesh.common.config.connector.http.HttpSinkConfig;
+import org.apache.eventmesh.common.config.connector.http.HttpWebhookConfig;
import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord;
import org.apache.eventmesh.openconnect.util.ConfigUtil;
diff --git a/eventmesh-openconnect/eventmesh-openconnect-java/src/main/java/org/apache/eventmesh/openconnect/api/connector/SinkConnectorContext.java b/eventmesh-openconnect/eventmesh-openconnect-java/src/main/java/org/apache/eventmesh/openconnect/api/connector/SinkConnectorContext.java
index cf1b853474..1ef048b06c 100644
--- a/eventmesh-openconnect/eventmesh-openconnect-java/src/main/java/org/apache/eventmesh/openconnect/api/connector/SinkConnectorContext.java
+++ b/eventmesh-openconnect/eventmesh-openconnect-java/src/main/java/org/apache/eventmesh/openconnect/api/connector/SinkConnectorContext.java
@@ -18,6 +18,9 @@
package org.apache.eventmesh.openconnect.api.connector;
import org.apache.eventmesh.common.config.connector.SinkConfig;
+import org.apache.eventmesh.common.remote.job.JobType;
+
+import java.util.Map;
import lombok.Data;
@@ -29,4 +32,8 @@ public class SinkConnectorContext implements ConnectorContext {
public SinkConfig sinkConfig;
+ public Map<String, Object> runtimeConfig;
+
+ public JobType jobType;
+
}
diff --git a/eventmesh-openconnect/eventmesh-openconnect-java/src/main/java/org/apache/eventmesh/openconnect/api/connector/SourceConnectorContext.java b/eventmesh-openconnect/eventmesh-openconnect-java/src/main/java/org/apache/eventmesh/openconnect/api/connector/SourceConnectorContext.java
index f70e77248e..957452bb10 100644
--- a/eventmesh-openconnect/eventmesh-openconnect-java/src/main/java/org/apache/eventmesh/openconnect/api/connector/SourceConnectorContext.java
+++ b/eventmesh-openconnect/eventmesh-openconnect-java/src/main/java/org/apache/eventmesh/openconnect/api/connector/SourceConnectorContext.java
@@ -18,6 +18,7 @@
package org.apache.eventmesh.openconnect.api.connector;
import org.apache.eventmesh.common.config.connector.SourceConfig;
+import org.apache.eventmesh.common.remote.job.JobType;
import org.apache.eventmesh.common.remote.offset.RecordPosition;
import org.apache.eventmesh.openconnect.offsetmgmt.api.storage.OffsetStorageReader;
@@ -38,6 +39,8 @@ public class SourceConnectorContext implements ConnectorContext {
public Map<String, Object> runtimeConfig;
+ public JobType jobType;
+
// initial record position
public List recordPositionList;
diff --git a/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-api/src/main/java/org/apache/eventmesh/openconnect/offsetmgmt/api/data/DefaultKeyValue.java b/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-api/src/main/java/org/apache/eventmesh/openconnect/offsetmgmt/api/data/DefaultKeyValue.java
index a0390c1892..891df482be 100644
--- a/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-api/src/main/java/org/apache/eventmesh/openconnect/offsetmgmt/api/data/DefaultKeyValue.java
+++ b/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-api/src/main/java/org/apache/eventmesh/openconnect/offsetmgmt/api/data/DefaultKeyValue.java
@@ -23,6 +23,11 @@
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
+import lombok.Getter;
+import lombok.Setter;
+
+@Getter
+@Setter
public class DefaultKeyValue implements KeyValue {
private final Map<String, Object> properties;
diff --git a/eventmesh-runtime-v2/build.gradle b/eventmesh-runtime-v2/build.gradle
index ecba7bffb4..04b460ade3 100644
--- a/eventmesh-runtime-v2/build.gradle
+++ b/eventmesh-runtime-v2/build.gradle
@@ -35,6 +35,7 @@ dependencies {
implementation project(":eventmesh-openconnect:eventmesh-openconnect-java")
implementation project(":eventmesh-common")
implementation project(":eventmesh-connectors:eventmesh-connector-canal")
+ implementation project(":eventmesh-connectors:eventmesh-connector-http")
implementation project(":eventmesh-meta:eventmesh-meta-api")
implementation project(":eventmesh-meta:eventmesh-meta-nacos")
implementation project(":eventmesh-registry:eventmesh-registry-api")
diff --git a/eventmesh-runtime-v2/src/main/java/org/apache/eventmesh/runtime/connector/ConnectorRuntime.java b/eventmesh-runtime-v2/src/main/java/org/apache/eventmesh/runtime/connector/ConnectorRuntime.java
index 501f222fd3..3d3c864b58 100644
--- a/eventmesh-runtime-v2/src/main/java/org/apache/eventmesh/runtime/connector/ConnectorRuntime.java
+++ b/eventmesh-runtime-v2/src/main/java/org/apache/eventmesh/runtime/connector/ConnectorRuntime.java
@@ -31,8 +31,10 @@
import org.apache.eventmesh.common.protocol.grpc.adminserver.AdminServiceGrpc.AdminServiceStub;
import org.apache.eventmesh.common.protocol.grpc.adminserver.Metadata;
import org.apache.eventmesh.common.protocol.grpc.adminserver.Payload;
+import org.apache.eventmesh.common.remote.JobState;
import org.apache.eventmesh.common.remote.request.FetchJobRequest;
import org.apache.eventmesh.common.remote.request.ReportHeartBeatRequest;
+import org.apache.eventmesh.common.remote.request.ReportJobRequest;
import org.apache.eventmesh.common.remote.request.ReportVerifyRequest;
import org.apache.eventmesh.common.remote.response.FetchJobResponse;
import org.apache.eventmesh.common.utils.IPUtils;
@@ -129,10 +131,14 @@ public class ConnectorRuntime implements Runtime {
private final ScheduledExecutorService heartBeatExecutor = Executors.newSingleThreadScheduledExecutor();
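+ // verify reports are sent from a dedicated single thread so that gRPC reporting
+ // never blocks the record delivery path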
+ private final ExecutorService reportVerifyExecutor = Executors.newSingleThreadExecutor();
+
private final BlockingQueue<ConnectRecord> queue;
private volatile boolean isRunning = false;
+ private volatile boolean isFailed = false;
+
public static final String CALLBACK_EXTENSION = "callBackExtension";
private String adminServerAddr;
@@ -207,6 +213,8 @@ private void initConnectorService() throws Exception {
FetchJobResponse jobResponse = fetchJobConfig();
if (jobResponse == null) {
+ isFailed = true;
+ stop();
throw new RuntimeException("fetch job config fail");
}
@@ -245,6 +253,7 @@ private void initConnectorService() throws Exception {
SourceConnectorContext sourceConnectorContext = new SourceConnectorContext();
sourceConnectorContext.setSourceConfig(sourceConfig);
sourceConnectorContext.setRuntimeConfig(connectorRuntimeConfig.getRuntimeConfig());
+ sourceConnectorContext.setJobType(jobResponse.getType());
sourceConnectorContext.setOffsetStorageReader(offsetStorageReader);
if (CollectionUtils.isNotEmpty(jobResponse.getPosition())) {
sourceConnectorContext.setRecordPositionList(jobResponse.getPosition());
@@ -258,8 +267,12 @@ private void initConnectorService() throws Exception {
SinkConfig sinkConfig = (SinkConfig) ConfigUtil.parse(connectorRuntimeConfig.getSinkConnectorConfig(), sinkConnector.configClass());
SinkConnectorContext sinkConnectorContext = new SinkConnectorContext();
sinkConnectorContext.setSinkConfig(sinkConfig);
+ sinkConnectorContext.setRuntimeConfig(connectorRuntimeConfig.getRuntimeConfig());
+ sinkConnectorContext.setJobType(jobResponse.getType());
sinkConnector.init(sinkConnectorContext);
+ reportJobRequest(connectorRuntimeConfig.getJobID(), JobState.INIT);
+
}
private FetchJobResponse fetchJobConfig() {
@@ -306,6 +319,7 @@ public void start() throws Exception {
try {
startSinkConnector();
} catch (Exception e) {
+ isFailed = true;
log.error("sink connector [{}] start fail", sinkConnector.name(), e);
try {
this.stop();
@@ -320,6 +334,7 @@ public void start() throws Exception {
try {
startSourceConnector();
} catch (Exception e) {
+ isFailed = true;
log.error("source connector [{}] start fail", sourceConnector.name(), e);
try {
this.stop();
@@ -329,15 +344,25 @@ public void start() throws Exception {
throw new RuntimeException(e);
}
});
+
+ reportJobRequest(connectorRuntimeConfig.getJobID(), JobState.RUNNING);
}
@Override
public void stop() throws Exception {
+ log.info("ConnectorRuntime start stop");
+ isRunning = false;
+ if (isFailed) {
+ reportJobRequest(connectorRuntimeConfig.getJobID(), JobState.FAIL);
+ } else {
+ reportJobRequest(connectorRuntimeConfig.getJobID(), JobState.COMPLETE);
+ }
sourceConnector.stop();
sinkConnector.stop();
sourceService.shutdown();
sinkService.shutdown();
heartBeatExecutor.shutdown();
+ reportVerifyExecutor.shutdown();
requestObserver.onCompleted();
if (channel != null && !channel.isShutdown()) {
channel.shutdown();
@@ -351,6 +376,10 @@ private void startSourceConnector() throws Exception {
// TODO: use producer pub record to storage replace below
if (connectorRecordList != null && !connectorRecordList.isEmpty()) {
for (ConnectRecord record : connectorRecordList) {
+ // check recordUniqueId
+ if (record.getExtensions() == null || !record.getExtensions().containsKey("recordUniqueId")) {
+ record.addExtension("recordUniqueId", record.getRecordId());
+ }
queue.put(record);
@@ -364,10 +393,18 @@ private void startSourceConnector() throws Exception {
record.setCallback(new SendMessageCallback() {
@Override
public void onSuccess(SendResult result) {
+ log.debug("send record to sink callback success, record: {}", record);
// commit record
sourceConnector.commit(record);
- Optional<RecordOffsetManagement.SubmittedPosition> submittedRecordPosition = prepareToUpdateRecordOffset(record);
- submittedRecordPosition.ifPresent(RecordOffsetManagement.SubmittedPosition::ack);
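+ // offsets are now acked and committed per record inside the success callback,
+ // so a position only advances after the sink has confirmed delivery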
+ if (record.getPosition() != null) {
+ Optional<RecordOffsetManagement.SubmittedPosition> submittedRecordPosition = prepareToUpdateRecordOffset(record);
+ submittedRecordPosition.ifPresent(RecordOffsetManagement.SubmittedPosition::ack);
+ log.debug("start wait all messages to commit");
+ offsetManagement.awaitAllMessages(5000, TimeUnit.MILLISECONDS);
+ // update & commit offset
+ updateCommittableOffsets();
+ commitOffsets();
+ }
Optional<SendMessageCallback> callback =
Optional.ofNullable(record.getExtensionObj(CALLBACK_EXTENSION)).map(v -> (SendMessageCallback) v);
callback.ifPresent(cb -> cb.onSuccess(convertToSendResult(record)));
@@ -375,6 +412,7 @@ public void onSuccess(SendResult result) {
@Override
public void onException(SendExceptionContext sendExceptionContext) {
+ isFailed = true;
// handle exception
sourceConnector.onException(record);
log.error("send record to sink callback exception, process will shut down, record: {}", record,
@@ -386,11 +424,6 @@ public void onException(SendExceptionContext sendExceptionContext) {
}
}
});
-
- offsetManagement.awaitAllMessages(5000, TimeUnit.MILLISECONDS);
- // update & commit offset
- updateCommittableOffsets();
- commitOffsets();
}
}
}
@@ -406,24 +439,48 @@ private SendResult convertToSendResult(ConnectRecord record) {
}
private void reportVerifyRequest(ConnectRecord record, ConnectorRuntimeConfig connectorRuntimeConfig, ConnectorStage connectorStage) {
- String md5Str = md5(record.toString());
- ReportVerifyRequest reportVerifyRequest = new ReportVerifyRequest();
- reportVerifyRequest.setTaskID(connectorRuntimeConfig.getTaskID());
- reportVerifyRequest.setRecordID(record.getRecordId());
- reportVerifyRequest.setRecordSig(md5Str);
- reportVerifyRequest.setConnectorName(
- IPUtils.getLocalAddress() + "_" + connectorRuntimeConfig.getJobID() + "_" + connectorRuntimeConfig.getRegion());
- reportVerifyRequest.setConnectorStage(connectorStage.name());
- reportVerifyRequest.setPosition(JsonUtils.toJSONString(record.getPosition()));
-
- Metadata metadata = Metadata.newBuilder().setType(ReportVerifyRequest.class.getSimpleName()).build();
+ reportVerifyExecutor.submit(() -> {
+ try {
+ // use record data + recordUniqueId for md5
+ String md5Str = md5(record.getData().toString() + record.getExtension("recordUniqueId"));
+ ReportVerifyRequest reportVerifyRequest = new ReportVerifyRequest();
+ reportVerifyRequest.setTaskID(connectorRuntimeConfig.getTaskID());
+ reportVerifyRequest.setJobID(connectorRuntimeConfig.getJobID());
+ reportVerifyRequest.setRecordID(record.getRecordId());
+ reportVerifyRequest.setRecordSig(md5Str);
+ reportVerifyRequest.setConnectorName(
+ IPUtils.getLocalAddress() + "_" + connectorRuntimeConfig.getJobID() + "_" + connectorRuntimeConfig.getRegion());
+ reportVerifyRequest.setConnectorStage(connectorStage.name());
+ reportVerifyRequest.setPosition(JsonUtils.toJSONString(record.getPosition()));
+
+ Metadata metadata = Metadata.newBuilder().setType(ReportVerifyRequest.class.getSimpleName()).build();
+
+ Payload request = Payload.newBuilder().setMetadata(metadata)
+ .setBody(
+ Any.newBuilder().setValue(UnsafeByteOperations.unsafeWrap(Objects.requireNonNull(JsonUtils.toJSONBytes(reportVerifyRequest))))
+ .build())
+ .build();
+
+ requestObserver.onNext(request);
+ } catch (Exception e) {
+ log.error("Failed to report verify request", e);
+ }
+ });
+ }
- Payload request = Payload.newBuilder().setMetadata(metadata)
- .setBody(Any.newBuilder().setValue(UnsafeByteOperations.unsafeWrap(Objects.requireNonNull(JsonUtils.toJSONBytes(reportVerifyRequest))))
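+ // report job lifecycle transitions (INIT/RUNNING/COMPLETE/FAIL) to the admin
+ // server over the existing gRPC request stream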
+ private void reportJobRequest(String jobId, JobState jobState) throws InterruptedException {
+ ReportJobRequest reportJobRequest = new ReportJobRequest();
+ reportJobRequest.setJobID(jobId);
+ reportJobRequest.setState(jobState);
+ Metadata metadata = Metadata.newBuilder()
+ .setType(ReportJobRequest.class.getSimpleName())
+ .build();
+ Payload payload = Payload.newBuilder()
+ .setMetadata(metadata)
+ .setBody(Any.newBuilder().setValue(UnsafeByteOperations.unsafeWrap(Objects.requireNonNull(JsonUtils.toJSONBytes(reportJobRequest))))
.build())
.build();
-
- requestObserver.onNext(request);
+ requestObserver.onNext(payload);
}
private String md5(String input) {
diff --git a/eventmesh-runtime-v2/src/main/java/org/apache/eventmesh/runtime/connector/ConnectorRuntimeConfig.java b/eventmesh-runtime-v2/src/main/java/org/apache/eventmesh/runtime/connector/ConnectorRuntimeConfig.java
index ab6fc3aaf5..da1d0beb1c 100644
--- a/eventmesh-runtime-v2/src/main/java/org/apache/eventmesh/runtime/connector/ConnectorRuntimeConfig.java
+++ b/eventmesh-runtime-v2/src/main/java/org/apache/eventmesh/runtime/connector/ConnectorRuntimeConfig.java
@@ -18,7 +18,10 @@
package org.apache.eventmesh.runtime.connector;
import org.apache.eventmesh.common.config.Config;
+import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSourceConfig;
+import org.apache.eventmesh.openconnect.util.ConfigUtil;
+import java.util.HashMap;
import java.util.Map;
import lombok.Data;
From bd3cb46d10fda7742690493552d20a3d26b2713b Mon Sep 17 00:00:00 2001
From: xwm1992
Date: Wed, 21 Aug 2024 19:30:08 +0800
Subject: [PATCH 5/6] fix check style error
---
.../connector/CanalSinkCheckConnector.java | 406 ++++++++++++++++++
.../connector/CanalSourceCheckConnector.java | 186 ++++++++
2 files changed, 592 insertions(+)
create mode 100644 eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkCheckConnector.java
create mode 100644 eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceCheckConnector.java
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkCheckConnector.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkCheckConnector.java
new file mode 100644
index 0000000000..84e01ca85c
--- /dev/null
+++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkCheckConnector.java
@@ -0,0 +1,406 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.eventmesh.connector.canal.sink.connector;
+
+import org.apache.eventmesh.common.config.connector.Config;
+import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSinkFullConfig;
+import org.apache.eventmesh.common.config.connector.rdb.canal.mysql.Constants;
+import org.apache.eventmesh.common.config.connector.rdb.canal.mysql.MySQLColumnDef;
+import org.apache.eventmesh.common.config.connector.rdb.canal.mysql.MySQLTableDef;
+import org.apache.eventmesh.common.exception.EventMeshException;
+import org.apache.eventmesh.common.remote.offset.canal.CanalFullRecordOffset;
+import org.apache.eventmesh.connector.canal.DatabaseConnection;
+import org.apache.eventmesh.connector.canal.SqlUtils;
+import org.apache.eventmesh.connector.canal.source.table.RdbTableMgr;
+import org.apache.eventmesh.openconnect.api.ConnectorCreateService;
+import org.apache.eventmesh.openconnect.api.connector.ConnectorContext;
+import org.apache.eventmesh.openconnect.api.connector.SinkConnectorContext;
+import org.apache.eventmesh.openconnect.api.sink.Sink;
+import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord;
+
+import org.apache.commons.lang3.StringUtils;
+
+import java.math.BigDecimal;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.time.LocalDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.locks.LockSupport;
+
+import com.alibaba.druid.pool.DruidPooledConnection;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class CanalSinkCheckConnector implements Sink, ConnectorCreateService<Sink> {
+ private CanalSinkFullConfig config;
+ private RdbTableMgr tableMgr;
+ private final DateTimeFormatter dataTimePattern = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSSSSSSSS");
+
+ @Override
+ public void start() throws Exception {
+ tableMgr.start();
+ }
+
+ @Override
+ public void stop() throws Exception {
+
+ }
+
+ @Override
+ public Sink create() {
+ return new CanalSinkCheckConnector();
+ }
+
+ @Override
+ public Class<? extends Config> configClass() {
+ return CanalSinkFullConfig.class;
+ }
+
+ @Override
+ public void init(Config config) throws Exception {
+ this.config = (CanalSinkFullConfig) config;
+ init();
+ }
+
+ @Override
+ public void init(ConnectorContext connectorContext) throws Exception {
+ this.config = (CanalSinkFullConfig) ((SinkConnectorContext) connectorContext).getSinkConfig();
+ init();
+ }
+
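+    /**
+     * Validates the sink connector config, opens the target data source with
+     * auto-commit disabled (batches are committed explicitly in put), and
+     * prepares the table manager that resolves column definitions.
+     */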
+ private void init() {
+ if (config.getSinkConnectorConfig() == null) {
+ throw new EventMeshException(String.format("[%s] sink config is null", this.getClass()));
+ }
+ DatabaseConnection.sinkConfig = this.config.getSinkConnectorConfig();
+ DatabaseConnection.initSinkConnection();
+ DatabaseConnection.sinkDataSource.setDefaultAutoCommit(false);
+
+ tableMgr = new RdbTableMgr(this.config.getSinkConnectorConfig(), DatabaseConnection.sinkDataSource);
+ }
+
+ @Override
+ public void commit(ConnectRecord record) {
+
+ }
+
+ @Override
+ public String name() {
+ return null;
+ }
+
+ @Override
+ public void onException(ConnectRecord record) {
+
+ }
+
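+    /**
+     * Writes one batch of checked rows. The record data is a list of
+     * column-name to value maps, and the CanalFullRecordOffset on the record
+     * position identifies the target schema and table; every row is added to a
+     * single prepared-statement batch and committed in one transaction.
+     */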
+ @Override
+    public void put(List<ConnectRecord> sinkRecords) {
+ if (sinkRecords == null || sinkRecords.isEmpty() || sinkRecords.get(0) == null) {
+ if (log.isDebugEnabled()) {
+ log.debug("[{}] got sink records are none", this.getClass());
+ }
+ return;
+ }
+ ConnectRecord record = sinkRecords.get(0);
+        List<Map<String, Object>> data = (List<Map<String, Object>>) record.getData();
+ if (data == null || data.isEmpty()) {
+ if (log.isDebugEnabled()) {
+ log.debug("[{}] got rows data is none", this.getClass());
+ }
+ return;
+ }
+ CanalFullRecordOffset offset = (CanalFullRecordOffset) record.getPosition().getRecordOffset();
+ if (offset == null || offset.getPosition() == null) {
+ if (log.isDebugEnabled()) {
+ log.debug("[{}] got canal full offset is none", this.getClass());
+ }
+ return;
+ }
+
+ MySQLTableDef tableDefinition = (MySQLTableDef) tableMgr.getTable(offset.getPosition().getSchema(), offset.getPosition().getTableName());
+ if (tableDefinition == null) {
+ log.warn("target schema [{}] table [{}] is not exists", offset.getPosition().getSchema(), offset.getPosition().getTableName());
+ return;
+ }
+        List<MySQLColumnDef> cols = new ArrayList<>(tableDefinition.getColumnDefinitions().values());
+ String sql = generateInsertPrepareSql(offset.getPosition().getSchema(), offset.getPosition().getTableName(),
+ cols);
+ DruidPooledConnection connection = null;
+ PreparedStatement statement = null;
+ try {
+ connection = DatabaseConnection.sinkDataSource.getConnection();
+            statement = connection.prepareStatement(sql);
+            for (Map<String, Object> col : data) {
+ setPrepareParams(statement, col, cols);
+ log.info("insert sql {}", statement.toString());
+ statement.addBatch();
+ }
+ statement.executeBatch();
+ connection.commit();
+ } catch (SQLException e) {
+ log.warn("full sink process schema [{}] table [{}] connector write fail", tableDefinition.getSchemaName(), tableDefinition.getTableName(),
+ e);
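+            // pause ~3 ms after a write failure; the batch is not retried here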
+ LockSupport.parkNanos(3000 * 1000L);
+ } catch (Exception e) {
+ log.error("full sink process schema [{}] table [{}] catch unknown exception", tableDefinition.getSchemaName(),
+ tableDefinition.getTableName(), e);
+ try {
+ if (connection != null && !connection.isClosed()) {
+ connection.rollback();
+ }
+ } catch (SQLException rollback) {
+ log.warn("full sink process schema [{}] table [{}] rollback fail", tableDefinition.getSchemaName(),
+                    tableDefinition.getTableName(), rollback);
+ }
+ } finally {
+ if (statement != null) {
+ try {
+ statement.close();
+ } catch (SQLException e) {
+ log.info("close prepare statement fail", e);
+ }
+ }
+
+ if (connection != null) {
+ try {
+ connection.close();
+ } catch (SQLException e) {
+ log.info("close db connection fail", e);
+ }
+ }
+ }
+ }
+
+    private void setPrepareParams(PreparedStatement preparedStatement, Map<String, Object> col, List<MySQLColumnDef> columnDefs) throws Exception {
+ for (int i = 0; i < columnDefs.size(); i++) {
+ writeColumn(preparedStatement, i + 1, columnDefs.get(i), col.get(columnDefs.get(i).getName()));
+ }
+ }
+
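+    /**
+     * Binds one column value at the given parameter index, converting it
+     * according to the MySQL column type; an unknown column definition falls
+     * back to a plain string binding, and null values are bound with setNull.
+     */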
+ public void writeColumn(PreparedStatement ps, int index, MySQLColumnDef colType, Object value) throws Exception {
+ if (colType == null) {
+ String colVal = null;
+ if (value != null) {
+ colVal = value.toString();
+ }
+ if (colVal == null) {
+ ps.setNull(index, Types.VARCHAR);
+ } else {
+ ps.setString(index, colVal);
+ }
+ } else if (value == null) {
+ ps.setNull(index, colType.getJdbcType().getVendorTypeNumber());
+ } else {
+ switch (colType.getType()) {
+ case TINYINT:
+ case SMALLINT:
+ case MEDIUMINT:
+ case INT:
+ Long longValue = SqlUtils.toLong(value);
+ if (longValue == null) {
+                        ps.setNull(index, Types.INTEGER);
+ return;
+ } else {
+ ps.setLong(index, longValue);
+ return;
+ }
+ case BIGINT:
+ case DECIMAL:
+ BigDecimal bigDecimalValue = SqlUtils.toBigDecimal(value);
+ if (bigDecimalValue == null) {
+                        ps.setNull(index, Types.DECIMAL);
+ return;
+ } else {
+ ps.setBigDecimal(index, bigDecimalValue);
+ return;
+ }
+ case FLOAT:
+ case DOUBLE:
+ Double doubleValue = SqlUtils.toDouble(value);
+ if (doubleValue == null) {
+                        ps.setNull(index, Types.DOUBLE);
+ } else {
+ ps.setDouble(index, doubleValue);
+ }
+ return;
+ case DATE:
+ case DATETIME:
+ case TIMESTAMP:
+ LocalDateTime dateValue = null;
+ if (!SqlUtils.isZeroTime(value)) {
+ try {
+ dateValue = SqlUtils.toLocalDateTime(value);
+ } catch (Exception e) {
+ ps.setString(index, SqlUtils.convertToString(value));
+ return;
+ }
+ } else if (StringUtils.isNotBlank(config.getZeroDate())) {
+ dateValue = SqlUtils.toLocalDateTime(config.getZeroDate());
+ } else {
+ ps.setObject(index, value);
+ return;
+ }
+ if (dateValue == null) {
+ ps.setNull(index, Types.TIMESTAMP);
+ } else {
+                        ps.setString(index, dateTimePattern.format(dateValue));
+ }
+ return;
+ case TIME:
+ String timeValue = SqlUtils.toMySqlTime(value);
+ if (StringUtils.isBlank(timeValue)) {
+                        ps.setNull(index, Types.VARCHAR);
+ return;
+ } else {
+ ps.setString(index, timeValue);
+ return;
+ }
+ case YEAR:
+ LocalDateTime yearValue = null;
+ if (!SqlUtils.isZeroTime(value)) {
+ yearValue = SqlUtils.toLocalDateTime(value);
+ } else if (StringUtils.isNotBlank(config.getZeroDate())) {
+ yearValue = SqlUtils.toLocalDateTime(config.getZeroDate());
+ } else {
+ ps.setInt(index, 0);
+ return;
+ }
+ if (yearValue == null) {
+                        ps.setNull(index, Types.INTEGER);
+ } else {
+ ps.setInt(index, yearValue.getYear());
+ }
+ return;
+ case CHAR:
+ case VARCHAR:
+ case TINYTEXT:
+ case TEXT:
+ case MEDIUMTEXT:
+ case LONGTEXT:
+ case ENUM:
+ case SET:
+ String strValue = value.toString();
+ if (strValue == null) {
+ ps.setNull(index, Types.VARCHAR);
+ return;
+ } else {
+ ps.setString(index, strValue);
+ return;
+ }
+ case JSON:
+ String jsonValue = value.toString();
+ if (jsonValue == null) {
+ ps.setNull(index, Types.VARCHAR);
+ } else {
+ ps.setString(index, jsonValue);
+ }
+ return;
+ case BIT:
+ if (value instanceof Boolean) {
+ byte[] arrayBoolean = new byte[1];
+ arrayBoolean[0] = (byte) (Boolean.TRUE.equals(value) ? 1 : 0);
+ ps.setBytes(index, arrayBoolean);
+ return;
+ } else if (value instanceof Number) {
+ ps.setBytes(index, SqlUtils.numberToBinaryArray((Number) value));
+ return;
+ } else if ((value instanceof byte[]) || value.toString().startsWith("0x") || value.toString().startsWith("0X")) {
+ byte[] arrayBoolean = SqlUtils.toBytes(value);
+ if (arrayBoolean == null || arrayBoolean.length == 0) {
+ ps.setNull(index, Types.BIT);
+ return;
+ } else {
+ ps.setBytes(index, arrayBoolean);
+ return;
+ }
+ } else {
+ ps.setBytes(index, SqlUtils.numberToBinaryArray(SqlUtils.toInt(value)));
+ return;
+ }
+ case BINARY:
+ case VARBINARY:
+ case TINYBLOB:
+ case BLOB:
+ case MEDIUMBLOB:
+ case LONGBLOB:
+ byte[] binaryValue = SqlUtils.toBytes(value);
+ if (binaryValue == null) {
+ ps.setNull(index, Types.BINARY);
+ return;
+ } else {
+ ps.setBytes(index, binaryValue);
+ return;
+ }
+ case GEOMETRY:
+ case GEOMETRY_COLLECTION:
+ case GEOM_COLLECTION:
+ case POINT:
+ case LINESTRING:
+ case POLYGON:
+ case MULTIPOINT:
+ case MULTILINESTRING:
+ case MULTIPOLYGON:
+ String geoValue = SqlUtils.toGeometry(value);
+ if (geoValue == null) {
+ ps.setNull(index, Types.VARCHAR);
+ return;
+ }
+ ps.setString(index, geoValue);
+ return;
+ default:
+ throw new UnsupportedOperationException("columnType '" + colType + "' Unsupported.");
+ }
+ }
+ }
+
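+    /**
+     * Builds the INSERT IGNORE prepared-statement SQL for a table, wrapping
+     * schema, table, and column names in Constants.MySQLQuot and letting each
+     * column type contribute its own placeholder expression.
+     */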
+    private String generateInsertPrepareSql(String schema, String table, List<MySQLColumnDef> cols) {
+ StringBuilder builder = new StringBuilder();
+ builder.append("INSERT IGNORE INTO ");
+ builder.append(Constants.MySQLQuot);
+ builder.append(schema);
+ builder.append(Constants.MySQLQuot);
+ builder.append(".");
+ builder.append(Constants.MySQLQuot);
+ builder.append(table);
+ builder.append(Constants.MySQLQuot);
+ StringBuilder columns = new StringBuilder();
+ StringBuilder values = new StringBuilder();
+ for (MySQLColumnDef colInfo : cols) {
+ if (columns.length() > 0) {
+ columns.append(", ");
+ values.append(", ");
+ }
+ String wrapName = Constants.MySQLQuot + colInfo.getName() + Constants.MySQLQuot;
+ columns.append(wrapName);
+ values.append(colInfo.getType() == null ? "?" : colInfo.getType().genPrepareStatement4Insert());
+ }
+ builder.append("(").append(columns).append(")");
+ builder.append(" VALUES ");
+ builder.append("(").append(values).append(")");
+ return builder.toString();
+ }
+
+}
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceCheckConnector.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceCheckConnector.java
new file mode 100644
index 0000000000..841c9a4814
--- /dev/null
+++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceCheckConnector.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.eventmesh.connector.canal.source.connector;
+
+import org.apache.eventmesh.common.AbstractComponent;
+import org.apache.eventmesh.common.EventMeshThreadFactory;
+import org.apache.eventmesh.common.config.connector.Config;
+import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSourceFullConfig;
+import org.apache.eventmesh.common.config.connector.rdb.canal.JobRdbFullPosition;
+import org.apache.eventmesh.common.config.connector.rdb.canal.RdbDBDefinition;
+import org.apache.eventmesh.common.config.connector.rdb.canal.RdbTableDefinition;
+import org.apache.eventmesh.common.config.connector.rdb.canal.mysql.MySQLTableDef;
+import org.apache.eventmesh.common.exception.EventMeshException;
+import org.apache.eventmesh.common.utils.JsonUtils;
+import org.apache.eventmesh.connector.canal.DatabaseConnection;
+import org.apache.eventmesh.connector.canal.source.position.CanalFullPositionMgr;
+import org.apache.eventmesh.connector.canal.source.position.TableFullPosition;
+import org.apache.eventmesh.connector.canal.source.table.RdbSimpleTable;
+import org.apache.eventmesh.connector.canal.source.table.RdbTableMgr;
+import org.apache.eventmesh.openconnect.api.ConnectorCreateService;
+import org.apache.eventmesh.openconnect.api.connector.ConnectorContext;
+import org.apache.eventmesh.openconnect.api.connector.SourceConnectorContext;
+import org.apache.eventmesh.openconnect.api.source.Source;
+import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import lombok.extern.slf4j.Slf4j;
+
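+/**
+ * Source side of the canal full-data check flow. It resolves the configured
+ * tables and their full positions, runs one CanalFullProducer per table on a
+ * thread pool, and hands produced record batches to poll() through a blocking
+ * queue.
+ */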
+@Slf4j
+public class CanalSourceCheckConnector extends AbstractComponent implements Source, ConnectorCreateService<Source> {
+ private CanalSourceFullConfig config;
+ private CanalFullPositionMgr positionMgr;
+ private RdbTableMgr tableMgr;
+ private ThreadPoolExecutor executor;
+    private final BlockingQueue<List<ConnectRecord>> queue = new LinkedBlockingQueue<>();
+ private final AtomicBoolean flag = new AtomicBoolean(true);
+
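+    /**
+     * Starts the check job: loads table definitions and positions, returns
+     * immediately when the position manager reports the job finished, and
+     * otherwise spawns one producer per configured table on a fixed-size pool.
+     */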
+ @Override
+ protected void run() throws Exception {
+ this.tableMgr.start();
+ this.positionMgr.start();
+ if (positionMgr.isFinished()) {
+ log.info("connector [{}] has finished the job", config.getSourceConnectorConfig().getConnectorName());
+ return;
+ }
+ executor = new ThreadPoolExecutor(config.getParallel(), config.getParallel(), 0L, TimeUnit.MILLISECONDS,
+ new LinkedBlockingQueue<>(), new EventMeshThreadFactory("canal-source-full"));
+        List<CanalFullProducer> producers = new LinkedList<>();
+ if (config.getSourceConnectorConfig().getDatabases() != null) {
+ for (RdbDBDefinition db : config.getSourceConnectorConfig().getDatabases()) {
+ for (RdbTableDefinition table : db.getTables()) {
+ try {
+ log.info("it will create producer of db [{}] table [{}]", db.getSchemaName(), table.getTableName());
+ RdbSimpleTable simpleTable = new RdbSimpleTable(db.getSchemaName(), table.getTableName());
+ JobRdbFullPosition position = positionMgr.getPosition(simpleTable);
+ if (position == null) {
+ throw new EventMeshException(String.format("db [%s] table [%s] have none position info",
+ db.getSchemaName(), table.getTableName()));
+ }
+ RdbTableDefinition tableDefinition = tableMgr.getTable(simpleTable);
+ if (tableDefinition == null) {
+ throw new EventMeshException(String.format("db [%s] table [%s] have none table definition info",
+ db.getSchemaName(), table.getTableName()));
+ }
+
+ producers.add(new CanalFullProducer(queue, DatabaseConnection.sourceDataSource, (MySQLTableDef) tableDefinition,
+ JsonUtils.parseObject(position.getPrimaryKeyRecords(), TableFullPosition.class),
+ config.getFlushSize()));
+ } catch (Exception e) {
+ log.error("create schema [{}] table [{}] producers fail", db.getSchemaName(),
+ table.getTableName(), e);
+ }
+ }
+ }
+ }
+ producers.forEach(p -> executor.execute(() -> p.start(flag)));
+ }
+
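+    /**
+     * Stops all producers via the shared life flag, drains the executor with a
+     * five-second grace period before forcing shutdown, and finally closes the
+     * source data source.
+     */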
+ @Override
+ protected void shutdown() throws Exception {
+ flag.set(false);
+ if (!executor.isShutdown()) {
+ executor.shutdown();
+ try {
+ if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
+ log.warn("wait thread pool shutdown timeout, it will shutdown now");
+ executor.shutdownNow();
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ log.info("shutdown thread pool fail");
+ }
+ }
+ if (DatabaseConnection.sourceDataSource != null) {
+ DatabaseConnection.sourceDataSource.close();
+ log.info("data source has been closed");
+ }
+ }
+
+ @Override
+ public Source create() {
+ return new CanalSourceCheckConnector();
+ }
+
+ @Override
+    public Class<? extends Config> configClass() {
+ return CanalSourceFullConfig.class;
+ }
+
+ @Override
+ public void init(Config config) throws Exception {
+ this.config = (CanalSourceFullConfig) config;
+ init();
+ }
+
+ private void init() {
+ DatabaseConnection.sourceConfig = this.config.getSourceConnectorConfig();
+ DatabaseConnection.initSourceConnection();
+ this.tableMgr = new RdbTableMgr(config.getSourceConnectorConfig(), DatabaseConnection.sourceDataSource);
+ this.positionMgr = new CanalFullPositionMgr(config, tableMgr);
+ }
+
+ @Override
+ public void init(ConnectorContext connectorContext) throws Exception {
+ SourceConnectorContext sourceConnectorContext = (SourceConnectorContext) connectorContext;
+ this.config = (CanalSourceFullConfig) sourceConnectorContext.getSourceConfig();
+ init();
+ }
+
+ @Override
+ public void commit(ConnectRecord record) {
+ // nothing
+ }
+
+ @Override
+ public String name() {
+ return this.config.getSourceConnectorConfig().getConnectorName();
+ }
+
+ @Override
+ public void onException(ConnectRecord record) {
+
+ }
+
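+    /**
+     * Blocks until a producer publishes a non-empty batch, polling the queue in
+     * five-second intervals; returns null once the life flag is cleared or the
+     * thread is interrupted.
+     */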
+ @Override
+    public List<ConnectRecord> poll() {
+ while (flag.get()) {
+ try {
+                List<ConnectRecord> records = queue.poll(5, TimeUnit.SECONDS);
+ if (records == null || records.isEmpty()) {
+ continue;
+ }
+ return records;
+ } catch (InterruptedException ignore) {
+ Thread.currentThread().interrupt();
+ log.info("[{}] thread interrupted", this.getClass());
+ return null;
+ }
+ }
+ log.info("[{}] life flag is stop, so return null", this.getClass());
+ return null;
+ }
+
+}
From d3a798d98ebdf06606fd3066d41b5f099e767fdc Mon Sep 17 00:00:00 2001
From: xwm1992
Date: Wed, 21 Aug 2024 19:46:09 +0800
Subject: [PATCH 6/6] fix check style error
---
.../eventmesh/runtime/connector/ConnectorRuntimeConfig.java | 3 ---
1 file changed, 3 deletions(-)
diff --git a/eventmesh-runtime-v2/src/main/java/org/apache/eventmesh/runtime/connector/ConnectorRuntimeConfig.java b/eventmesh-runtime-v2/src/main/java/org/apache/eventmesh/runtime/connector/ConnectorRuntimeConfig.java
index da1d0beb1c..ab6fc3aaf5 100644
--- a/eventmesh-runtime-v2/src/main/java/org/apache/eventmesh/runtime/connector/ConnectorRuntimeConfig.java
+++ b/eventmesh-runtime-v2/src/main/java/org/apache/eventmesh/runtime/connector/ConnectorRuntimeConfig.java
@@ -18,10 +18,7 @@
package org.apache.eventmesh.runtime.connector;
import org.apache.eventmesh.common.config.Config;
-import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSourceConfig;
-import org.apache.eventmesh.openconnect.util.ConfigUtil;
-import java.util.HashMap;
import java.util.Map;
import lombok.Data;