diff --git a/eventmesh-connectors/eventmesh-connector-canal/build.gradle b/eventmesh-connectors/eventmesh-connector-canal/build.gradle new file mode 100644 index 0000000000..0d914b7ae8 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/build.gradle @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +List canal = [ + "com.alibaba.otter:canal.instance.manager:$canal_version", + "com.alibaba.otter:canal.parse:$canal_version", + "com.alibaba.otter:canal.server:$canal_version" +] + +dependencies { + api project(":eventmesh-openconnect:eventmesh-openconnect-java") + implementation project(":eventmesh-common") + implementation canal + implementation "com.alibaba:druid:1.2.6" +// implementation "org.apache.ddlutils:ddlutils:1.0" + compileOnly 'org.projectlombok:lombok' + annotationProcessor 'org.projectlombok:lombok' + testImplementation "org.mockito:mockito-core" + testImplementation "org.mockito:mockito-junit-jupiter" +} \ No newline at end of file diff --git a/eventmesh-connectors/eventmesh-connector-canal/gradle.properties b/eventmesh-connectors/eventmesh-connector-canal/gradle.properties new file mode 100644 index 0000000000..a439bdacf9 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/gradle.properties @@ -0,0 +1,19 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +canal_version=1.1.7 +pluginType=connector +pluginName=MySQL \ No newline at end of file diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/ByteArrayConverter.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/ByteArrayConverter.java new file mode 100644 index 0000000000..35056f2e48 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/ByteArrayConverter.java @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal; + +import org.apache.commons.beanutils.ConversionException; +import org.apache.commons.beanutils.Converter; +import org.apache.commons.beanutils.converters.ArrayConverter; +import org.apache.commons.beanutils.converters.ByteConverter; + + +public class ByteArrayConverter implements Converter { + + public static final Converter SQL_BYTES = new ByteArrayConverter(null); + private static final Converter converter = new ArrayConverter(byte[].class, new ByteConverter()); + + protected final Object defaultValue; + protected final boolean useDefault; + + public ByteArrayConverter() { + this.defaultValue = null; + this.useDefault = false; + } + + public ByteArrayConverter(Object defaultValue) { + this.defaultValue = defaultValue; + this.useDefault = true; + } + + public Object convert(Class type, Object value) { + if (value == null) { + if (useDefault) { + return (defaultValue); + } else { + throw new ConversionException("No value specified"); + } + } + + if (value instanceof byte[]) { + return (value); + } + + // BLOB columns: canal stores the value directly as a String in ISO-8859-1 + if (value instanceof String) { + try { + return ((String) value).getBytes("ISO-8859-1"); + } catch (Exception e) { + throw new ConversionException(e); + } + } + + return converter.convert(type, value); // delegate to the element-wise ByteConverter + } +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/CanalConnectRecord.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/CanalConnectRecord.java new file mode 100644 index 0000000000..c226ba651b --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/CanalConnectRecord.java @@ -0,0 +1,275 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.eventmesh.connector.canal; + +import org.apache.eventmesh.common.remote.job.SyncConsistency; +import org.apache.eventmesh.common.remote.job.SyncMode; +import org.apache.eventmesh.connector.canal.model.EventColumn; +import org.apache.eventmesh.connector.canal.model.EventType; +import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord; + +import java.util.ArrayList; +import java.util.List; + +import lombok.Data; + +@Data +public class CanalConnectRecord { + + private String schemaName; + private String tableName; + + /** + * Business type of the change (I/U/D/C/A/E), consistent with the EventType defined in canal's EntryProtocol. + */ + private EventType eventType; + + /** + * Business timestamp of the change. + */ + private long executeTime; + + /** + * Primary key values before the change; for insert/delete they are identical to the values after the change. + */ + private List<EventColumn> oldKeys = new ArrayList<>(); + + /** + * Primary key values after the change; for insert/delete they are identical to the values before the change. + */ + private List<EventColumn> keys = new ArrayList<>(); + + /** + * The remaining non-primary-key columns. + */ + private List<EventColumn> columns = new ArrayList<>(); + + // ====================== attributes attached to the data at runtime ============================= + /** + * Estimated size, derived from the binlog event. + */ + private long size = 1024; + + /** + * Id of the sync mapping pair. + */ + private long pairId = -1; + + /** + * For eventType = CREATE/ALTER/ERASE this is the original SQL statement; otherwise it is the dynamically generated INSERT/UPDATE/DELETE SQL. + */ + private String sql; + + /** + * Schema name for DDL/query statements; cross-schema DDL exists, so the schema the DDL was executed in has to be preserved. + */ + private String ddlSchemaName; + + /** + * Custom sync mode, allowed to override the default pipeline parameter, e.g. for remedial data synchronization. + */ + private SyncMode syncMode; + + /** + * Custom sync consistency, allowed to override the default pipeline parameter, e.g. to force a database lookup for a group of columns. + */ + private SyncConsistency syncConsistency; + + /** + * Whether this is remedy data, e.g. produced automatically by loop-back remediation, or manual corrections from freedom mode. + */ + private boolean remedy = false; + + /** + * Hint content to prepend to the generated SQL. + */ + private String hint; + + /** + * Whether to omit the schema when generating SQL, e.g. required for tddl/DRDS. + */ + private boolean withoutSchema = false; + + private String journalName; + + private long binLogOffset; + + public CanalConnectRecord() { + super(); + } + + // ======================== helper method ================= + + /** + * Returns all columns marked as updated. + */ + public List<EventColumn> getUpdatedColumns() { + List<EventColumn> columns = new ArrayList<>(); + for (EventColumn column : this.columns) { + if (column.isUpdate()) { + columns.add(column); + } + } + + return columns; + } + + /** + * Returns all primary key columns marked as updated. + */ + public List<EventColumn> getUpdatedKeys() { + List<EventColumn> columns = new ArrayList<>(); + for (EventColumn column : this.keys) { + if (column.isUpdate()) { + columns.add(column); + } + } + + return columns; + } + + private List<EventColumn> cloneColumn(List<EventColumn> columns) { + if (columns == null) { + return null; + } + + List<EventColumn> cloneColumns = new ArrayList<>(); + for (EventColumn column : columns) { + cloneColumns.add(column.clone()); + } + + return cloneColumns; + } + + public CanalConnectRecord clone() { + CanalConnectRecord record = new CanalConnectRecord(); + record.setTableName(tableName); + record.setSchemaName(schemaName); + record.setDdlSchemaName(ddlSchemaName); + record.setEventType(eventType); + record.setExecuteTime(executeTime); + record.setKeys(cloneColumn(keys)); + record.setColumns(cloneColumn(columns)); + record.setOldKeys(cloneColumn(oldKeys)); + record.setSize(size); + record.setPairId(pairId); + record.setSql(sql); + record.setSyncMode(syncMode); + record.setSyncConsistency(syncConsistency); + record.setRemedy(remedy); + record.setHint(hint); + record.setWithoutSchema(withoutSchema); + return record; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((columns == null) ?
0 : columns.hashCode()); + result = prime * result + ((eventType == null) ? 0 : eventType.hashCode()); + result = prime * result + (int) (executeTime ^ (executeTime >>> 32)); + result = prime * result + ((keys == null) ? 0 : keys.hashCode()); + result = prime * result + ((oldKeys == null) ? 0 : oldKeys.hashCode()); + result = prime * result + (int) (pairId ^ (pairId >>> 32)); + result = prime * result + ((schemaName == null) ? 0 : schemaName.hashCode()); + result = prime * result + ((tableName == null) ? 0 : tableName.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + CanalConnectRecord other = (CanalConnectRecord) obj; + if (columns == null) { + if (other.columns != null) { + return false; + } + } else if (!columns.equals(other.columns)) { + return false; + } + if (eventType != other.eventType) { + return false; + } + if (executeTime != other.executeTime) { + return false; + } + if (keys == null) { + if (other.keys != null) { + return false; + } + } else if (!keys.equals(other.keys)) { + return false; + } + if (oldKeys == null) { + if (other.oldKeys != null) { + return false; + } + } else if (!oldKeys.equals(other.oldKeys)) { + return false; + } + if (pairId != other.pairId) { + return false; + } + if (schemaName == null) { + if (other.schemaName != null) { + return false; + } + } else if (!schemaName.equals(other.schemaName)) { + return false; + } + if (tableName == null) { + if (other.tableName != null) { + return false; + } + } else if (!tableName.equals(other.tableName)) { + return false; + } + return true; + } + + @Override + public String toString() { + return "CanalConnectRecord{" + + "tableName='" + tableName + '\'' + + ", schemaName='" + schemaName + '\'' + + ", eventType=" + eventType + + ", executeTime=" + executeTime + + ", oldKeys=" + oldKeys + + ", keys=" + keys + + ", columns=" + columns + + ", size=" + size + + ", pairId=" + pairId + + ", sql='" + sql + '\'' + + ", ddlSchemaName='" + ddlSchemaName + '\'' + + ", syncMode=" + syncMode + + ", syncConsistency=" + syncConsistency + + ", remedy=" + remedy + + ", hint='" + hint + '\'' + + ", withoutSchema=" + withoutSchema + + '}'; + } +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/DatabaseConnection.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/DatabaseConnection.java new file mode 100644 index 0000000000..2b64f2eccd --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/DatabaseConnection.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal; + + +import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSinkConfig; +import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSourceConfig; + +import com.alibaba.druid.pool.DruidDataSource; + +import java.sql.Connection; +import java.sql.SQLException; + +public class DatabaseConnection { + + public static DruidDataSource sourceDataSource; + + public static DruidDataSource sinkDataSource; + + public static CanalSourceConfig sourceConfig; + + public static CanalSinkConfig sinkConfig; + + public static void initSourceConnection() { + sourceDataSource = new DruidDataSource(); + sourceDataSource.setUrl(sourceConfig.getSourceConnectorConfig().getUrl()); +
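// the pool settings below are conservative Druid defaults: 5-20 connections, 60s max wait, idle eviction after 5 minutes, and "SELECT 1" keep-alive validation +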
sourceDataSource.setUsername(sourceConfig.getSourceConnectorConfig().getUserName()); + sourceDataSource.setPassword(sourceConfig.getSourceConnectorConfig().getPassWord()); + sourceDataSource.setInitialSize(5); + sourceDataSource.setMinIdle(5); + sourceDataSource.setMaxActive(20); + sourceDataSource.setMaxWait(60000); + sourceDataSource.setTimeBetweenEvictionRunsMillis(60000); + sourceDataSource.setMinEvictableIdleTimeMillis(300000); + sourceDataSource.setValidationQuery("SELECT 1"); + sourceDataSource.setTestWhileIdle(true); + sourceDataSource.setTestOnBorrow(false); + sourceDataSource.setTestOnReturn(false); + sourceDataSource.setPoolPreparedStatements(true); + sourceDataSource.setMaxPoolPreparedStatementPerConnectionSize(20); + } + + public static void initSinkConnection() { + sinkDataSource = new DruidDataSource(); + sinkDataSource.setUrl(sinkConfig.getSinkConnectorConfig().getUrl()); + sinkDataSource.setUsername(sinkConfig.getSinkConnectorConfig().getUserName()); + sinkDataSource.setPassword(sinkConfig.getSinkConnectorConfig().getPassWord()); + sinkDataSource.setInitialSize(5); + sinkDataSource.setMinIdle(5); + sinkDataSource.setMaxActive(20); + sinkDataSource.setMaxWait(60000); + sinkDataSource.setTimeBetweenEvictionRunsMillis(60000); + sinkDataSource.setMinEvictableIdleTimeMillis(300000); + sinkDataSource.setValidationQuery("SELECT 1"); + sinkDataSource.setTestWhileIdle(true); + sinkDataSource.setTestOnBorrow(false); + sinkDataSource.setTestOnReturn(false); + sinkDataSource.setPoolPreparedStatements(true); + sinkDataSource.setMaxPoolPreparedStatementPerConnectionSize(20); + } + + + public static Connection getSourceConnection() throws SQLException { + return sourceDataSource.getConnection(); + } + + public static Connection getSinkConnection() throws SQLException { + return sinkDataSource.getConnection(); + } +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/SqlTimestampConverter.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/SqlTimestampConverter.java new file mode 100644 index 0000000000..4b8a1decf6 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/SqlTimestampConverter.java @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.eventmesh.connector.canal; + +import org.apache.commons.beanutils.ConversionException; +import org.apache.commons.beanutils.Converter; +import org.apache.commons.lang.time.DateFormatUtils; + +import java.sql.Timestamp; +import java.text.ParseException; +import java.text.ParsePosition; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.Locale; + +public class SqlTimestampConverter implements Converter { + + /** + * Date/time patterns tried when parsing incoming string values. + */ + public static final String[] DATE_FORMATS = new String[] {"yyyy-MM-dd", "HH:mm:ss", "yyyy-MM-dd HH:mm:ss", + "yyyy-MM-dd HH:mm:ss.SSS", "EEE MMM dd HH:mm:ss zzz yyyy", + DateFormatUtils.ISO_DATETIME_FORMAT.getPattern(), + DateFormatUtils.ISO_DATETIME_TIME_ZONE_FORMAT.getPattern(), + DateFormatUtils.SMTP_DATETIME_FORMAT.getPattern(),}; + + public static final Converter SQL_TIMESTAMP = new SqlTimestampConverter(null); + + /** + * The default value specified to our Constructor, if any. + */ + private final Object defaultValue; + + /** + * Should we return the default value on conversion errors? + */ + private final boolean useDefault; + + /** + * Create a {@link Converter} that will throw a {@link ConversionException} if a conversion error occurs. + */ + public SqlTimestampConverter() { + this.defaultValue = null; + this.useDefault = false; + } + + /** + * Create a {@link Converter} that will return the specified default value if a conversion error occurs. + * + * @param defaultValue The default value to be returned + */ + public SqlTimestampConverter(Object defaultValue) { + this.defaultValue = defaultValue; + this.useDefault = true; + } + + /** + * Convert the specified input object into an output object of the specified type. + * + * @param type Data type to which this value should be converted + * @param value The input value to be converted + * @throws ConversionException if conversion cannot be performed successfully + */ + public Object convert(Class type, Object value) { + if (value == null) { + if (useDefault) { + return (defaultValue); + } else { + throw new ConversionException("No value specified"); + } + } + + if (value instanceof java.sql.Date && java.sql.Date.class.equals(type)) { + return value; + } else if (value instanceof java.sql.Time && java.sql.Time.class.equals(type)) { + return value; + } else if (value instanceof Timestamp && Timestamp.class.equals(type)) { + return value; + } else { + try { + if (java.sql.Date.class.equals(type)) { + return new java.sql.Date(convertTimestamp2TimeMillis(value.toString())); + } else if (java.sql.Time.class.equals(type)) { + return new java.sql.Time(convertTimestamp2TimeMillis(value.toString())); + } else if (Timestamp.class.equals(type)) { + return new Timestamp(convertTimestamp2TimeMillis(value.toString())); + } else { + return new Timestamp(convertTimestamp2TimeMillis(value.toString())); + } + } catch (Exception e) { + throw new ConversionException("Value format invalid: " + e.getMessage(), e); + } + } + + } + + private Long convertTimestamp2TimeMillis(String input) { + if (input == null) { + return null; + } + + try { + // try the JDBC timestamp escape format first + return Timestamp.valueOf(input).getTime(); + } catch (Exception nfe) { + try { + try { + return parseDate(input, DATE_FORMATS, Locale.ENGLISH).getTime(); + } catch (Exception err) { + return parseDate(input, DATE_FORMATS, Locale.getDefault()).getTime(); + } + } catch (Exception err) { + // finally, treat the input as epoch milliseconds + return Long.parseLong(input); + } + } + } + + private Date parseDate(String str, String[]
parsePatterns, Locale locale) throws ParseException { + if ((str == null) || (parsePatterns == null)) { + throw new IllegalArgumentException("Date and Patterns must not be null"); + } + + SimpleDateFormat parser = null; + ParsePosition pos = new ParsePosition(0); + + for (int i = 0; i < parsePatterns.length; i++) { + if (i == 0) { + parser = new SimpleDateFormat(parsePatterns[0], locale); + } else { + parser.applyPattern(parsePatterns[i]); + } + pos.setIndex(0); + Date date = parser.parse(str, pos); + if ((date != null) && (pos.getIndex() == str.length())) { + return date; + } + } + + throw new ParseException("Unable to parse the date: " + str, -1); + } +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/SqlUtils.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/SqlUtils.java new file mode 100644 index 0000000000..930538ace4 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/SqlUtils.java @@ -0,0 +1,321 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal; + +import static org.apache.eventmesh.connector.canal.ByteArrayConverter.SQL_BYTES; +import static org.apache.eventmesh.connector.canal.SqlTimestampConverter.SQL_TIMESTAMP; + +import org.apache.commons.beanutils.ConvertUtilsBean; +import org.apache.commons.lang.StringUtils; + +import java.io.UnsupportedEncodingException; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.nio.charset.StandardCharsets; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Types; +import java.util.HashMap; +import java.util.Map; + +public class SqlUtils { + + public static final String REQUIRED_FIELD_NULL_SUBSTITUTE = " "; + public static final String SQLDATE_FORMAT = "yyyy-MM-dd"; + public static final String TIMESTAMP_FORMAT = "yyyy-MM-dd HH:mm:ss"; + private static final Map<Integer, Class<?>> sqlTypeToJavaTypeMap = new HashMap<>(); + private static final ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean(); + + static { + // register converters + convertUtilsBean.register(SQL_TIMESTAMP, java.sql.Date.class); + convertUtilsBean.register(SQL_TIMESTAMP, java.sql.Time.class); + convertUtilsBean.register(SQL_TIMESTAMP, java.sql.Timestamp.class); + convertUtilsBean.register(SQL_BYTES, byte[].class); + + // bool + sqlTypeToJavaTypeMap.put(Types.BOOLEAN, Boolean.class); + + // int + sqlTypeToJavaTypeMap.put(Types.TINYINT, Integer.class); + sqlTypeToJavaTypeMap.put(Types.SMALLINT, Integer.class); + sqlTypeToJavaTypeMap.put(Types.INTEGER, Integer.class); + + // long + sqlTypeToJavaTypeMap.put(Types.BIGINT, Long.class); + // MySQL BIT holds up to 64 bits, unsigned + sqlTypeToJavaTypeMap.put(Types.BIT, BigInteger.class); + + // decimal + sqlTypeToJavaTypeMap.put(Types.REAL, Float.class); +
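// REAL and FLOAT fit a Java Float, DOUBLE a Double; NUMERIC/DECIMAL keep exact precision as BigDecimal +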
sqlTypeToJavaTypeMap.put(Types.FLOAT, Float.class); + sqlTypeToJavaTypeMap.put(Types.DOUBLE, Double.class); + sqlTypeToJavaTypeMap.put(Types.NUMERIC, BigDecimal.class); + sqlTypeToJavaTypeMap.put(Types.DECIMAL, BigDecimal.class); + + // date + sqlTypeToJavaTypeMap.put(Types.DATE, java.sql.Date.class); + sqlTypeToJavaTypeMap.put(Types.TIME, java.sql.Time.class); + sqlTypeToJavaTypeMap.put(Types.TIMESTAMP, java.sql.Timestamp.class); + + // blob + sqlTypeToJavaTypeMap.put(Types.BLOB, byte[].class); + + // byte[] + sqlTypeToJavaTypeMap.put(Types.REF, byte[].class); + sqlTypeToJavaTypeMap.put(Types.OTHER, byte[].class); + sqlTypeToJavaTypeMap.put(Types.ARRAY, byte[].class); + sqlTypeToJavaTypeMap.put(Types.STRUCT, byte[].class); + sqlTypeToJavaTypeMap.put(Types.SQLXML, byte[].class); + sqlTypeToJavaTypeMap.put(Types.BINARY, byte[].class); + sqlTypeToJavaTypeMap.put(Types.DATALINK, byte[].class); + sqlTypeToJavaTypeMap.put(Types.DISTINCT, byte[].class); + sqlTypeToJavaTypeMap.put(Types.VARBINARY, byte[].class); + sqlTypeToJavaTypeMap.put(Types.JAVA_OBJECT, byte[].class); + sqlTypeToJavaTypeMap.put(Types.LONGVARBINARY, byte[].class); + + // String + sqlTypeToJavaTypeMap.put(Types.CHAR, String.class); + sqlTypeToJavaTypeMap.put(Types.VARCHAR, String.class); + sqlTypeToJavaTypeMap.put(Types.LONGVARCHAR, String.class); + sqlTypeToJavaTypeMap.put(Types.LONGNVARCHAR, String.class); + sqlTypeToJavaTypeMap.put(Types.NCHAR, String.class); + sqlTypeToJavaTypeMap.put(Types.NVARCHAR, String.class); + sqlTypeToJavaTypeMap.put(Types.NCLOB, String.class); + sqlTypeToJavaTypeMap.put(Types.CLOB, String.class); + } + + /** + * Converts the ResultSet value at the given index, declared with the given java.sql.Types code, into its String representation. + * + * @param rs the ResultSet holding the data + * @param index the column index + * @param sqlType the java.sql.Types code of the column + * @return the String representation of the value + * @throws SQLException if thrown by the JDBC API + */ + public static String sqlValueToString(ResultSet rs, int index, int sqlType) throws SQLException { + Class<?> requiredType = sqlTypeToJavaTypeMap.get(sqlType); + if (requiredType == null) { + throw new IllegalArgumentException("unknown java.sql.Types - " + sqlType); + } + + return getResultSetValue(rs, index, requiredType); + } + + /** + * The inverse of sqlValueToString. + * + * @param value the String representation of the value + * @param sqlType the java.sql.Types code of the target column + * @param isRequired whether the column is required (NOT NULL) + * @param isEmptyStringNulled whether an empty string should be treated as null + * @return the converted SQL value + */ + public static Object stringToSqlValue(String value, int sqlType, boolean isRequired, boolean isEmptyStringNulled) { + // handle text types first + if (SqlUtils.isTextType(sqlType)) { + if ((value == null) || (StringUtils.isEmpty(value) && isEmptyStringNulled)) { + return isRequired ? REQUIRED_FIELD_NULL_SUBSTITUTE : null; + } else { + return value; + } + } else { + if (StringUtils.isEmpty(value)) { + return isEmptyStringNulled ? null : value; // Oracle returns null here; keep the behavior compatible + } else { + Class<?> requiredType = sqlTypeToJavaTypeMap.get(sqlType); + if (requiredType == null) { + throw new IllegalArgumentException("unknown java.sql.Types - " + sqlType); + } else if (requiredType.equals(String.class)) { + return value; + } else if (isNumeric(sqlType)) { + return convertUtilsBean.convert(value.trim(), requiredType); + } else { + return convertUtilsBean.convert(value, requiredType); + } + } + } + } + + public static String encoding(String source, int sqlType, String sourceEncoding, String targetEncoding) { + switch (sqlType) { + case Types.CHAR: + case Types.VARCHAR: + case Types.LONGVARCHAR: + case Types.NCHAR: + case Types.NVARCHAR: + case Types.LONGNVARCHAR: + case Types.CLOB: + case Types.NCLOB: + if (!StringUtils.isEmpty(source)) { + String fromEncoding = StringUtils.isBlank(sourceEncoding) ?
"UTF-8" : sourceEncoding; + String toEncoding = StringUtils.isBlank(targetEncoding) ? "UTF-8" : targetEncoding; + + // if (false == StringUtils.equalsIgnoreCase(fromEncoding, + // toEncoding)) { + try { + return new String(source.getBytes(fromEncoding), toEncoding); + } catch (UnsupportedEncodingException e) { + throw new IllegalArgumentException(e.getMessage(), e); + } + // } + } + } + + return source; + } + + /** + * Retrieve a JDBC column value from a ResultSet, using the specified value type. + *
+ * Uses the specifically typed ResultSet accessor methods, falling back to {@link #getResultSetValue(ResultSet, int)} for unknown types. + *
+ * Note that the returned value may not be assignable to the specified required type, in case of an unknown type. Calling code needs to deal with + * this case appropriately, e.g. throwing a corresponding exception. + * + * @param rs is the ResultSet holding the data + * @param index is the column index + * @param requiredType the required value type (may be null) + * @return the value object + * @throws SQLException if thrown by the JDBC API + */ + private static String getResultSetValue(ResultSet rs, int index, Class<?> requiredType) throws SQLException { + if (requiredType == null) { + return getResultSetValue(rs, index); + } + + Object value = null; + boolean wasNullCheck = false; + + // Explicitly extract typed value, as far as possible. + if (String.class.equals(requiredType)) { + value = rs.getString(index); + } else if (boolean.class.equals(requiredType) || Boolean.class.equals(requiredType)) { + value = rs.getBoolean(index); + wasNullCheck = true; + } else if (byte.class.equals(requiredType) || Byte.class.equals(requiredType)) { + value = rs.getByte(index); + wasNullCheck = true; + } else if (short.class.equals(requiredType) || Short.class.equals(requiredType)) { + value = rs.getShort(index); + wasNullCheck = true; + } else if (int.class.equals(requiredType) || Integer.class.equals(requiredType)) { + value = rs.getLong(index); + wasNullCheck = true; + } else if (long.class.equals(requiredType) || Long.class.equals(requiredType)) { + value = rs.getBigDecimal(index); + wasNullCheck = true; + } else if (float.class.equals(requiredType) || Float.class.equals(requiredType)) { + value = rs.getFloat(index); + wasNullCheck = true; + } else if (double.class.equals(requiredType) || Double.class.equals(requiredType) + || Number.class.equals(requiredType)) { + value = rs.getDouble(index); + wasNullCheck = true; + } else if (java.sql.Time.class.equals(requiredType)) { + // try { + // value = rs.getTime(index); + // } catch (SQLException e) { + value = rs.getString(index); // read as String: all-zero values cannot be represented as java.sql.Time + // if (value == null && !rs.wasNull()) { + // value = "00:00:00"; // + // with zeroDateTimeBehavior=convertToNull, MySQL returns null for zero values + // } + // } + } else if (java.sql.Timestamp.class.equals(requiredType) || java.sql.Date.class.equals(requiredType)) { + // try { + // value = convertTimestamp(rs.getTimestamp(index)); + // } catch (SQLException e) { + // read as String: 0000-00-00 00:00:00 cannot be represented as Timestamp + value = rs.getString(index); + // if (value == null && !rs.wasNull()) { + // value = "0000:00:00 00:00:00"; // + // with zeroDateTimeBehavior=convertToNull, MySQL returns null for zero values + // } + // } + } else if (BigDecimal.class.equals(requiredType)) { + value = rs.getBigDecimal(index); + } else if (BigInteger.class.equals(requiredType)) { + value = rs.getBigDecimal(index); + } else if (Blob.class.equals(requiredType)) { + value = rs.getBlob(index); + } else if (Clob.class.equals(requiredType)) { + value = rs.getClob(index); + } else if (byte[].class.equals(requiredType)) { + byte[] bytes = rs.getBytes(index); + if (bytes != null) { + value = new String(bytes, StandardCharsets.ISO_8859_1); // expose binary data as an ISO-8859-1 string + } + } else { + // Some unknown type desired -> rely on getObject. + value = getResultSetValue(rs, index); + } + + // Perform was-null check if demanded (for results that the + // JDBC driver returns as primitives). + if (wasNullCheck && (value != null) && rs.wasNull()) { + value = null; + } + + return (value == null) ?
null : convertUtilsBean.convert(value); + } + + /** + * Retrieve a JDBC column value from a ResultSet, using the most appropriate value type. The returned value should be a detached value object, not + * having any ties to the active ResultSet: in particular, it should not be a Blob or Clob object but rather a byte array or String + * representation, respectively. + *
+ * Uses the getObject(index) method, but includes additional "hacks" to get around Oracle 10g returning a non-standard object for its + * TIMESTAMP datatype and a java.sql.Date for DATE columns leaving out the time portion: These columns will explicitly be extracted + * as standard java.sql.Timestamp object. + * + * @param rs is the ResultSet holding the data + * @param index is the column index + * @return the value object + * @throws SQLException if thrown by the JDBC API + * @see Blob + * @see Clob + * @see java.sql.Timestamp + */ + private static String getResultSetValue(ResultSet rs, int index) throws SQLException { + Object obj = rs.getObject(index); + return (obj == null) ? null : convertUtilsBean.convert(obj); + } + + // private static Object convertTimestamp(Timestamp timestamp) { + // return (timestamp == null) ? null : timestamp.getTime(); + // } + + /** + * Check whether the given SQL type is numeric. + */ + public static boolean isNumeric(int sqlType) { + return (Types.BIT == sqlType) || (Types.BIGINT == sqlType) || (Types.DECIMAL == sqlType) + || (Types.DOUBLE == sqlType) || (Types.FLOAT == sqlType) || (Types.INTEGER == sqlType) + || (Types.NUMERIC == sqlType) || (Types.REAL == sqlType) || (Types.SMALLINT == sqlType) + || (Types.TINYINT == sqlType); + } + + public static boolean isTextType(int sqlType) { + return sqlType == Types.CHAR || sqlType == Types.VARCHAR || sqlType == Types.CLOB || sqlType == Types.LONGVARCHAR + || sqlType == Types.NCHAR || sqlType == Types.NVARCHAR || sqlType == Types.NCLOB + || sqlType == Types.LONGNVARCHAR; + } +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/config/CanalServerConfig.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/config/CanalServerConfig.java new file mode 100644 index 0000000000..b28982f7b6 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/config/CanalServerConfig.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.eventmesh.connector.canal.config; + +import org.apache.eventmesh.common.config.connector.Config; + +import lombok.Data; +import lombok.EqualsAndHashCode; + +@Data +@EqualsAndHashCode(callSuper = true) +public class CanalServerConfig extends Config { + + private boolean sourceEnable; + + private boolean sinkEnable; + +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/dialect/AbstractDbDialect.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/dialect/AbstractDbDialect.java new file mode 100644 index 0000000000..8b11da0aa9 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/dialect/AbstractDbDialect.java @@ -0,0 +1,185 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal.dialect; + +import org.apache.eventmesh.connector.canal.template.SqlTemplate; + +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.jdbc.datasource.DataSourceTransactionManager; +import org.springframework.jdbc.support.lob.LobHandler; +import org.springframework.transaction.TransactionDefinition; +import org.springframework.transaction.support.TransactionTemplate; + +import lombok.extern.slf4j.Slf4j; + + +@Slf4j +public abstract class AbstractDbDialect implements DbDialect { + + protected int databaseMajorVersion; + protected int databaseMinorVersion; + protected String databaseName; + protected SqlTemplate sqlTemplate; + protected JdbcTemplate jdbcTemplate; + protected TransactionTemplate transactionTemplate; + protected LobHandler lobHandler; +// protected Map<List<String>, Table> tables; + + public AbstractDbDialect(final JdbcTemplate jdbcTemplate, LobHandler lobHandler) { + this.jdbcTemplate = jdbcTemplate; + this.lobHandler = lobHandler; + // initialize the transaction template + this.transactionTemplate = new TransactionTemplate(); + transactionTemplate.setTransactionManager(new DataSourceTransactionManager(jdbcTemplate.getDataSource())); + transactionTemplate.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRES_NEW); + + // initialize database metadata +// jdbcTemplate.execute(new ConnectionCallback() { +// +// public Object doInConnection(Connection c) throws SQLException, DataAccessException { +// DatabaseMetaData meta = c.getMetaData(); +// databaseName = meta.getDatabaseProductName(); +// databaseMajorVersion = meta.getDatabaseMajorVersion(); +// databaseMinorVersion = meta.getDatabaseMinorVersion(); +// +// return null; +// } +// }); +// +// initTables(jdbcTemplate); + } + + public AbstractDbDialect(JdbcTemplate jdbcTemplate, LobHandler lobHandler, String name, int majorVersion, + int minorVersion) { + this.jdbcTemplate = jdbcTemplate; + this.lobHandler = lobHandler; + // initialize the transaction template + this.transactionTemplate = new TransactionTemplate(); + transactionTemplate.setTransactionManager(new
DataSourceTransactionManager(jdbcTemplate.getDataSource())); + transactionTemplate.setPropagationBehavior(TransactionDefinition.PROPAGATION_REQUIRES_NEW); + + this.databaseName = name; + this.databaseMajorVersion = majorVersion; + this.databaseMinorVersion = minorVersion; + +// initTables(jdbcTemplate); + } + +// public Table findTable(String schema, String table, boolean useCache) { +// List key = Arrays.asList(schema, table); +// if (useCache == false) { +// tables.remove(key); +// } +// +// return tables.get(key); +// } +// +// public Table findTable(String schema, String table) { +// return findTable(schema, table, true); +// } + +// public void reloadTable(String schema, String table) { +// if (StringUtils.isNotEmpty(table)) { +// tables.remove(Arrays.asList(schema, table)); +// } else { +// // if no table name is given, clear all cached tables and reload +// tables.clear(); +// } +// } + + public String getName() { + return databaseName; + } + + public int getMajorVersion() { + return databaseMajorVersion; + } + + @Override + public int getMinorVersion() { + return databaseMinorVersion; + } + + public String getVersion() { + return databaseMajorVersion + "." + databaseMinorVersion; + } + + public LobHandler getLobHandler() { + return lobHandler; + } + + public JdbcTemplate getJdbcTemplate() { + return jdbcTemplate; + } + + public TransactionTemplate getTransactionTemplate() { + return transactionTemplate; + } + + public SqlTemplate getSqlTemplate() { + return sqlTemplate; + } + + public boolean isDRDS() { + return false; + } + + public String getShardColumns(String schema, String table) { + return null; + } + + public void destory() { + } + + // ================================ helper method ========================== + +// private void initTables(final JdbcTemplate jdbcTemplate) { +// this.tables = new ConcurrentMap<>((Function<List<String>, Table>) names -> { +// Assert.isTrue(names.size() == 2); +// try { +// beforeFindTable(jdbcTemplate, names.get(0), names.get(0), names.get(1)); +//// DdlUtilsFilter filter = getDdlUtilsFilter(jdbcTemplate, names.get(0), names.get(0), names.get(1)); +// Table table = DdlUtils.findTable(jdbcTemplate, names.get(0), names.get(0), names.get(1)); +// afterFindTable(table, jdbcTemplate, names.get(0), names.get(0), names.get(1)); +// if (table == null) { +// throw new NestableRuntimeException("no found table [" + names.get(0) + "." + names.get(1) +// + "] , pls check"); +// } else { +// return table; +// } +// } catch (Exception e) { +// throw new NestableRuntimeException("find table [" + names.get(0) + "."
+ names.get(1) + "] error", +// e); +// } +// }); +// } + +// protected DdlUtilsFilter getDdlUtilsFilter(JdbcTemplate jdbcTemplate, String catalogName, String schemaName, +// String tableName) { +// // we need to return null for backward compatibility +// return null; +// } + +// protected void beforeFindTable(JdbcTemplate jdbcTemplate, String catalogName, String schemaName, String tableName) { +// // for subclass to extend +// } +// +// protected void afterFindTable(Table table, JdbcTemplate jdbcTemplate, String catalogName, String schemaName, +// String tableName) { +// // for subclass to extend +// } +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/dialect/DbDialect.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/dialect/DbDialect.java new file mode 100644 index 0000000000..88b896e018 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/dialect/DbDialect.java @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal.dialect; + +import org.apache.eventmesh.connector.canal.template.SqlTemplate; + +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.jdbc.support.lob.LobHandler; +import org.springframework.transaction.support.TransactionTemplate; + +/** + * Database dialect abstraction. + */ +public interface DbDialect { + + public String getName(); + + public String getVersion(); + + public int getMajorVersion(); + + public int getMinorVersion(); + + public String getDefaultSchema(); + + public String getDefaultCatalog(); + + public boolean isCharSpacePadded(); + + public boolean isCharSpaceTrimmed(); + + public boolean isEmptyStringNulled(); + + public boolean isSupportMergeSql(); + + public boolean isDRDS(); + + public LobHandler getLobHandler(); + + public JdbcTemplate getJdbcTemplate(); + + public TransactionTemplate getTransactionTemplate(); + + public SqlTemplate getSqlTemplate(); + + public String getShardColumns(String schema, String table); + + public void destory(); +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/dialect/MysqlDialect.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/dialect/MysqlDialect.java new file mode 100644 index 0000000000..438cb9102f --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/dialect/MysqlDialect.java @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal.dialect; + +import org.apache.eventmesh.connector.canal.template.MysqlSqlTemplate; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.jdbc.support.lob.LobHandler; + + +/** + * MySQL-specific dialect handling. + * + * @author jianghang 2011-10-27 + * @version 4.0.0 + */ +public class MysqlDialect extends AbstractDbDialect { + + private Map<List<String>, String> shardColumns; + + public MysqlDialect(JdbcTemplate jdbcTemplate, LobHandler lobHandler) { + super(jdbcTemplate, lobHandler); + sqlTemplate = new MysqlSqlTemplate(); + } + + public MysqlDialect(JdbcTemplate jdbcTemplate, LobHandler lobHandler, String name, String databaseVersion, + int majorVersion, int minorVersion) { + super(jdbcTemplate, lobHandler, name, majorVersion, minorVersion); + sqlTemplate = new MysqlSqlTemplate(); + } + + public boolean isCharSpacePadded() { + return false; + } + + public boolean isCharSpaceTrimmed() { + return true; + } + + public boolean isEmptyStringNulled() { + return false; + } + + public boolean isSupportMergeSql() { + return true; + } + + public String getDefaultSchema() { + return null; + } + + public boolean isDRDS() { + return false; + } + + public String getShardColumns(String schema, String table) { + if (isDRDS()) { + return shardColumns.get(Arrays.asList(schema, table)); + } else { + return null; + } + } + + public String getDefaultCatalog() { + return jdbcTemplate.queryForObject("select database()", String.class); + } + +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/interceptor/SqlBuilderLoadInterceptor.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/interceptor/SqlBuilderLoadInterceptor.java new file mode 100644 index 0000000000..ddcaef3886 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/interceptor/SqlBuilderLoadInterceptor.java @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.eventmesh.connector.canal.interceptor; + +import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSinkConfig; +import org.apache.eventmesh.connector.canal.CanalConnectRecord; +import org.apache.eventmesh.connector.canal.dialect.DbDialect; +import org.apache.eventmesh.connector.canal.model.EventColumn; +import org.apache.eventmesh.connector.canal.model.EventType; +import org.apache.eventmesh.connector.canal.template.SqlTemplate; + +import java.util.List; + +import org.springframework.util.CollectionUtils; + +/** + * Builds the up-to-date SQL statement for a record. + */ +public class SqlBuilderLoadInterceptor { + + private DbDialect dbDialect; + + public boolean before(CanalSinkConfig sinkConfig, CanalConnectRecord record) { + // build the initial SQL + SqlTemplate sqlTemplate = dbDialect.getSqlTemplate(); + EventType type = record.getEventType(); + String sql = null; + + String schemaName = (record.isWithoutSchema() ? null : record.getSchemaName()); + + // shard columns for DRDS databases + String shardColumns = null; + + // note: insert/update statements order the primary key columns after the other columns + if (type.isInsert()) { + if (CollectionUtils.isEmpty(record.getColumns()) + && (dbDialect.isDRDS())) { // the table consists solely of primary keys, issue a plain insert + sql = sqlTemplate.getInsertSql(schemaName, + record.getTableName(), + buildColumnNames(record.getKeys()), + buildColumnNames(record.getColumns())); + } else { + sql = sqlTemplate.getMergeSql(schemaName, + record.getTableName(), + buildColumnNames(record.getKeys()), + buildColumnNames(record.getColumns()), + new String[] {}, + !dbDialect.isDRDS(), + shardColumns); + } + } else if (type.isUpdate()) { + + boolean existOldKeys = !CollectionUtils.isEmpty(record.getOldKeys()); + boolean rowMode = sinkConfig.getSyncMode().isRow(); + String[] keyColumns = null; + String[] otherColumns = null; + if (existOldKeys) { + // handle primary key changes; the generated SQL is: update table xxx set pk = newPK where pk = oldPk + keyColumns = buildColumnNames(record.getOldKeys()); + // fetch exactly the changed keys: for a DRDS target the key set includes the shard key, while a normal key change only updates the affected key column + if (dbDialect.isDRDS()) { + otherColumns = buildColumnNames(record.getUpdatedColumns(), record.getUpdatedKeys()); + } else { + otherColumns = buildColumnNames(record.getUpdatedColumns(), record.getKeys()); + } + } else { + keyColumns = buildColumnNames(record.getKeys()); + otherColumns = buildColumnNames(record.getUpdatedColumns()); + } + + if (rowMode && !existOldKeys) { // row mode without key changes: consider a merge SQL + sql = sqlTemplate.getMergeSql(schemaName, + record.getTableName(), + keyColumns, + otherColumns, + new String[] {}, + !dbDialect.isDRDS(), + shardColumns); + } else { // otherwise generate an update SQL + sql = sqlTemplate.getUpdateSql(schemaName, record.getTableName(), keyColumns, otherColumns, !dbDialect.isDRDS(), shardColumns); + } + } else if (type.isDelete()) { + sql = sqlTemplate.getDeleteSql(schemaName, + record.getTableName(), + buildColumnNames(record.getKeys())); + } + + // prepend the hint, if any + if (record.getHint() != null) { + record.setSql(record.getHint() + sql); + } else { + record.setSql(sql); + } + return false; + } + + private String[] buildColumnNames(List<EventColumn> columns) { + String[] result = new String[columns.size()]; + for (int i = 0; i < columns.size(); i++) { + EventColumn column = columns.get(i); + result[i] = column.getColumnName(); + } + return result; + } + + private String[] buildColumnNames(List<EventColumn> columns1, List<EventColumn> columns2) { + String[] result = new String[columns1.size() + columns2.size()]; + int i = 0; + for (i = 0; i < columns1.size(); i++) { + EventColumn column = columns1.get(i); + result[i] = column.getColumnName(); + } + + for (; i <
columns1.size() + columns2.size(); i++) { + EventColumn column = columns2.get(i - columns1.size()); + result[i] = column.getColumnName(); + } + return result; + } + + public DbDialect getDbDialect() { + return dbDialect; + } + + public void setDbDialect(DbDialect dbDialect) { + this.dbDialect = dbDialect; + } +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/model/EventColumn.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/model/EventColumn.java new file mode 100644 index 0000000000..0b15c25aca --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/model/EventColumn.java @@ -0,0 +1,196 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal.model; + +import org.apache.commons.lang.builder.ToStringBuilder; + +import java.io.Serializable; + +public class EventColumn implements Serializable { + + private int index; + + private int columnType; + + private String columnName; + + /** + * For timestamp/datetime columns the value is a long number. + */ + private String columnValue; + + private boolean isNull; + + private boolean isKey; + + /** + * Added 2012.08.09 by ljh: a flag marking whether this column actually changed; only meaningful for non-primary-key columns,
because FileResolver/EventProcessor need all column data for analysis, while on-demand column sync should still be possible. + * + * isUpdate can be read as: does this column need to be applied on the target database? It only matters for update events; for insert/delete it defaults to true. + * 1. row mode: all columns are marked as updated + * 2. field mode: columns obtained by querying back the database are marked as updated + * 3. otherwise: marked according to whether the column value actually changed
+ */ + private boolean isUpdate = true; + + public int getColumnType() { + return columnType; + } + + public void setColumnType(int columnType) { + this.columnType = columnType; + } + + public String getColumnName() { + return columnName; + } + + public void setColumnName(String columnName) { + this.columnName = columnName; + } + + public String getColumnValue() { + if (isNull) { + // force the value to null when isNull is set: canal speaks protobuf, where a String defaults to the empty string and cannot represent null + columnValue = null; + return null; + } else { + return columnValue; + } + } + + public void setColumnValue(String columnValue) { + this.columnValue = columnValue; + } + + public boolean isNull() { + return isNull; + } + + public void setNull(boolean isNull) { + this.isNull = isNull; + } + + public boolean isKey() { + return isKey; + } + + public void setKey(boolean isKey) { + this.isKey = isKey; + } + + public int getIndex() { + return index; + } + + public void setIndex(int index) { + this.index = index; + } + + public boolean isUpdate() { + return isUpdate; + } + + public void setUpdate(boolean isUpdate) { + this.isUpdate = isUpdate; + } + + public EventColumn clone() { + EventColumn column = new EventColumn(); + column.setIndex(index); + column.setColumnName(columnName); + column.setColumnType(columnType); + column.setColumnValue(columnValue); + column.setKey(isKey); + column.setNull(isNull); + column.setUpdate(isUpdate); + return column; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((columnName == null) ? 0 : columnName.hashCode()); + result = prime * result + columnType; + result = prime * result + ((columnValue == null) ? 0 : columnValue.hashCode()); + result = prime * result + index; + result = prime * result + (isKey ? 1231 : 1237); + result = prime * result + (isNull ? 1231 : 1237); + result = prime * result + (isUpdate ?
1231 : 1237); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + EventColumn other = (EventColumn) obj; + if (columnName == null) { + if (other.columnName != null) { + return false; + } + } else if (!columnName.equals(other.columnName)) { + return false; + } + if (columnType != other.columnType) { + return false; + } + if (columnValue == null) { + if (other.columnValue != null) { + return false; + } + } else if (!columnValue.equals(other.columnValue)) { + return false; + } + if (index != other.index) { + return false; + } + if (isKey != other.isKey) { + return false; + } + if (isNull != other.isNull) { + return false; + } + if (isUpdate != other.isUpdate) { + return false; + } + return true; + } + + @Override + public String toString() { + return "EventColumn{" + + "index=" + index + + ", columnType=" + columnType + + ", columnName='" + columnName + '\'' + + ", columnValue='" + columnValue + '\'' + + ", isNull=" + isNull + + ", isKey=" + isKey + + ", isUpdate=" + isUpdate + + '}'; + } +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/model/EventColumnIndexComparable.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/model/EventColumnIndexComparable.java new file mode 100644 index 0000000000..11b8515a1d --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/model/EventColumnIndexComparable.java @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal.model; + +import java.util.Comparator; + +/** + * Sorts EventColumn instances by their index. + */ +public class EventColumnIndexComparable implements Comparator<EventColumn> { + + public int compare(EventColumn o1, EventColumn o2) { + return Integer.compare(o1.getIndex(), o2.getIndex()); + } + +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/model/EventType.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/model/EventType.java new file mode 100644 index 0000000000..63c627d3d2 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/model/EventType.java @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal.model; + +/** + * chang the eventtype num to I/U/D/C/A/E. + * + */ +public enum EventType { + + /** + * Insert row. + */ + INSERT("I"), + + /** + * Update row. + */ + UPDATE("U"), + + /** + * Delete row. + */ + DELETE("D"), + + /** + * Create table. + */ + CREATE("C"), + + /** + * Alter table. + */ + ALTER("A"), + + /** + * Erase table. + */ + ERASE("E"), + + /** + * Query. + */ + QUERY("Q"), + + /** + * Truncate. + */ + TRUNCATE("T"), + + /** + * rename. + */ + RENAME("R"), + + /** + * create index. + */ + CINDEX("CI"), + + /** + * drop index. + */ + DINDEX("DI"); + + private String value; + + private EventType(String value){ + this.value = value; + } + + public boolean isInsert() { + return this.equals(EventType.INSERT); + } + + public boolean isUpdate() { + return this.equals(EventType.UPDATE); + } + + public boolean isDelete() { + return this.equals(EventType.DELETE); + } + + public boolean isCreate() { + return this.equals(EventType.CREATE); + } + + public boolean isAlter() { + return this.equals(EventType.ALTER); + } + + public boolean isErase() { + return this.equals(EventType.ERASE); + } + + public boolean isQuery() { + return this.equals(EventType.QUERY); + } + + public boolean isTruncate() { + return this.equals(EventType.TRUNCATE); + } + + public boolean isRename() { + return this.equals(EventType.RENAME); + } + + public boolean isCindex() { + return this.equals(EventType.CINDEX); + } + + public boolean isDindex() { + return this.equals(EventType.DINDEX); + } + + public boolean isDdl() { + return isCreate() || isAlter() || isErase() || isTruncate() || isRename() || isCindex() || isDindex(); + } + + public boolean isDml() { + return isInsert() || isUpdate() || isDelete(); + } + + public static EventType valuesOf(String value) { + EventType[] eventTypes = values(); + for (EventType eventType : eventTypes) { + if (eventType.value.equalsIgnoreCase(value)) { + return eventType; + } + } + return null; + } + + public String getValue() { + return value; + } + + public void setValue(String value) { + this.value = value; + } + +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/server/CanalConnectServer.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/server/CanalConnectServer.java new file mode 100644 index 0000000000..6cc3d013dd --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/server/CanalConnectServer.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal.server; + +import org.apache.eventmesh.connector.canal.config.CanalServerConfig; +import org.apache.eventmesh.connector.canal.sink.connector.CanalSinkConnector; +import org.apache.eventmesh.connector.canal.source.connector.CanalSourceConnector; +import org.apache.eventmesh.openconnect.Application; +import org.apache.eventmesh.openconnect.util.ConfigUtil; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class CanalConnectServer { + + public static void main(String[] args) throws Exception { + + CanalServerConfig serverConfig = ConfigUtil.parse(CanalServerConfig.class, "server-config.yml"); + + if (serverConfig.isSourceEnable()) { + Application canalSourceApp = new Application(); + canalSourceApp.run(CanalSourceConnector.class); + } + + if (serverConfig.isSinkEnable()) { + Application canalSinkApp = new Application(); + canalSinkApp.run(CanalSinkConnector.class); + } + } +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/DbLoadContext.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/DbLoadContext.java new file mode 100644 index 0000000000..82d4837eef --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/DbLoadContext.java @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.eventmesh.connector.canal.sink;
+
+import org.apache.eventmesh.connector.canal.CanalConnectRecord;
+
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+
+import lombok.Data;
+
+/**
+ * Context of one database load run.
+ */
+@Data
+public class DbLoadContext {
+
+    private List<CanalConnectRecord> lastProcessedRecords; // records loaded in the previous round; repeated failures may require merging the data loaded across rounds
+
+    private List<CanalConnectRecord> prepareRecords; // records waiting to be processed
+
+    private List<CanalConnectRecord> processedRecords; // records that have been processed
+
+    private List<CanalConnectRecord> failedRecords;
+
+    public DbLoadContext() {
+        lastProcessedRecords = Collections.synchronizedList(new LinkedList<>());
+        prepareRecords = Collections.synchronizedList(new LinkedList<>());
+        processedRecords = Collections.synchronizedList(new LinkedList<>());
+        failedRecords = Collections.synchronizedList(new LinkedList<>());
+    }
+
+
+}
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/DbLoadData.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/DbLoadData.java
new file mode 100644
index 0000000000..5f73920576
--- /dev/null
+++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/DbLoadData.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.eventmesh.connector.canal.sink;
+
+import org.apache.eventmesh.connector.canal.CanalConnectRecord;
+import org.apache.eventmesh.connector.canal.model.EventType;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Groups records by table and by insert/update/delete statement type.
+ *
+ *
+ * Purpose of the grouping: enables batch optimization of insert statements.
+ * 1. Due to MySQL index constraints, concurrent execution of inserts must be avoided (see the sketch below).
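+ *
+ * A minimal usage sketch (illustrative only, not part of the original notes; it only uses the
+ * accessors defined in this file):
+ * DbLoadData loadData = new DbLoadData(records);
+ * for (DbLoadData.TableLoadData tableData : loadData.getTables()) {
+ *     // tableData.getInsertDatas() / getUpdateDatas() / getDeleteDatas()
+ *     // can each be flushed as one homogeneous batch per table
+ * }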
+ * 
+ */
+public class DbLoadData {
+
+    private List<TableLoadData> tables = new ArrayList<>();
+
+    public DbLoadData() {
+        // nothing
+    }
+
+    public DbLoadData(List<CanalConnectRecord> records) {
+        for (CanalConnectRecord record : records) {
+            merge(record);
+        }
+    }
+
+    public void merge(CanalConnectRecord record) {
+        TableLoadData tableData = findTableData(record);
+
+        EventType type = record.getEventType();
+        if (type.isInsert()) {
+            tableData.getInsertDatas().add(record);
+        } else if (type.isUpdate()) {
+            tableData.getUpdateDatas().add(record);
+        } else if (type.isDelete()) {
+            tableData.getDeleteDatas().add(record);
+        }
+    }
+
+    public List<TableLoadData> getTables() {
+        return tables;
+    }
+
+    private synchronized TableLoadData findTableData(CanalConnectRecord record) {
+        for (TableLoadData table : tables) {
+            if (table.getSchemaName().equals(record.getSchemaName()) &&
+                table.getTableName().equals(record.getTableName())) {
+                return table;
+            }
+        }
+
+        TableLoadData data = new TableLoadData(record.getSchemaName(), record.getTableName());
+        tables.add(data);
+        return data;
+    }
+
+    /**
+     * Records of a single table, grouped by statement type.
+     */
+    public static class TableLoadData {
+
+        private String schemaName;
+
+        private String tableName;
+        private List<CanalConnectRecord> insertDatas = new ArrayList<>();
+        private List<CanalConnectRecord> updateDatas = new ArrayList<>();
+        private List<CanalConnectRecord> deleteDatas = new ArrayList<>();
+
+        public TableLoadData(String schemaName, String tableName) {
+            this.schemaName = schemaName;
+            this.tableName = tableName;
+        }
+
+        public List<CanalConnectRecord> getInsertDatas() {
+            return insertDatas;
+        }
+
+        public void setInsertDatas(List<CanalConnectRecord> insertDatas) {
+            this.insertDatas = insertDatas;
+        }
+
+        public List<CanalConnectRecord> getUpdateDatas() {
+            return updateDatas;
+        }
+
+        public void setUpdateDatas(List<CanalConnectRecord> updateDatas) {
+            this.updateDatas = updateDatas;
+        }
+
+        public List<CanalConnectRecord> getDeleteDatas() {
+            return deleteDatas;
+        }
+
+        public void setDeleteDatas(List<CanalConnectRecord> deleteDatas) {
+            this.deleteDatas = deleteDatas;
+        }
+
+        public String getSchemaName() {
+            return schemaName;
+        }
+
+        public void setSchemaName(String schemaName) {
+            this.schemaName = schemaName;
+        }
+
+        public String getTableName() {
+            return tableName;
+        }
+
+        public void setTableName(String tableName) {
+            this.tableName = tableName;
+        }
+    }
+}
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/DbLoadMerger.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/DbLoadMerger.java
new file mode 100644
index 0000000000..de532abcd0
--- /dev/null
+++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/DbLoadMerger.java
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2010-2101 Alibaba Group Holding Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.eventmesh.connector.canal.sink;
+
+import org.apache.eventmesh.connector.canal.CanalConnectRecord;
+import org.apache.eventmesh.connector.canal.model.EventColumn;
+import org.apache.eventmesh.connector.canal.model.EventColumnIndexComparable;
+import org.apache.eventmesh.connector.canal.model.EventType;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.springframework.util.CollectionUtils;
+
+import lombok.extern.slf4j.Slf4j;
+
+
+/**
+ *
+ * Merges change records of the same schema-table pair.
+ * The result of merging multiple change records with the same pk is one of:
+ * 1, I
+ * 2, U
+ * 3, D
+ * one I plus several U records merge into a single I;
+ * several U records merge into the latest one, as in the example below;
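+ *
+ * A hypothetical sequence for a single pk, for illustration only:
+ * I(pk=1) + U(pk=1) + U(pk=1) merges into one I(pk=1) carrying the latest column values;
+ * U(pk=1) + D(pk=1) merges into one D(pk=1).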
+ * 
+ */ +@Slf4j +public class DbLoadMerger { + + /** + * 将一批数据进行根据table+主键信息进行合并,保证一个表的一个pk记录只有一条结果 + * + * @param eventDatas + * @return + */ + public static List merge(List eventDatas) { + Map result = new LinkedHashMap(); + for (CanalConnectRecord eventData : eventDatas) { + merge(eventData, result); + } + return new LinkedList<>(result.values()); + } + + public static void merge(CanalConnectRecord record, Map result) { + EventType eventType = record.getEventType(); + switch (eventType) { + case INSERT: + mergeInsert(record, result); + break; + case UPDATE: + mergeUpdate(record, result); + break; + case DELETE: + mergeDelete(record, result); + break; + default: + break; + } + } + + private static void mergeInsert(CanalConnectRecord record, Map result) { + // insert无主键变更的处理 + RowKey rowKey = new RowKey(record.getSchemaName(), record.getTableName(), + record.getKeys()); + if (!result.containsKey(rowKey)) { + result.put(rowKey, record); + } else { + CanalConnectRecord oldRecord = result.get(rowKey); + record.setSize(oldRecord.getSize() + record.getSize()); + // 如果上一条变更是delete的,就直接用insert替换 + if (oldRecord.getEventType() == EventType.DELETE) { + result.put(rowKey, record); + } else if (record.getEventType() == EventType.UPDATE + || record.getEventType() == EventType.INSERT) { + // insert之前出现了update逻辑上不可能,唯一的可能性主要是Freedom的介入,人为的插入了一条Insert记录 + // 不过freedom一般不建议Insert操作,只建议执行update/delete操作. update默认会走merge + // sql,不存在即插入 + log.warn("update-insert/insert-insert happend. before[{}] , after[{}]", oldRecord, record); + // 如果上一条变更是update的,就用insert替换,并且把上一条存在而这一条不存在的字段值拷贝到这一条中 + CanalConnectRecord mergeEventData = replaceColumnValue(record, oldRecord); + mergeEventData.getOldKeys().clear();// 清空oldkeys,insert记录不需要 + result.put(rowKey, mergeEventData); + } + } + } + + private static void mergeUpdate(CanalConnectRecord record, Map result) { + RowKey rowKey = new RowKey(record.getSchemaName(), record.getTableName(), record.getKeys()); + if (!CollectionUtils.isEmpty(record.getOldKeys())) {// 存在主键变更 + // 需要解决(1->2 , 2->3)级联主键变更的问题 + RowKey oldKey = new RowKey(record.getSchemaName(), record.getTableName(), + record.getOldKeys()); + if (!result.containsKey(oldKey)) {// 不需要级联 + result.put(rowKey, record); + } else { + CanalConnectRecord oldRecord = result.get(oldKey); + record.setSize(oldRecord.getSize() + record.getSize()); + // 如果上一条变更是insert的,就把这一条的eventType改成insert,并且把上一条存在而这一条不存在的字段值拷贝到这一条中 + if (oldRecord.getEventType() == EventType.INSERT) { + record.setEventType(EventType.INSERT); + // 删除当前变更数据老主键的记录. + result.remove(oldKey); + + CanalConnectRecord mergeEventData = replaceColumnValue(record, oldRecord); + mergeEventData.getOldKeys().clear();// 清空oldkeys,insert记录不需要 + result.put(rowKey, mergeEventData); + } else if (oldRecord.getEventType() == EventType.UPDATE) { + // 删除当前变更数据老主键的记录. 
+ result.remove(oldKey); + + // 如果上一条变更是update的,把上一条存在而这一条不存在的数据拷贝到这一条中 + CanalConnectRecord mergeEventData = replaceColumnValue(record, oldRecord); + result.put(rowKey, mergeEventData); + } else { + throw new RuntimeException("delete(has old pks) + update impossible happed!"); + } + } + } else { + if (!result.containsKey(rowKey)) {// 没有主键变更 + result.put(rowKey, record); + } else { + CanalConnectRecord oldRecord = result.get(rowKey); + // 如果上一条变更是insert的,就把这一条的eventType改成insert,并且把上一条存在而这一条不存在的字段值拷贝到这一条中 + if (oldRecord.getEventType() == EventType.INSERT) { + oldRecord.setEventType(EventType.INSERT); + + CanalConnectRecord mergeEventData = replaceColumnValue(record, oldRecord); + result.put(rowKey, mergeEventData); + } else if (oldRecord.getEventType() == EventType.UPDATE) {// 可能存在 + // 1->2 + // , + // 2update的问题 + + // 如果上一条变更是update的,把上一条存在而这一条不存在的数据拷贝到这一条中 + CanalConnectRecord mergeEventData = replaceColumnValue(record, oldRecord); + result.put(rowKey, mergeEventData); + } else if (oldRecord.getEventType() == EventType.DELETE) { + //异常情况,出现 delete + update,那就直接更新为update + result.put(rowKey, record); + } + } + } + } + + private static void mergeDelete(CanalConnectRecord record, Map result) { + // 只保留pks,把columns去掉. 以后针对数据仓库可以开放delete columns记录 + RowKey rowKey = new RowKey(record.getSchemaName(), record.getTableName(), + record.getKeys()); + if (!result.containsKey(rowKey)) { + result.put(rowKey, record); + } else { + CanalConnectRecord oldRecord = result.get(rowKey); + record.setSize(oldRecord.getSize() + record.getSize()); + if (!CollectionUtils.isEmpty(oldRecord.getOldKeys())) {// 存在主键变更 + // insert/update -> delete记录组合时,delete的对应的pk为上一条记录的pk + record.setKeys(oldRecord.getOldKeys()); + record.getOldKeys().clear();// 清除oldKeys + + result.remove(rowKey);// 删除老的对象 + result.put(new RowKey(record.getSchemaName(), record.getTableName(), + record.getKeys()), record); // key发生变化,需要重新构造一个RowKey + } else { + record.getOldKeys().clear();// 清除oldKeys + result.put(rowKey, record); + } + + } + } + + /** + * 把old中的值存在而new中不存在的值合并到new中,并且把old中的变更前的主键保存到new中的变更前的主键. + * + * @param newRecord + * @param oldRecord + * @return + */ + private static CanalConnectRecord replaceColumnValue(CanalConnectRecord newRecord, CanalConnectRecord oldRecord) { + List newColumns = newRecord.getColumns(); + List oldColumns = oldRecord.getColumns(); + List temp = new ArrayList<>(); + for (EventColumn oldColumn : oldColumns) { + boolean contain = false; + for (EventColumn newColumn : newColumns) { + if (oldColumn.getColumnName().equalsIgnoreCase(newColumn.getColumnName())) { + newColumn.setUpdate(newColumn.isUpdate() || oldColumn.isUpdate());// 合并isUpdate字段 + contain = true; + } + } + + if (!contain) { + temp.add(oldColumn); + } + } + newColumns.addAll(temp); + Collections.sort(newColumns, new EventColumnIndexComparable()); // 排序 + // 把上一次变更的旧主键传递到这次变更的旧主键. 
+ newRecord.setOldKeys(oldRecord.getOldKeys()); + if (oldRecord.getSyncConsistency() != null) { + newRecord.setSyncConsistency(oldRecord.getSyncConsistency()); + } + if (oldRecord.getSyncMode() != null) { + newRecord.setSyncMode(oldRecord.getSyncMode()); + } + + if (oldRecord.isRemedy()) { + newRecord.setRemedy(oldRecord.isRemedy()); + } + newRecord.setSize(oldRecord.getSize() + newRecord.getSize()); + return newRecord; + } + + public static class RowKey implements Serializable { + + private static final long serialVersionUID = -7369951798499581038L; + private String schemaName; // tableId代表统配符时,需要指定schemaName + private String tableName; // tableId代表统配符时,需要指定tableName + + public RowKey(String schemaName, String tableName, List keys) { + this.schemaName = schemaName; + this.tableName = tableName; + this.keys = keys; + } + + public RowKey(List keys) { + this.keys = keys; + } + + private List keys = new ArrayList(); + + public List getKeys() { + return keys; + } + + public void setKeys(List keys) { + this.keys = keys; + } + + public String getSchemaName() { + return schemaName; + } + + public void setSchemaName(String schemaName) { + this.schemaName = schemaName; + } + + public String getTableName() { + return tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((keys == null) ? 0 : keys.hashCode()); + result = prime * result + ((schemaName == null) ? 0 : schemaName.hashCode()); + result = prime * result + ((tableName == null) ? 0 : tableName.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof RowKey)) { + return false; + } + RowKey other = (RowKey) obj; + if (keys == null) { + if (other.keys != null) { + return false; + } + } else if (!keys.equals(other.keys)) { + return false; + } + if (schemaName == null) { + if (other.schemaName != null) { + return false; + } + } else if (!schemaName.equals(other.schemaName)) { + return false; + } + if (tableName == null) { + if (other.tableName != null) { + return false; + } + } else if (!tableName.equals(other.tableName)) { + return false; + } + return true; + } + + } +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkConnector.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkConnector.java new file mode 100644 index 0000000000..853fe39a9e --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/sink/connector/CanalSinkConnector.java @@ -0,0 +1,689 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal.sink.connector; + +import org.apache.eventmesh.common.config.connector.Config; + +import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSinkConfig; +import org.apache.eventmesh.connector.canal.CanalConnectRecord; +import org.apache.eventmesh.connector.canal.DatabaseConnection; +import org.apache.eventmesh.connector.canal.SqlUtils; +import org.apache.eventmesh.connector.canal.dialect.DbDialect; +import org.apache.eventmesh.connector.canal.dialect.MysqlDialect; +import org.apache.eventmesh.connector.canal.interceptor.SqlBuilderLoadInterceptor; +import org.apache.eventmesh.connector.canal.model.EventColumn; +import org.apache.eventmesh.connector.canal.model.EventType; +import org.apache.eventmesh.connector.canal.sink.DbLoadContext; +import org.apache.eventmesh.connector.canal.sink.DbLoadData; +import org.apache.eventmesh.connector.canal.sink.DbLoadData.TableLoadData; +import org.apache.eventmesh.connector.canal.sink.DbLoadMerger; +import org.apache.eventmesh.openconnect.api.ConnectorCreateService; +import org.apache.eventmesh.openconnect.api.connector.ConnectorContext; +import org.apache.eventmesh.openconnect.api.connector.SinkConnectorContext; +import org.apache.eventmesh.openconnect.api.sink.Sink; +import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord; + + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang.exception.ExceptionUtils; + +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import org.springframework.dao.DataAccessException; +import org.springframework.dao.DeadlockLoserDataAccessException; +import org.springframework.jdbc.core.BatchPreparedStatementSetter; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.jdbc.core.PreparedStatementSetter; +import org.springframework.jdbc.core.StatementCallback; +import org.springframework.jdbc.core.StatementCreatorUtils; +import org.springframework.jdbc.support.lob.DefaultLobHandler; +import org.springframework.jdbc.support.lob.LobCreator; +import org.springframework.transaction.support.TransactionCallback; +import org.springframework.util.CollectionUtils; + +import com.alibaba.otter.canal.common.utils.NamedThreadFactory; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class CanalSinkConnector implements Sink, ConnectorCreateService { + + private CanalSinkConfig sinkConfig; + + private JdbcTemplate jdbcTemplate; + + private SqlBuilderLoadInterceptor interceptor; + + private DbDialect dbDialect; + + private ExecutorService executor; + + private int batchSize = 50; + + private boolean useBatch = true; + + @Override + public Class configClass() { + return CanalSinkConfig.class; + } + + @Override + public void init(Config config) throws Exception { + // init config for canal source connector + this.sinkConfig = (CanalSinkConfig) config; + } + + @Override + public void init(ConnectorContext connectorContext) throws Exception { + // init config for canal source 
connector + SinkConnectorContext sinkConnectorContext = (SinkConnectorContext) connectorContext; + this.sinkConfig = (CanalSinkConfig) sinkConnectorContext.getSinkConfig(); + this.batchSize = sinkConfig.getBatchsize(); + this.useBatch = sinkConfig.getUseBatch(); + DatabaseConnection.sinkConfig = this.sinkConfig; + DatabaseConnection.initSinkConnection(); + jdbcTemplate = new JdbcTemplate(DatabaseConnection.sinkDataSource); + dbDialect = new MysqlDialect(jdbcTemplate, new DefaultLobHandler()); + interceptor = new SqlBuilderLoadInterceptor(); + interceptor.setDbDialect(dbDialect); + executor = new ThreadPoolExecutor(sinkConfig.getPoolSize(), + sinkConfig.getPoolSize(), + 0L, + TimeUnit.MILLISECONDS, + new ArrayBlockingQueue<>(sinkConfig.getPoolSize() * 4), + new NamedThreadFactory("canalSink"), + new ThreadPoolExecutor.CallerRunsPolicy()); + } + + @Override + public void start() throws Exception { + + } + + @Override + public void commit(ConnectRecord record) { + + } + + @Override + public String name() { + return this.sinkConfig.getSinkConnectorConfig().getConnectorName(); + } + + @Override + public void stop() { + executor.shutdown(); + } + + @Override + public void put(List sinkRecords) { + DbLoadContext context = new DbLoadContext(); + for (ConnectRecord connectRecord : sinkRecords) { + List canalConnectRecordList = (List) connectRecord.getData(); + canalConnectRecordList = filterRecord(canalConnectRecordList, sinkConfig); + if (isDdlDatas(canalConnectRecordList)) { + doDdl(context, canalConnectRecordList); + } else { + // 进行一次数据合并,合并相同pk的多次I/U/D操作 + canalConnectRecordList = DbLoadMerger.merge(canalConnectRecordList); + // 按I/U/D进行归并处理 + DbLoadData loadData = new DbLoadData(); + doBefore(canalConnectRecordList, loadData); + // 执行load操作 + doLoad(context, sinkConfig, loadData); + + } + + } + } + + @Override + public Sink create() { + return new CanalSinkConnector(); + } + + /** + * 分析整个数据,将datas划分为多个批次. 
ddl sql前的DML并发执行,然后串行执行ddl后,再并发执行DML + * + * @return + */ + private boolean isDdlDatas(List canalConnectRecordList) { + boolean result = false; + for (CanalConnectRecord canalConnectRecord : canalConnectRecordList) { + result |= canalConnectRecord.getEventType().isDdl(); + if (result && !canalConnectRecord.getEventType().isDdl()) { + throw new RuntimeException("ddl/dml can't be in one batch, it's may be a bug , pls submit issues."); + } + } + return result; + } + + /** + * 过滤掉不需要处理的数据 + */ + private List filterRecord(List canalConnectRecordList, CanalSinkConfig sinkConfig) { + return canalConnectRecordList.stream() + .filter(record -> sinkConfig.getSinkConnectorConfig().getSchemaName().equalsIgnoreCase(record.getSchemaName()) && + sinkConfig.getSinkConnectorConfig().getTableName().equalsIgnoreCase(record.getTableName())) + .collect(Collectors.toList()); + } + + /** + * 执行ddl的调用,处理逻辑比较简单: 串行调用 + */ + private void doDdl(DbLoadContext context, List canalConnectRecordList) { + for (final CanalConnectRecord record : canalConnectRecordList) { + try { + Boolean result = jdbcTemplate.execute(new StatementCallback() { + + public Boolean doInStatement(Statement stmt) throws SQLException, DataAccessException { + boolean result = true; + if (StringUtils.isNotEmpty(record.getDdlSchemaName())) { + // 如果mysql,执行ddl时,切换到在源库执行的schema上 + // result &= stmt.execute("use " + + // data.getDdlSchemaName()); + + // 解决当数据库名称为关键字如"Order"的时候,会报错,无法同步 + result &= stmt.execute("use `" + record.getDdlSchemaName() + "`"); + } + result &= stmt.execute(record.getSql()); + return result; + } + }); + if (Boolean.TRUE.equals(result)) { + context.getProcessedRecords().add(record); + } else { + context.getFailedRecords().add(record); + } + } catch (Throwable e) { + throw new RuntimeException(e); + } + } + } + + /** + * 执行数据处理,比如数据冲突检测 + */ + private void doBefore(List canalConnectRecordList, final DbLoadData loadData) { + for (final CanalConnectRecord record : canalConnectRecordList) { + boolean filter = interceptor.before(sinkConfig, record); + if (!filter) { + loadData.merge(record);// 进行分类 + } + } + } + + private void doLoad(DbLoadContext context, CanalSinkConfig sinkConfig, DbLoadData loadData) { + // 优先处理delete,可以利用batch优化 + List> batchDatas = new ArrayList<>(); + for (TableLoadData tableData : loadData.getTables()) { + if (useBatch) { + // 优先执行delete语句,针对unique更新,一般会进行delete + insert的处理模式,避免并发更新 + batchDatas.addAll(split(tableData.getDeleteDatas())); + } else { + // 如果不可以执行batch,则按照单条数据进行并行提交 + // 优先执行delete语句,针对unique更新,一般会进行delete + insert的处理模式,避免并发更新 + for (CanalConnectRecord data : tableData.getDeleteDatas()) { + batchDatas.add(Arrays.asList(data)); + } + } + } + + doTwoPhase(context, sinkConfig, batchDatas, true); + + batchDatas.clear(); + + // 处理下insert/update + for (TableLoadData tableData : loadData.getTables()) { + if (useBatch) { + // 执行insert + update语句 + batchDatas.addAll(split(tableData.getInsertDatas())); + batchDatas.addAll(split(tableData.getUpdateDatas()));// 每条记录分为一组,并行加载 + } else { + // 执行insert + update语句 + for (CanalConnectRecord data : tableData.getInsertDatas()) { + batchDatas.add(Arrays.asList(data)); + } + for (CanalConnectRecord data : tableData.getUpdateDatas()) { + batchDatas.add(Arrays.asList(data)); + } + } + } + + doTwoPhase(context, sinkConfig, batchDatas, true); + + batchDatas.clear(); + } + + /** + * 将对应的数据按照sql相同进行batch组合 + */ + private List> split(List records) { + List> result = new ArrayList<>(); + if (records == null || records.isEmpty()) { + return result; + } else { + int[] bits = 
new int[records.size()];// 初始化一个标记,用于标明对应的记录是否已分入某个batch + for (int i = 0; i < bits.length; i++) { + // 跳过已经被分入batch的 + while (i < bits.length && bits[i] == 1) { + i++; + } + + if (i >= bits.length) { // 已处理完成,退出 + break; + } + + // 开始添加batch,最大只加入batchSize个数的对象 + List batch = new ArrayList<>(); + bits[i] = 1; + batch.add(records.get(i)); + for (int j = i + 1; j < bits.length && batch.size() < batchSize; j++) { + if (bits[j] == 0 && canBatch(records.get(i), records.get(j))) { + batch.add(records.get(j)); + bits[j] = 1;// 修改为已加入 + } + } + result.add(batch); + } + + return result; + } + } + + /** + * 判断两条记录是否可以作为一个batch提交,主要判断sql是否相等. 可优先通过schemaName进行判断 + */ + private boolean canBatch(CanalConnectRecord source, CanalConnectRecord target) { + return StringUtils.equals(source.getSchemaName(), + target.getSchemaName()) + && StringUtils.equals(source.getTableName(), target.getTableName()) + && StringUtils.equals(source.getSql(), target.getSql()); + } + + /** + * 首先进行并行执行,出错后转为串行执行 + */ + private void doTwoPhase(DbLoadContext context, CanalSinkConfig sinkConfig, List> totalRows, boolean canBatch) { + // 预处理下数据 + List> results = new ArrayList>(); + for (List rows : totalRows) { + if (CollectionUtils.isEmpty(rows)) { + continue; // 过滤空记录 + } + results.add(executor.submit(new DbLoadWorker(context, rows, dbDialect, canBatch))); + } + + boolean partFailed = false; + for (Future result : results) { + Exception ex = null; + try { + ex = result.get(); +// for (CanalConnectRecord data : totalRows.get(i)) { +// interceptor.after(context, data);// 通知加载完成 +// } + } catch (Exception e) { + ex = e; + } + + if (ex != null) { + log.warn("##load phase one failed!", ex); + partFailed = true; + } + } + + if (partFailed) { + + // 尝试的内容换成phase one跑的所有数据,避免因failed datas计算错误而导致丢数据 + List retryRecords = new ArrayList<>(); + for (List rows : totalRows) { + retryRecords.addAll(rows); + } + + context.getFailedRecords().clear(); // 清理failed data数据 + + // 可能为null,manager老版本数据序列化传输时,因为数据库中没有skipLoadException变量配置 + Boolean skipException = sinkConfig.getSkipException(); + if (skipException != null && skipException) {// 如果设置为允许跳过单条异常,则一条条执行数据load,准确过滤掉出错的记录,并进行日志记录 + for (CanalConnectRecord retryRecord : retryRecords) { + DbLoadWorker worker = new DbLoadWorker(context, Arrays.asList(retryRecord), dbDialect, false);// 强制设置batch为false + try { + Exception ex = worker.call(); + if (ex != null) { + // do skip + log.warn("skip exception for data : {} , caused by {}", + retryRecord, + ExceptionUtils.getFullStackTrace(ex)); + } + } catch (Exception ex) { + // do skip + log.warn("skip exception for data : {} , caused by {}", + retryRecord, + ExceptionUtils.getFullStackTrace(ex)); + } + } + } else { + // 直接一批进行处理,减少线程调度 + DbLoadWorker worker = new DbLoadWorker(context, retryRecords, dbDialect, false);// 强制设置batch为false + try { + Exception ex = worker.call(); + if (ex != null) { + throw ex; // 自己抛自己接 + } + } catch (Exception ex) { + log.error("##load phase two failed!", ex); + throw new RuntimeException(ex); + } + } + + // 清理failed data数据 +// for (CanalConnectRecord retryRecord : retryRecords) { +// interceptor.after(context, retryRecord);// 通知加载完成 +// } + } + } + + enum ExecuteResult { + SUCCESS, ERROR, RETRY + } + + class DbLoadWorker implements Callable { + + private final DbLoadContext context; + private final DbDialect dbDialect; + private final List records; + private final boolean canBatch; + private final List allFailedRecords = new ArrayList<>(); + private final List allProcessedRecords = new ArrayList<>(); + private final List 
processedRecords = new ArrayList<>(); + private final List failedRecords = new ArrayList<>(); + + public DbLoadWorker(DbLoadContext context, List records, DbDialect dbDialect, boolean canBatch) { + this.context = context; + this.records = records; + this.canBatch = canBatch; + this.dbDialect = dbDialect; + } + + public Exception call() throws Exception { + try { + return doCall(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private Exception doCall() { + RuntimeException error = null; + ExecuteResult exeResult = null; + int index = 0;// 记录下处理成功的记录下标 + while (index < records.size()) { + // 处理数据切分 + final List splitDatas = new ArrayList<>(); + if (useBatch && canBatch) { + int end = Math.min(index + batchSize, records.size()); + splitDatas.addAll(records.subList(index, end)); + index = end;// 移动到下一批次 + } else { + splitDatas.add(records.get(index)); + index = index + 1;// 移动到下一条 + } + + int retryCount = 0; + while (true) { + try { + if (!CollectionUtils.isEmpty(failedRecords)) { + splitDatas.clear(); + splitDatas.addAll(failedRecords); // 下次重试时,只处理错误的记录 + } else { + failedRecords.addAll(splitDatas); // 先添加为出错记录,可能获取lob,datasource会出错 + } + + final LobCreator lobCreator = dbDialect.getLobHandler().getLobCreator(); + if (useBatch && canBatch) { + // 处理batch + final String sql = splitDatas.get(0).getSql(); + int[] affects = new int[splitDatas.size()]; + affects = (int[]) dbDialect.getTransactionTemplate().execute((TransactionCallback) status -> { + // 初始化一下内容 + try { + failedRecords.clear(); // 先清理 + processedRecords.clear(); + JdbcTemplate template = dbDialect.getJdbcTemplate(); + int[] affects1 = template.batchUpdate(sql, new BatchPreparedStatementSetter() { + + public void setValues(PreparedStatement ps, int idx) throws SQLException { + doPreparedStatement(ps, dbDialect, lobCreator, splitDatas.get(idx)); + } + + public int getBatchSize() { + return splitDatas.size(); + } + }); + return affects1; + } finally { + lobCreator.close(); + } + }); + + // 更新统计信息 + for (int i = 0; i < splitDatas.size(); i++) { + assert affects != null; + processStat(splitDatas.get(i), affects[i], true); + } + } else { + final CanalConnectRecord record = splitDatas.get(0);// 直接取第一条 + int affect = 0; + affect = (Integer) dbDialect.getTransactionTemplate().execute((TransactionCallback) status -> { + try { + failedRecords.clear(); // 先清理 + processedRecords.clear(); + JdbcTemplate template = dbDialect.getJdbcTemplate(); + int affect1 = template.update(record.getSql(), new PreparedStatementSetter() { + + public void setValues(PreparedStatement ps) throws SQLException { + doPreparedStatement(ps, dbDialect, lobCreator, record); + } + }); + return affect1; + } finally { + lobCreator.close(); + } + }); + // 更新统计信息 + processStat(record, affect, false); + } + + error = null; + exeResult = ExecuteResult.SUCCESS; + } catch (DeadlockLoserDataAccessException ex) { + error = new RuntimeException(ExceptionUtils.getFullStackTrace(ex)); + exeResult = ExecuteResult.RETRY; + } catch (Throwable ex) { + error = new RuntimeException(ExceptionUtils.getFullStackTrace(ex)); + exeResult = ExecuteResult.ERROR; + } + + if (ExecuteResult.SUCCESS == exeResult) { + allFailedRecords.addAll(failedRecords);// 记录一下异常到all记录中 + allProcessedRecords.addAll(processedRecords); + failedRecords.clear();// 清空上一轮的处理 + processedRecords.clear(); + break; // do next eventData + } else if (ExecuteResult.RETRY == exeResult) { + retryCount = retryCount + 1;// 计数一次 + // 出现异常,理论上当前的批次都会失败 + processedRecords.clear(); + failedRecords.clear(); + 
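+                    // note: the whole current split is re-marked as failed here, so the next
+                    // retry round (after the backoff sleep below) re-executes exactly this sub-batch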
failedRecords.addAll(splitDatas); + int retry = 3; + if (retryCount >= retry) { + processFailedDatas(index);// 重试已结束,添加出错记录并退出 + throw new RuntimeException(String.format("execute retry %s times failed", retryCount), error); + } else { + try { + int retryWait = 3000; + int wait = retryCount * retryWait; + wait = Math.max(wait, retryWait); + Thread.sleep(wait); + } catch (InterruptedException ex) { + Thread.interrupted(); + processFailedDatas(index);// 局部处理出错了 + throw new RuntimeException(ex); + } + } + } else { + // 出现异常,理论上当前的批次都会失败 + processedRecords.clear(); + failedRecords.clear(); + failedRecords.addAll(splitDatas); + processFailedDatas(index);// 局部处理出错了 + throw error; + } + } + } + + // 记录一下当前处理过程中失败的记录,affect = 0的记录 + context.getFailedRecords().addAll(allFailedRecords); + context.getProcessedRecords().addAll(allProcessedRecords); + return null; + } + + private void doPreparedStatement(PreparedStatement ps, DbDialect dbDialect, LobCreator lobCreator, + CanalConnectRecord record) throws SQLException { + EventType type = record.getEventType(); + // 注意insert/update语句对应的字段数序都是将主键排在后面 + List columns = new ArrayList(); + if (type.isInsert()) { + columns.addAll(record.getColumns()); // insert为所有字段 + columns.addAll(record.getKeys()); + } else if (type.isDelete()) { + columns.addAll(record.getKeys()); + } else if (type.isUpdate()) { + boolean existOldKeys = !CollectionUtils.isEmpty(record.getOldKeys()); + columns.addAll(record.getUpdatedColumns());// 只更新带有isUpdate=true的字段 + if (existOldKeys && dbDialect.isDRDS()) { + // DRDS需要区分主键是否有变更 + columns.addAll(record.getUpdatedKeys()); + } else { + columns.addAll(record.getKeys()); + } + if (existOldKeys) { + columns.addAll(record.getOldKeys()); + } + } + + for (int i = 0; i < columns.size(); i++) { + int paramIndex = i + 1; + EventColumn column = columns.get(i); + int sqlType = column.getColumnType(); + + Object param = null; + if (dbDialect instanceof MysqlDialect + && (sqlType == Types.TIME || sqlType == Types.TIMESTAMP || sqlType == Types.DATE)) { + // 解决mysql的0000-00-00 00:00:00问题,直接依赖mysql + // driver进行处理,如果转化为Timestamp会出错 + param = column.getColumnValue(); + } else { + param = SqlUtils.stringToSqlValue(column.getColumnValue(), + sqlType, + false, + dbDialect.isEmptyStringNulled()); + } + + try { + switch (sqlType) { + case Types.CLOB: + lobCreator.setClobAsString(ps, paramIndex, (String) param); + break; + + case Types.BLOB: + lobCreator.setBlobAsBytes(ps, paramIndex, (byte[]) param); + break; + case Types.TIME: + case Types.TIMESTAMP: + case Types.DATE: + // 只处理mysql的时间类型,oracle的进行转化处理 + if (dbDialect instanceof MysqlDialect) { + // 解决mysql的0000-00-00 00:00:00问题,直接依赖mysql + // driver进行处理,如果转化为Timestamp会出错 + ps.setObject(paramIndex, param); + } else { + StatementCreatorUtils.setParameterValue(ps, paramIndex, sqlType, null, param); + } + break; + case Types.BIT: + // 只处理mysql的bit类型,bit最多存储64位,所以需要使用BigInteger进行处理才能不丢精度 + // mysql driver将bit按照setInt进行处理,会导致数据越界 + if (dbDialect instanceof MysqlDialect) { + StatementCreatorUtils.setParameterValue(ps, paramIndex, Types.DECIMAL, null, param); + } else { + StatementCreatorUtils.setParameterValue(ps, paramIndex, sqlType, null, param); + } + break; + default: + StatementCreatorUtils.setParameterValue(ps, paramIndex, sqlType, null, param); + break; + } + } catch (SQLException ex) { + log.error("## SetParam error , [pairId={}, sqltype={}, value={}]", + record.getPairId(), sqlType, param); + throw ex; + } + } + } + + private void processStat(CanalConnectRecord record, int affect, boolean batch) { + if 
(batch && (affect < 1 && affect != Statement.SUCCESS_NO_INFO)) { + failedRecords.add(record); // 记录到错误的临时队列,进行重试处理 + } else if (!batch && affect < 1) { + failedRecords.add(record);// 记录到错误的临时队列,进行重试处理 + } else { + processedRecords.add(record); // 记录到成功的临时队列,commit也可能会失败。所以这记录也可能需要进行重试 +// this.processStat(record, context); + } + } + + // 出现异常回滚了,记录一下异常记录 + private void processFailedDatas(int index) { + allFailedRecords.addAll(failedRecords);// 添加失败记录 + context.getFailedRecords().addAll(allFailedRecords);// 添加历史出错记录 + for (; index < records.size(); index++) { // 记录一下未处理的数据 + context.getFailedRecords().add(records.get(index)); + } + // 这里不需要添加当前成功记录,出现异常后会rollback所有的成功记录,比如processDatas有记录,但在commit出现失败 + // (bugfix) + allProcessedRecords.addAll(processedRecords); + context.getProcessedRecords().addAll(allProcessedRecords);// 添加历史成功记录 + } + } + +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/EntryParser.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/EntryParser.java new file mode 100644 index 0000000000..e1b9c9b2a9 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/EntryParser.java @@ -0,0 +1,356 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal.source; + +import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSourceConfig; +import org.apache.eventmesh.connector.canal.CanalConnectRecord; +import org.apache.eventmesh.connector.canal.model.EventColumn; +import org.apache.eventmesh.connector.canal.model.EventColumnIndexComparable; +import org.apache.eventmesh.connector.canal.model.EventType; +import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord; + +import org.apache.commons.lang.StringUtils; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import javax.xml.crypto.dsig.TransformException; + +import org.springframework.util.CollectionUtils; + +import com.alibaba.otter.canal.protocol.CanalEntry; +import com.alibaba.otter.canal.protocol.CanalEntry.Column; +import com.alibaba.otter.canal.protocol.CanalEntry.Entry; +import com.alibaba.otter.canal.protocol.CanalEntry.RowChange; +import com.alibaba.otter.canal.protocol.CanalEntry.RowData; + +import lombok.extern.slf4j.Slf4j; + +/** + * data object parse + */ +@Slf4j +public class EntryParser { + + /** + * 将对应canal送出来的Entry对象解析为ConnectRecord + * + *
+     * The following data has to be filtered out:
+     * 1. Transaction Begin/End events
+     * 2. retl.retl_client/retl.retl_mark loopback marks and the subsequent loopback data
+     * 3. data of the retl.xdual canal heartbeat table
+     * 
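+     *
+     * Assumed usage sketch (illustrative): entries drained from the embedded canal server are
+     * handed to this parser, e.g. List<CanalConnectRecord> records = new EntryParser().parse(sourceConfig, entries);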
+ */ + public List parse(CanalSourceConfig sourceConfig, List datas) { + List recordList = new ArrayList<>(); + List transactionDataBuffer = new ArrayList<>(); + try { + for (Entry entry : datas) { + switch (entry.getEntryType()) { + case TRANSACTIONBEGIN: + break; + case ROWDATA: + transactionDataBuffer.add(entry); + break; + case TRANSACTIONEND: + // 添加数据解析 + for (Entry bufferEntry : transactionDataBuffer) { + List recordParsedList = internParse(sourceConfig, bufferEntry); + if (CollectionUtils.isEmpty(recordParsedList)) {// 可能为空,针对ddl返回时就为null + continue; + } + // 初步计算一下事件大小 + long totalSize = bufferEntry.getHeader().getEventLength(); + long eachSize = totalSize / recordParsedList.size(); + for (CanalConnectRecord record : recordParsedList) { + if (record == null) { + continue; + } + record.setSize(eachSize);// 记录一下大小 + recordList.add(record); + } + } + transactionDataBuffer.clear(); + break; + default: + break; + } + } + + // 添加最后一次的数据,可能没有TRANSACTIONEND + // 添加数据解析 + for (Entry bufferEntry : transactionDataBuffer) { + List recordParsedList = internParse(sourceConfig, bufferEntry); + if (CollectionUtils.isEmpty(recordParsedList)) {// 可能为空,针对ddl返回时就为null + continue; + } + + // 初步计算一下事件大小 + long totalSize = bufferEntry.getHeader().getEventLength(); + long eachSize = totalSize / recordParsedList.size(); + for (CanalConnectRecord record : recordParsedList) { + if (record == null) { + continue; + } + record.setSize(eachSize);// 记录一下大小 + recordList.add(record); + } + } + } catch (Exception e) { + throw new RuntimeException(e); + } + return recordList; + } + + private List internParse(CanalSourceConfig sourceConfig, Entry entry) { + String schemaName = entry.getHeader().getSchemaName(); + String tableName = entry.getHeader().getTableName(); + if (!schemaName.equalsIgnoreCase(sourceConfig.getSourceConnectorConfig().getSchemaName()) || + !tableName.equalsIgnoreCase(sourceConfig.getSourceConnectorConfig().getTableName())) { + return null; + } + + RowChange rowChange = null; + try { + rowChange = RowChange.parseFrom(entry.getStoreValue()); + } catch (Exception e) { + throw new RuntimeException("parser of canal-event has an error , data:" + entry.toString(), e); + } + + if (rowChange == null) { + return null; + } + + EventType eventType = EventType.valueOf(rowChange.getEventType().name()); + + // 处理下DDL操作 + if (eventType.isQuery()) { + // 直接忽略query事件 + return null; + } + + // 首先判断是否为系统表 + if (StringUtils.equalsIgnoreCase(sourceConfig.getSystemSchema(), schemaName)) { + // do noting + if (eventType.isDdl()) { + return null; + } + + if (StringUtils.equalsIgnoreCase(sourceConfig.getSystemDualTable(), tableName)) { + // 心跳表数据直接忽略 + return null; + } + } else { + if (eventType.isDdl()) { + log.warn("unsupported ddl event type: {}", eventType); + return null; + } + } + + List recordList = new ArrayList<>(); + for (RowData rowData : rowChange.getRowDatasList()) { + CanalConnectRecord record = internParse(sourceConfig, entry, rowChange, rowData); + recordList.add(record); + } + + return recordList; + } + + /** + * 解析出从canal中获取的Event事件
Oracle: only the changed column values are available.
+ * insert: all changed data is taken from afterColumns
+ * delete: all changed data is taken from beforeColumns
+ * update: before holds all primary keys and the pre-change non-key values; after holds the post-change keys and non-key values; for a composite primary key only the changed key columns are stored
+ * MySQL: all pre-change and post-change data is available.
+ * insert: all changed data is taken from afterColumns
+ * delete: all changed data is taken from beforeColumns
+ * update: beforeColumns holds all pre-change data, afterColumns holds all post-change data, as in the example below
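+ *
+ * e.g. a hypothetical MySQL update of column c2 on the row id=1 would arrive as
+ * beforeColumns = [id=1, c1='a', c2='b'] and afterColumns = [id=1, c1='a', c2='b2' (updated)]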
+ */ + private CanalConnectRecord internParse(CanalSourceConfig canalSourceConfig, Entry entry, RowChange rowChange, RowData rowData) { + CanalConnectRecord canalConnectRecord = new CanalConnectRecord(); + canalConnectRecord.setTableName(entry.getHeader().getTableName()); + canalConnectRecord.setSchemaName(entry.getHeader().getSchemaName()); + canalConnectRecord.setEventType(EventType.valueOf(rowChange.getEventType().name())); + canalConnectRecord.setExecuteTime(entry.getHeader().getExecuteTime()); + canalConnectRecord.setJournalName(entry.getHeader().getLogfileName()); + canalConnectRecord.setBinLogOffset(entry.getHeader().getLogfileOffset()); + EventType eventType = canalConnectRecord.getEventType(); + + List beforeColumns = rowData.getBeforeColumnsList(); + List afterColumns = rowData.getAfterColumnsList(); + String tableName = canalConnectRecord.getSchemaName() + "." + canalConnectRecord.getTableName(); + + // 判断一下是否需要all columns + boolean isRowMode = canalSourceConfig.getSyncMode().isRow(); // 如果是rowMode模式,所有字段都需要标记为updated + + // 变更后的主键 + Map keyColumns = new LinkedHashMap(); + // 变更前的主键 + Map oldKeyColumns = new LinkedHashMap(); + // 有变化的非主键 + Map notKeyColumns = new LinkedHashMap(); + + if (eventType.isInsert()) { + for (Column column : afterColumns) { + if (column.getIsKey()) { + keyColumns.put(column.getName(), copyEventColumn(column, true)); + } else { + // mysql 有效 + notKeyColumns.put(column.getName(), copyEventColumn(column, true)); + } + } + } else if (eventType.isDelete()) { + for (Column column : beforeColumns) { + if (column.getIsKey()) { + keyColumns.put(column.getName(), copyEventColumn(column, true)); + } else { + // mysql 有效 + notKeyColumns.put(column.getName(), copyEventColumn(column, true)); + } + } + } else if (eventType.isUpdate()) { + // 获取变更前的主键. + for (Column column : beforeColumns) { + if (column.getIsKey()) { + oldKeyColumns.put(column.getName(), copyEventColumn(column, true)); + // 同时记录一下new + // key,因为mysql5.6之后出现了minimal模式,after里会没有主键信息,需要在before记录中找 + keyColumns.put(column.getName(), copyEventColumn(column, true)); + } else { + if (isRowMode && entry.getHeader().getSourceType() == CanalEntry.Type.ORACLE) { + // 针对行记录同步时,针对oracle记录一下非主键的字段,因为update时针对未变更的字段在aftercolume里没有 + notKeyColumns.put(column.getName(), copyEventColumn(column, isRowMode)); + } + } + } + for (Column column : afterColumns) { + if (column.getIsKey()) { + // 获取变更后的主键 + keyColumns.put(column.getName(), copyEventColumn(column, true)); + } else if (isRowMode || entry.getHeader().getSourceType() == CanalEntry.Type.ORACLE + || column.getUpdated()) { + // 在update操作时,oracle和mysql存放变更的非主键值的方式不同,oracle只有变更的字段; + // mysql会把变更前和变更后的字段都发出来,只需要取有变更的字段. 
+ // 如果是oracle库,after里一定为对应的变更字段 + + boolean isUpdate = true; + if (entry.getHeader().getSourceType() == CanalEntry.Type.MYSQL) { // mysql的after里部分数据为未变更,oracle里after里为变更字段 + isUpdate = column.getUpdated(); + } + + notKeyColumns.put(column.getName(), copyEventColumn(column, isUpdate));// 如果是rowMode,所有字段都为updated + } + } + + if (entry.getHeader().getSourceType() == CanalEntry.Type.ORACLE) { // 针对oracle进行特殊处理 + checkUpdateKeyColumns(oldKeyColumns, keyColumns); + } + } + + List keys = new ArrayList<>(keyColumns.values()); + List oldKeys = new ArrayList<>(oldKeyColumns.values()); + List columns = new ArrayList<>(notKeyColumns.values()); + + keys.sort(new EventColumnIndexComparable()); + oldKeys.sort(new EventColumnIndexComparable()); + columns.sort(new EventColumnIndexComparable()); + if (!keyColumns.isEmpty()) { + canalConnectRecord.setKeys(keys); + if (canalConnectRecord.getEventType().isUpdate() && !oldKeys.equals(keys)) { // update类型,如果存在主键不同,则记录下old + // keys为变更前的主键 + canalConnectRecord.setOldKeys(oldKeys); + } + canalConnectRecord.setColumns(columns); + } else { + throw new RuntimeException("this row data has no pks , entry: " + entry.toString() + " and rowData: " + + rowData); + } + + return canalConnectRecord; + } + + /** + * 在oracle中,补充没有变更的主键
If the post-change keys are empty, they are copied straight from the old keys.
如果变更前后的主键数目不相等,把old中存在而new中不存在的主键拷贝到new中. + * + * @param oldKeyColumns + * @param keyColumns + */ + private void checkUpdateKeyColumns(Map oldKeyColumns, Map keyColumns) { + // 在变更前没有主键的情况 + if (oldKeyColumns.size() == 0) { + return; + } + // 变更后的主键数据大于变更前的,不符合 + if (keyColumns.size() > oldKeyColumns.size()) { + return; + } + // 主键没有变更,把所有变更前的主键拷贝到变更后的主键中. + if (keyColumns.size() == 0) { + keyColumns.putAll(oldKeyColumns); + return; + } + + // 把old中存在而new中不存在的主键拷贝到new中 + if (oldKeyColumns.size() != keyColumns.size()) { + for (String oldKey : oldKeyColumns.keySet()) { + if (keyColumns.get(oldKey) == null) { + keyColumns.put(oldKey, oldKeyColumns.get(oldKey)); + } + } + } + } + + /** + * 把 erosa-protocol's Column 转化成 otter's model EventColumn. + * + * @param column + * @return + */ + private EventColumn copyEventColumn(Column column, boolean isUpdate) { + EventColumn eventColumn = new EventColumn(); + eventColumn.setIndex(column.getIndex()); + eventColumn.setKey(column.getIsKey()); + eventColumn.setNull(column.getIsNull()); + eventColumn.setColumnName(column.getName()); + eventColumn.setColumnValue(column.getValue()); + eventColumn.setUpdate(isUpdate); + eventColumn.setColumnType(column.getSqlType()); + + return eventColumn; + } + +// private String buildName(String name, ModeValue sourceModeValue, ModeValue targetModeValue) { +// if (targetModeValue.getMode().isWildCard()) { +// return name; // 通配符,认为源和目标一定是一致的 +// } else if (targetModeValue.getMode().isMulti()) { +// int index = ConfigHelper.indexIgnoreCase(sourceModeValue.getMultiValue(), name); +// if (index == -1) { +// throw new TransformException("can not found namespace or name in media:" + sourceModeValue.toString()); +// } +// +// return targetModeValue.getMultiValue().get(index); +// } else { +// return targetModeValue.getSingleValue(); +// } +// } + +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceConnector.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceConnector.java new file mode 100644 index 0000000000..8b6d95abfc --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/source/connector/CanalSourceConnector.java @@ -0,0 +1,378 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.eventmesh.connector.canal.source.connector; + +import org.apache.eventmesh.common.config.connector.Config; +import org.apache.eventmesh.common.config.connector.offset.OffsetStorageConfig; +import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSinkConfig; +import org.apache.eventmesh.common.config.connector.rdb.canal.CanalSourceConfig; +import org.apache.eventmesh.common.config.connector.rdb.canal.SinkConnectorConfig; +import org.apache.eventmesh.common.config.connector.rdb.canal.SourceConnectorConfig; +import org.apache.eventmesh.common.remote.job.SyncConsistency; +import org.apache.eventmesh.common.remote.job.SyncMode; +import org.apache.eventmesh.common.remote.offset.RecordPosition; +import org.apache.eventmesh.common.utils.JsonUtils; +import org.apache.eventmesh.connector.canal.CanalConnectRecord; +import org.apache.eventmesh.connector.canal.DatabaseConnection; +import org.apache.eventmesh.connector.canal.source.EntryParser; +import org.apache.eventmesh.openconnect.api.ConnectorCreateService; +import org.apache.eventmesh.openconnect.api.connector.ConnectorContext; +import org.apache.eventmesh.openconnect.api.connector.SourceConnectorContext; +import org.apache.eventmesh.openconnect.api.source.Source; +import org.apache.eventmesh.openconnect.offsetmgmt.api.data.ConnectRecord; +import org.apache.eventmesh.common.remote.offset.canal.CanalRecordOffset; +import org.apache.eventmesh.common.remote.offset.canal.CanalRecordPartition; +import org.apache.eventmesh.openconnect.offsetmgmt.api.storage.OffsetStorageReader; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.LockSupport; + + +import com.alibaba.otter.canal.instance.core.CanalInstance; +import com.alibaba.otter.canal.instance.core.CanalInstanceGenerator; +import com.alibaba.otter.canal.instance.manager.CanalInstanceWithManager; +import com.alibaba.otter.canal.instance.manager.model.Canal; +import com.alibaba.otter.canal.instance.manager.model.CanalParameter; +import com.alibaba.otter.canal.instance.manager.model.CanalParameter.ClusterMode; +import com.alibaba.otter.canal.instance.manager.model.CanalParameter.HAMode; +import com.alibaba.otter.canal.instance.manager.model.CanalParameter.IndexMode; +import com.alibaba.otter.canal.instance.manager.model.CanalParameter.MetaMode; +import com.alibaba.otter.canal.instance.manager.model.CanalParameter.RunMode; +import com.alibaba.otter.canal.instance.manager.model.CanalParameter.SourcingType; +import com.alibaba.otter.canal.instance.manager.model.CanalParameter.StorageMode; +import com.alibaba.otter.canal.parse.CanalEventParser; +import com.alibaba.otter.canal.parse.ha.CanalHAController; +import com.alibaba.otter.canal.parse.inbound.mysql.MysqlEventParser; +import com.alibaba.otter.canal.protocol.CanalEntry; +import com.alibaba.otter.canal.protocol.CanalEntry.Entry; +import com.alibaba.otter.canal.protocol.ClientIdentity; +import com.alibaba.otter.canal.server.embedded.CanalServerWithEmbedded; +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class CanalSourceConnector implements Source, ConnectorCreateService { + + private CanalSourceConfig sourceConfig; + + private CanalServerWithEmbedded canalServer; + + private ClientIdentity clientIdentity; + + private 
String filter = null; + + private volatile boolean running = false; + + private static final int maxEmptyTimes = 10; + + @Override + public Class<? extends Config> configClass() { + return CanalSourceConfig.class; + } + + @Override + public void init(Config config) throws Exception { + // init config for canal source connector + this.sourceConfig = (CanalSourceConfig) config; + } + + @Override + public void init(ConnectorContext connectorContext) throws Exception { + SourceConnectorContext sourceConnectorContext = (SourceConnectorContext) connectorContext; + this.sourceConfig = (CanalSourceConfig) sourceConnectorContext.getSourceConfig(); +// this.offsetStorageReader = sourceConnectorContext.getOffsetStorageReader(); + // init source database connection +// DatabaseConnection.sourceConfig = sourceConfig; +// DatabaseConnection.initSourceConnection(); + + canalServer = CanalServerWithEmbedded.instance(); + + canalServer.setCanalInstanceGenerator(new CanalInstanceGenerator() { + @Override + public CanalInstance generate(String destination) { + Canal canal = buildCanal(sourceConfig); + + CanalInstanceWithManager instance = new CanalInstanceWithManager(canal, filter) { + + protected CanalHAController initHaController() { + return super.initHaController(); + } + + protected void startEventParserInternal(CanalEventParser parser, boolean isGroup) { + super.startEventParserInternal(parser, isGroup); + + if (eventParser instanceof MysqlEventParser) { + // set eventParser support type + ((MysqlEventParser) eventParser).setSupportBinlogFormats("ROW"); + ((MysqlEventParser) eventParser).setSupportBinlogImages("FULL"); + MysqlEventParser mysqlEventParser = (MysqlEventParser) eventParser; + mysqlEventParser.setParallel(false); + + CanalHAController haController = mysqlEventParser.getHaController(); + if (!haController.isStart()) { + haController.start(); + } + } + } + }; + return instance; + } + }); + } + + private Canal buildCanal(CanalSourceConfig sourceConfig) { + // set the slaveId so it stays unique when the same source is referenced under multiple pipelineIds + long slaveId = 10000; // default base value + if (sourceConfig.getSlaveId() != null) { + slaveId = sourceConfig.getSlaveId(); + } + + Canal canal = new Canal(); + canal.setId(sourceConfig.getCanalInstanceId()); + canal.setName(sourceConfig.getDestination()); + canal.setDesc(sourceConfig.getDesc()); + + CanalParameter parameter = new CanalParameter(); + + parameter.setRunMode(RunMode.EMBEDDED); + parameter.setClusterMode(ClusterMode.STANDALONE); + parameter.setMetaMode(MetaMode.MEMORY); + parameter.setHaMode(HAMode.HEARTBEAT); + parameter.setIndexMode(IndexMode.MEMORY); + parameter.setStorageMode(StorageMode.MEMORY); + parameter.setMemoryStorageBufferSize(32 * 1024); + + parameter.setSourcingType(SourcingType.MYSQL); + parameter.setDbAddresses(Collections.singletonList(new InetSocketAddress(sourceConfig.getSourceConnectorConfig().getDbAddress(), + sourceConfig.getSourceConnectorConfig().getDbPort()))); + parameter.setDbUsername(sourceConfig.getSourceConnectorConfig().getUserName()); + parameter.setDbPassword(sourceConfig.getSourceConnectorConfig().getPassWord()); + + // check positions + // example: Arrays.asList("{\"journalName\":\"mysql-bin.000001\",\"position\":6163L,\"timestamp\":1322803601000L}", + // "{\"journalName\":\"mysql-bin.000001\",\"position\":6163L,\"timestamp\":1322803601000L}") + if (sourceConfig.getRecordPositions() != null && !sourceConfig.getRecordPositions().isEmpty()) { + List<RecordPosition> recordPositions = sourceConfig.getRecordPositions(); + List<String> positions = new ArrayList<>(); + recordPositions.forEach(recordPosition -> { 
Map<String, Object> recordPositionMap = new HashMap<>(); + CanalRecordPartition canalRecordPartition = (CanalRecordPartition) (recordPosition.getRecordPartition()); + CanalRecordOffset canalRecordOffset = (CanalRecordOffset) (recordPosition.getRecordOffset()); + recordPositionMap.put("journalName", canalRecordPartition.getJournalName()); + recordPositionMap.put("timestamp", canalRecordPartition.getTimeStamp()); + recordPositionMap.put("position", canalRecordOffset.getOffset()); + positions.add(JsonUtils.toJSONString(recordPositionMap)); + }); + parameter.setPositions(positions); + } + + parameter.setSlaveId(slaveId); + + parameter.setDefaultConnectionTimeoutInSeconds(30); + parameter.setConnectionCharset("UTF-8"); + parameter.setConnectionCharsetNumber((byte) 33); + parameter.setReceiveBufferSize(8 * 1024); + parameter.setSendBufferSize(8 * 1024); + + // heartbeat detect + parameter.setDetectingEnable(false); + + parameter.setDdlIsolation(sourceConfig.isDdlSync()); + parameter.setFilterTableError(sourceConfig.isFilterTableError()); + parameter.setMemoryStorageRawEntry(false); + + canal.setCanalParameter(parameter); + return canal; + } + + + @Override + public void start() throws Exception { + if (running) { + return; + } + canalServer.start(); + + canalServer.start(sourceConfig.getDestination()); + this.clientIdentity = new ClientIdentity(sourceConfig.getDestination(), sourceConfig.getClientId(), filter); + canalServer.subscribe(clientIdentity); + + running = true; + } + + + @Override + public void commit(ConnectRecord record) { + long batchId = Long.parseLong(record.getExtension("messageId")); + canalServer.ack(clientIdentity, batchId); + } + + @Override + public String name() { + return this.sourceConfig.getSourceConnectorConfig().getConnectorName(); + } + + @Override + public void stop() { + if (!running) { + return; + } + running = false; + canalServer.stop(sourceConfig.getDestination()); + canalServer.stop(); + } + + @Override + public List<ConnectRecord> poll() { + int emptyTimes = 0; + com.alibaba.otter.canal.protocol.Message message = null; + if (sourceConfig.getBatchTimeout() < 0) { // perform polling + while (running) { + message = canalServer.getWithoutAck(clientIdentity, sourceConfig.getBatchSize()); + if (message == null || message.getId() == -1L) { // empty + applyWait(emptyTimes++); + } else { + break; + } + } + } else { // perform with timeout + while (running) { + message = + canalServer.getWithoutAck(clientIdentity, sourceConfig.getBatchSize(), sourceConfig.getBatchTimeout(), TimeUnit.MILLISECONDS); + if (message == null || message.getId() == -1L) { // empty + continue; + } + break; + } + } + + List<Entry> entries; + assert message != null; + if (message.isRaw()) { + entries = new ArrayList<>(message.getRawEntries().size()); + for (ByteString entry : message.getRawEntries()) { + try { + entries.add(CanalEntry.Entry.parseFrom(entry)); + } catch (InvalidProtocolBufferException e) { + throw new RuntimeException(e); + } + } + } else { + entries = message.getEntries(); + } + + EntryParser entryParser = new EntryParser(); + + List<ConnectRecord> result = new ArrayList<>(); + + List<CanalConnectRecord> connectRecordList = entryParser.parse(sourceConfig, entries); + + if (connectRecordList != null && !connectRecordList.isEmpty()) { + CanalConnectRecord lastRecord = connectRecordList.get(connectRecordList.size() - 1); + + CanalRecordPartition canalRecordPartition = new CanalRecordPartition(); + canalRecordPartition.setJournalName(lastRecord.getJournalName()); + canalRecordPartition.setTimeStamp(lastRecord.getExecuteTime()); + + CanalRecordOffset 
canalRecordOffset = new CanalRecordOffset(); + canalRecordOffset.setOffset(lastRecord.getBinLogOffset()); + + ConnectRecord connectRecord = new ConnectRecord(canalRecordPartition, canalRecordOffset, System.currentTimeMillis()); + connectRecord.addExtension("messageId", String.valueOf(message.getId())); + connectRecord.setData(connectRecordList); + result.add(connectRecord); + } + + return result; + } + + // Handle the no-data case and avoid busy-waiting in an empty loop + private void applyWait(int emptyTimes) { + int newEmptyTimes = Math.min(emptyTimes, maxEmptyTimes); + if (emptyTimes <= 3) { + Thread.yield(); + } else { + LockSupport.parkNanos(1000 * 1000L * newEmptyTimes); + } + } + + @Override + public Source create() { + return new CanalSourceConnector(); + } + + public static void main(String[] args) { + CanalSourceConfig canalSourceConfig = new CanalSourceConfig(); + canalSourceConfig.setCanalInstanceId(12L); + canalSourceConfig.setDesc("canalSourceDemo"); + canalSourceConfig.setSlaveId(123L); + canalSourceConfig.setClientId((short) 1); + canalSourceConfig.setDestination("destinationGroup"); + canalSourceConfig.setDdlSync(false); + canalSourceConfig.setFilterTableError(false); + canalSourceConfig.setSyncMode(SyncMode.ROW); + canalSourceConfig.setSyncConsistency(SyncConsistency.BASE); + + SourceConnectorConfig sourceConnectorConfig = new SourceConnectorConfig(); + sourceConnectorConfig.setConnectorName("canalSourceConnector"); + sourceConnectorConfig.setDbAddress("127.0.0.1"); + sourceConnectorConfig.setDbPort(3306); + sourceConnectorConfig.setUrl("jdbc:mysql://127.0.0.1:3306/test_db?serverTimezone=GMT%2B8&characterEncoding=utf-8&useSSL=false"); + sourceConnectorConfig.setSchemaName("test_db"); + sourceConnectorConfig.setTableName("people"); + sourceConnectorConfig.setUserName("root"); + sourceConnectorConfig.setPassWord("mike920830"); + + OffsetStorageConfig offsetStorageConfig = new OffsetStorageConfig(); + offsetStorageConfig.setOffsetStorageAddr("127.0.0.1:8081"); + offsetStorageConfig.setOffsetStorageType("admin"); + Map<String, String> extensionMap = new HashMap<>(); + extensionMap.put("jobId", "1"); + offsetStorageConfig.setExtensions(extensionMap); + + canalSourceConfig.setSourceConnectorConfig(sourceConnectorConfig); + canalSourceConfig.setOffsetStorageConfig(offsetStorageConfig); + + System.out.println(JsonUtils.toJSONString(canalSourceConfig)); + + CanalSinkConfig canalSinkConfig = new CanalSinkConfig(); + canalSinkConfig.setSyncMode(SyncMode.ROW); + + SinkConnectorConfig sinkConnectorConfig = new SinkConnectorConfig(); + sinkConnectorConfig.setConnectorName("canalSinkConnector"); + sinkConnectorConfig.setDbAddress("127.0.0.1"); + sinkConnectorConfig.setDbPort(25000); + sinkConnectorConfig.setUrl("jdbc:mysql://127.0.0.1:25000/test_db?serverTimezone=GMT%2B8&characterEncoding=utf-8&useSSL=false"); + sinkConnectorConfig.setSchemaName("test_db"); + sinkConnectorConfig.setTableName("people"); + sinkConnectorConfig.setUserName("clougence"); + sinkConnectorConfig.setPassWord("123456"); + + canalSinkConfig.setSinkConnectorConfig(sinkConnectorConfig); + + System.out.println(JsonUtils.toJSONString(canalSinkConfig)); + } +}
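A rough sketch of how a runtime might drive this connector's poll/commit contract (the publish consumer is a hypothetical stand-in for handing records to EventMesh; error handling and shutdown are omitted):

    // Hypothetical driver loop; not part of this patch.
    void drive(CanalSourceConnector connector, CanalSourceConfig config,
               java.util.function.Consumer<ConnectRecord> publish) throws Exception {
        connector.init(config);   // or init(ConnectorContext) when run inside the openconnect runtime
        connector.start();
        while (true) {
            for (ConnectRecord record : connector.poll()) {
                publish.accept(record);   // assumption: downstream delivery succeeds here
                connector.commit(record); // acks the whole canal batch via the messageId extension
            }
        }
    }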
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/template/AbstractSqlTemplate.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/template/AbstractSqlTemplate.java new file mode 100644 index 0000000000..8329b476cc --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/template/AbstractSqlTemplate.java @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal.template; + +/** + * Default CRUD SQL assembly based on standard SQL + */ +public abstract class AbstractSqlTemplate implements SqlTemplate { + + private static final String DOT = "."; + + public String getSelectSql(String schemaName, String tableName, String[] pkNames, String[] columnNames) { + StringBuilder sql = new StringBuilder("select "); + int size = columnNames.length; + for (int i = 0; i < size; i++) { + sql.append(appendEscape(columnNames[i])).append((i + 1 < size) ? " , " : ""); + } + + sql.append(" from ").append(getFullName(schemaName, tableName)).append(" where ( "); + appendColumnEquals(sql, pkNames, "and"); + sql.append(" ) "); + return sql.toString().intern(); // intern() to deduplicate identical SQL strings + } + + public String getUpdateSql(String schemaName, String tableName, String[] pkNames, String[] columnNames, boolean updatePks, String shardColumn) { + StringBuilder sql = new StringBuilder("update " + getFullName(schemaName, tableName) + " set "); + appendExcludeSingleShardColumnEquals(sql, columnNames, ",", updatePks, shardColumn); + sql.append(" where ("); + appendColumnEquals(sql, pkNames, "and"); + sql.append(")"); + return sql.toString().intern(); // intern() to deduplicate identical SQL strings + } + + public String getInsertSql(String schemaName, String tableName, String[] pkNames, String[] columnNames) { + StringBuilder sql = new StringBuilder("insert into " + getFullName(schemaName, tableName) + "("); + String[] allColumns = new String[pkNames.length + columnNames.length]; + System.arraycopy(columnNames, 0, allColumns, 0, columnNames.length); + System.arraycopy(pkNames, 0, allColumns, columnNames.length, pkNames.length); + + int size = allColumns.length; + for (int i = 0; i < size; i++) { + sql.append(appendEscape(allColumns[i])).append((i + 1 < size) ? 
"," : ""); + } + + sql.append(") values ("); + appendColumnQuestions(sql, allColumns); + sql.append(")"); + return sql.toString().intern();// intern优化,避免出现大量相同的字符串 + } + + public String getDeleteSql(String schemaName, String tableName, String[] pkNames) { + StringBuilder sql = new StringBuilder("delete from " + getFullName(schemaName, tableName) + " where "); + appendColumnEquals(sql, pkNames, "and"); + return sql.toString().intern();// intern优化,避免出现大量相同的字符串 + } + + protected String getFullName(String schemaName, String tableName) { + StringBuilder sb = new StringBuilder(); + if (schemaName != null) { + sb.append(appendEscape(schemaName)).append(DOT); + } + sb.append(appendEscape(tableName)); + return sb.toString().intern(); + } + + // ================ helper method ============ + + protected String appendEscape(String columnName) { + return columnName; + } + + protected void appendColumnQuestions(StringBuilder sql, String[] columns) { + int size = columns.length; + for (int i = 0; i < size; i++) { + sql.append("?").append((i + 1 < size) ? " , " : ""); + } + } + + protected void appendColumnEquals(StringBuilder sql, String[] columns, String separator) { + int size = columns.length; + for (int i = 0; i < size; i++) { + sql.append(" ").append(appendEscape(columns[i])).append(" = ").append("? "); + if (i != size - 1) { + sql.append(separator); + } + } + } + + /** + * 针对DRDS改造, 在 update set 集合中, 排除 单个拆分键 的赋值操作 + * + * @param sql + * @param columns + * @param separator + * @param excludeShardColumn 需要排除的 拆分列 + */ + protected void appendExcludeSingleShardColumnEquals(StringBuilder sql, String[] columns, String separator, boolean updatePks, + String excludeShardColumn) { + int size = columns.length; + for (int i = 0; i < size; i++) { + // 如果是DRDS数据库, 并且存在拆分键 且 等于当前循环列, 跳过 + if (!updatePks && columns[i].equals(excludeShardColumn)) { + continue; + } + sql.append(" ").append(appendEscape(columns[i])).append(" = ").append("? "); + if (i != size - 1) { + sql.append(separator); + } + } + } +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/template/MysqlSqlTemplate.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/template/MysqlSqlTemplate.java new file mode 100644 index 0000000000..92920ea5e2 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/template/MysqlSqlTemplate.java @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/template/MysqlSqlTemplate.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/template/MysqlSqlTemplate.java new file mode 100644 index 0000000000..92920ea5e2 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/template/MysqlSqlTemplate.java @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal.template; + +/** + * MySQL SQL generation template + */ +public class MysqlSqlTemplate extends AbstractSqlTemplate { + + private static final String ESCAPE = "`"; + + public String getMergeSql(String schemaName, String tableName, String[] pkNames, String[] columnNames, + String[] viewColumnNames, boolean includePks, String shardColumn) { + StringBuilder sql = new StringBuilder("insert into " + getFullName(schemaName, tableName) + "("); + int size = columnNames.length; + for (int i = 0; i < size; i++) { + sql.append(appendEscape(columnNames[i])).append(" , "); + } + size = pkNames.length; + for (int i = 0; i < size; i++) { + sql.append(appendEscape(pkNames[i])).append((i + 1 < size) ? " , " : ""); + } + + sql.append(") values ("); + size = columnNames.length; + for (int i = 0; i < size; i++) { + sql.append("?").append(" , "); + } + size = pkNames.length; + for (int i = 0; i < size; i++) { + sql.append("?").append((i + 1 < size) ? " , " : ""); + } + sql.append(")"); + sql.append(" on duplicate key update "); + + size = columnNames.length; + for (int i = 0; i < size; i++) { + // for DRDS: skip when a shard key exists and matches the current column + if (!includePks && shardColumn != null && columnNames[i].equals(shardColumn)) { + continue; + } + + sql.append(appendEscape(columnNames[i])) + .append("=values(") + .append(appendEscape(columnNames[i])) + .append(")"); + if (includePks) { + sql.append(" , "); + } else { + sql.append((i + 1 < size) ? " , " : ""); + } + } + + if (includePks) { + // MySQL's merge SQL performs an update when either a unique or primary key matches, so the PK columns must be updated as well + size = pkNames.length; + for (int i = 0; i < size; i++) { + sql.append(appendEscape(pkNames[i])).append("=values(").append(appendEscape(pkNames[i])).append(")"); + sql.append((i + 1 < size) ? " , " : ""); + } + } + + return sql.toString().intern(); // intern() to deduplicate identical SQL strings + } + + protected String appendEscape(String columnName) { + return ESCAPE + columnName + ESCAPE; + } + +}
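For reference, getMergeSql builds MySQL's upsert form; with the same hypothetical table as above (includePks = false, no shard column) it yields:

    String merge = new MysqlSqlTemplate().getMergeSql("test_db", "people",
            new String[]{"id"}, new String[]{"name", "age"}, null, false, null);
    // roughly: insert into `test_db`.`people`(`name` , `age` , `id`) values (? , ? , ?)
    //          on duplicate key update `name`=values(`name`) , `age`=values(`age`)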
diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/template/SqlTemplate.java b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/template/SqlTemplate.java new file mode 100644 index 0000000000..6b3fd4f637 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/java/org/apache/eventmesh/connector/canal/template/SqlTemplate.java @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2010-2101 Alibaba Group Holding Limited. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.canal.template; + +/** + * SQL construction template operations + */ +public interface SqlTemplate { + + public String getSelectSql(String schemaName, String tableName, String[] pkNames, String[] columnNames); + + public String getUpdateSql(String schemaName, String tableName, String[] pkNames, String[] columnNames, boolean updatePks, String shardColumn); + + public String getDeleteSql(String schemaName, String tableName, String[] pkNames); + + public String getInsertSql(String schemaName, String tableName, String[] pkNames, String[] columnNames); + + /** + * Get the corresponding merge SQL + */ + public String getMergeSql(String schemaName, String tableName, String[] pkNames, String[] columnNames, + String[] viewColumnNames, boolean updatePks, String shardColumn); +} diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/resources/META-INF/eventmesh/org.apache.eventmesh.openconnect.api.ConnectorCreateService b/eventmesh-connectors/eventmesh-connector-canal/src/main/resources/META-INF/eventmesh/org.apache.eventmesh.openconnect.api.ConnectorCreateService new file mode 100644 index 0000000000..f55b34d852 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/resources/META-INF/eventmesh/org.apache.eventmesh.openconnect.api.ConnectorCreateService @@ -0,0 +1,19 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +MySQL-Source=org.apache.eventmesh.connector.canal.source.connector.CanalSourceConnector +MySQL-Sink=org.apache.eventmesh.connector.canal.sink.connector.CanalSinkConnector diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/resources/server-config.yml b/eventmesh-connectors/eventmesh-connector-canal/src/main/resources/server-config.yml new file mode 100644 index 0000000000..5f66dd0f68 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/resources/server-config.yml @@ -0,0 +1,19 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +sourceEnable: true +sinkEnable: true diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/resources/sink-config.yml b/eventmesh-connectors/eventmesh-connector-canal/src/main/resources/sink-config.yml new file mode 100644 index 0000000000..210361dc28 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/resources/sink-config.yml @@ -0,0 +1,30 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +pubSubConfig: + meshAddress: 127.0.0.1:10000 + subject: TopicTest + idc: FT + env: PRD + group: rocketmqSink + appId: 5031 + userName: rocketmqSinkUser + passWord: rocketmqPassWord +connectorConfig: + connectorName: rocketmqSink + nameServer: 127.0.0.1:9876 + topic: TopicTest diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/main/resources/source-config.yml b/eventmesh-connectors/eventmesh-connector-canal/src/main/resources/source-config.yml new file mode 100644 index 0000000000..7a7880b877 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/main/resources/source-config.yml @@ -0,0 +1,40 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +pubSubConfig: + meshAddress: 127.0.0.1:10000 + subject: TopicTest + idc: FT + env: PRD + group: rocketmqSource + appId: 5032 + userName: rocketmqSourceUser + passWord: rocketmqPassWord +connectorConfig: + connectorName: rocketmqSource + nameserver: 127.0.0.1:9876 + topic: TopicTest + commitOffsetIntervalMs: 5000 +offsetStorageConfig: + offsetStorageType: nacos + offsetStorageAddr: 127.0.0.1:8848 + extensions: { + #same with topic + dataId: TopicTest, + #same with group + group: rocketmqSource + } diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/test/resources/sink-config.yml b/eventmesh-connectors/eventmesh-connector-canal/src/test/resources/sink-config.yml new file mode 100644 index 0000000000..210361dc28 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/test/resources/sink-config.yml @@ -0,0 +1,30 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +pubSubConfig: + meshAddress: 127.0.0.1:10000 + subject: TopicTest + idc: FT + env: PRD + group: rocketmqSink + appId: 5031 + userName: rocketmqSinkUser + passWord: rocketmqPassWord +connectorConfig: + connectorName: rocketmqSink + nameServer: 127.0.0.1:9876 + topic: TopicTest diff --git a/eventmesh-connectors/eventmesh-connector-canal/src/test/resources/source-config.yml b/eventmesh-connectors/eventmesh-connector-canal/src/test/resources/source-config.yml new file mode 100644 index 0000000000..7a7880b877 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-canal/src/test/resources/source-config.yml @@ -0,0 +1,40 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +pubSubConfig: + meshAddress: 127.0.0.1:10000 + subject: TopicTest + idc: FT + env: PRD + group: rocketmqSource + appId: 5032 + userName: rocketmqSourceUser + passWord: rocketmqPassWord +connectorConfig: + connectorName: rocketmqSource + nameserver: 127.0.0.1:9876 + topic: TopicTest + commitOffsetIntervalMs: 5000 +offsetStorageConfig: + offsetStorageType: nacos + offsetStorageAddr: 127.0.0.1:8848 + extensions: { + #same with topic + dataId: TopicTest, + #same with group + group: rocketmqSource + } diff --git a/eventmesh-connectors/eventmesh-connector-lark/src/main/java/org/apache/eventmesh/connector/lark/ConfigUtils.java b/eventmesh-connectors/eventmesh-connector-lark/src/main/java/org/apache/eventmesh/connector/lark/ConfigUtils.java new file mode 100644 index 0000000000..f0017397e4 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-lark/src/main/java/org/apache/eventmesh/connector/lark/ConfigUtils.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.eventmesh.connector.lark; + +import org.apache.eventmesh.common.config.connector.lark.SinkConnectorConfig; + +import org.apache.commons.lang3.StringUtils; + +import com.lark.oapi.service.im.v1.enums.ReceiveIdTypeEnum; + +public class ConfigUtils { + + public static void validateSinkConfiguration(SinkConnectorConfig sinkConnectorConfig) { + // validate blank + if (StringUtils.isAnyBlank(sinkConnectorConfig.getAppId(), sinkConnectorConfig.getAppSecret(), sinkConnectorConfig.getReceiveId())) { + throw new IllegalArgumentException("appId, appSecret or receiveId is blank, please check it."); + } + + // validate receiveIdType + if (!StringUtils.containsAny(sinkConnectorConfig.getReceiveIdType(), ReceiveIdTypeEnum.CHAT_ID.getValue(), + ReceiveIdTypeEnum.EMAIL.getValue(), + ReceiveIdTypeEnum.OPEN_ID.getValue(), + ReceiveIdTypeEnum.USER_ID.getValue(), + ReceiveIdTypeEnum.UNION_ID.getValue())) { + throw new IllegalArgumentException( + String.format("sinkConnectorConfig.receiveIdType=[%s] is invalid.", sinkConnectorConfig.getReceiveIdType())); + } + } +}
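A quick sketch of the expected call site for this validation (all values are placeholders, and the setter names assume the config class's usual Lombok-generated accessors):

    SinkConnectorConfig cfg = new SinkConnectorConfig();
    cfg.setAppId("cli_xxx");             // placeholder
    cfg.setAppSecret("app-secret");      // placeholder
    cfg.setReceiveId("ou_xxx");          // placeholder
    cfg.setReceiveIdType("open_id");     // must be one of chat_id/email/open_id/user_id/union_id
    ConfigUtils.validateSinkConfiguration(cfg);  // throws IllegalArgumentException otherwise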
diff --git a/eventmesh-connectors/eventmesh-connector-wecom/src/test/java/org/apache/eventmesh/connector/wecom/connector/MockRecordOffset.java b/eventmesh-connectors/eventmesh-connector-wecom/src/test/java/org/apache/eventmesh/connector/wecom/connector/MockRecordOffset.java new file mode 100644 index 0000000000..4331774ab1 --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-wecom/src/test/java/org/apache/eventmesh/connector/wecom/connector/MockRecordOffset.java @@ -0,0 +1,10 @@ +package org.apache.eventmesh.connector.wecom.connector; + +import org.apache.eventmesh.common.remote.offset.RecordOffset; + +public class MockRecordOffset extends RecordOffset { + @Override + public Class<? extends RecordOffset> getRecordOffsetClass() { + return MockRecordOffset.class; + } +} diff --git a/eventmesh-connectors/eventmesh-connector-wecom/src/test/java/org/apache/eventmesh/connector/wecom/connector/MockRecordPartition.java b/eventmesh-connectors/eventmesh-connector-wecom/src/test/java/org/apache/eventmesh/connector/wecom/connector/MockRecordPartition.java new file mode 100644 index 0000000000..d11e5cb00a --- /dev/null +++ b/eventmesh-connectors/eventmesh-connector-wecom/src/test/java/org/apache/eventmesh/connector/wecom/connector/MockRecordPartition.java @@ -0,0 +1,10 @@ +package org.apache.eventmesh.connector.wecom.connector; + +import org.apache.eventmesh.common.remote.offset.RecordPartition; + +public class MockRecordPartition extends RecordPartition { + @Override + public Class<? extends RecordPartition> getRecordPartitionClass() { + return MockRecordPartition.class; + } +} diff --git a/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/build.gradle b/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/build.gradle new file mode 100644 index 0000000000..70defef627 --- /dev/null +++ b/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/build.gradle @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +dependencies { + implementation project(":eventmesh-openconnect:eventmesh-openconnect-offsetmgmt-plugin:eventmesh-openconnect-offsetmgmt-api") + implementation project(":eventmesh-common") + testImplementation "org.mockito:mockito-core" + + compileOnly 'org.projectlombok:lombok' + annotationProcessor 'org.projectlombok:lombok' + + implementation "io.grpc:grpc-core" + implementation "io.grpc:grpc-protobuf" + implementation "io.grpc:grpc-stub" + implementation "io.grpc:grpc-netty" + implementation "io.grpc:grpc-netty-shaded" +} diff --git a/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/gradle.properties b/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/gradle.properties new file mode 100644 index 0000000000..09957a9d24 --- /dev/null +++ b/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/gradle.properties @@ -0,0 +1,18 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +pluginType=offsetMgmt +pluginName=admin \ No newline at end of file diff --git a/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/src/main/java/org/apache/eventmesh/openconnect/offsetmgmt/admin/AdminOffsetService.java b/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/src/main/java/org/apache/eventmesh/openconnect/offsetmgmt/admin/AdminOffsetService.java new file mode 100644 index 0000000000..a9e778f9df --- /dev/null +++ b/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/src/main/java/org/apache/eventmesh/openconnect/offsetmgmt/admin/AdminOffsetService.java @@ -0,0 +1,261 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.eventmesh.openconnect.offsetmgmt.admin; + +import org.apache.eventmesh.common.config.connector.offset.OffsetStorageConfig; +import org.apache.eventmesh.common.protocol.grpc.adminserver.AdminServiceGrpc; +import org.apache.eventmesh.common.protocol.grpc.adminserver.AdminServiceGrpc.AdminServiceBlockingStub; +import org.apache.eventmesh.common.protocol.grpc.adminserver.AdminServiceGrpc.AdminServiceStub; +import org.apache.eventmesh.common.protocol.grpc.adminserver.Metadata; +import org.apache.eventmesh.common.protocol.grpc.adminserver.Payload; +import org.apache.eventmesh.common.remote.JobState; +import org.apache.eventmesh.common.remote.job.DataSourceType; +import org.apache.eventmesh.common.remote.offset.RecordOffset; +import org.apache.eventmesh.common.remote.offset.RecordPartition; +import org.apache.eventmesh.common.remote.offset.RecordPosition; +import org.apache.eventmesh.common.remote.request.FetchPositionRequest; +import org.apache.eventmesh.common.remote.request.ReportPositionRequest; +import org.apache.eventmesh.common.remote.response.FetchPositionResponse; +import org.apache.eventmesh.common.utils.IPUtils; +import org.apache.eventmesh.common.utils.JsonUtils; +import org.apache.eventmesh.openconnect.offsetmgmt.api.storage.KeyValueStore; +import org.apache.eventmesh.openconnect.offsetmgmt.api.storage.MemoryBasedKeyValueStore; +import org.apache.eventmesh.openconnect.offsetmgmt.api.storage.OffsetManagementService; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.google.protobuf.Any; +import com.google.protobuf.UnsafeByteOperations; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.stub.StreamObserver; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class AdminOffsetService implements OffsetManagementService { + + private String adminServerAddr; + + private ManagedChannel channel; + + private AdminServiceStub adminServiceStub; + + private AdminServiceBlockingStub adminServiceBlockingStub; + + StreamObserver<Payload> responseObserver; + + StreamObserver<Payload> requestObserver; + + public KeyValueStore<RecordPartition, RecordOffset> positionStore; + + private String jobId; + + private JobState jobState; + + private DataSourceType dataSourceType; + + private DataSourceType dataSinkType; + + + @Override + public void start() { + + } + + @Override + public void stop() { + + } + + @Override + public void configure(OffsetStorageConfig config) { + OffsetManagementService.super.configure(config); + } + + @Override + public void persist() { + Map<RecordPartition, RecordOffset> recordMap = positionStore.getKVMap(); + + List<RecordPosition> recordToSyncList = new ArrayList<>(); + for (Map.Entry<RecordPartition, RecordOffset> entry : recordMap.entrySet()) { + RecordPosition recordPosition = new RecordPosition(entry.getKey(), entry.getValue()); + recordToSyncList.add(recordPosition); + } + + ReportPositionRequest reportPositionRequest = new ReportPositionRequest(); + reportPositionRequest.setJobID(jobId); + reportPositionRequest.setState(jobState); + reportPositionRequest.setAddress(IPUtils.getLocalAddress()); + + reportPositionRequest.setRecordPositionList(recordToSyncList); + + Metadata metadata = Metadata.newBuilder() + .setType(ReportPositionRequest.class.getSimpleName()) + .build(); + Payload payload = Payload.newBuilder() + .setMetadata(metadata) + .setBody(Any.newBuilder().setValue(UnsafeByteOperations. 
unsafeWrap(Objects.requireNonNull(JsonUtils.toJSONBytes(reportPositionRequest)))).build()) + .build(); + requestObserver.onNext(payload); + } + + @Override + public void load() { + + } + + @Override + public void synchronize() { + + } + + @Override + public Map<RecordPartition, RecordOffset> getPositionMap() { + // get from memory storage first + if (positionStore.getKVMap() == null || positionStore.getKVMap().isEmpty()) { + log.info("fetch position from admin server"); + FetchPositionRequest fetchPositionRequest = new FetchPositionRequest(); + fetchPositionRequest.setJobID(jobId); + fetchPositionRequest.setAddress(IPUtils.getLocalAddress()); + fetchPositionRequest.setDataSourceType(dataSourceType); + + Metadata metadata = Metadata.newBuilder() + .setType(FetchPositionRequest.class.getSimpleName()) + .build(); + + Payload request = Payload.newBuilder() + .setMetadata(metadata) + .setBody(Any.newBuilder().setValue(UnsafeByteOperations. + unsafeWrap(Objects.requireNonNull(JsonUtils.toJSONBytes(fetchPositionRequest)))).build()) + .build(); + Payload response = adminServiceBlockingStub.invoke(request); + if (response.getMetadata().getType().equals(FetchPositionResponse.class.getSimpleName())) { + FetchPositionResponse fetchPositionResponse = JsonUtils.parseObject(response.getBody().getValue().toStringUtf8(), FetchPositionResponse.class); + assert fetchPositionResponse != null; + if (fetchPositionResponse.isSuccess()) { + positionStore.put(fetchPositionResponse.getRecordPosition().getRecordPartition(), fetchPositionResponse.getRecordPosition().getRecordOffset()); + } + } + } + log.info("memory position map {}", positionStore.getKVMap()); + return positionStore.getKVMap(); + } + + @Override + public RecordOffset getPosition(RecordPartition partition) { + // get from memory storage first + if (positionStore.get(partition) == null) { + log.info("fetch position from admin server"); + FetchPositionRequest fetchPositionRequest = new FetchPositionRequest(); + fetchPositionRequest.setJobID(jobId); + fetchPositionRequest.setAddress(IPUtils.getLocalAddress()); + fetchPositionRequest.setDataSourceType(dataSourceType); + RecordPosition recordPosition = new RecordPosition(); + recordPosition.setRecordPartition(partition); + fetchPositionRequest.setRecordPosition(recordPosition); + + Metadata metadata = Metadata.newBuilder() + .setType(FetchPositionRequest.class.getSimpleName()) + .build(); + + Payload request = Payload.newBuilder() + .setMetadata(metadata) + .setBody(Any.newBuilder().setValue(UnsafeByteOperations. 
unsafeWrap(Objects.requireNonNull(JsonUtils.toJSONBytes(fetchPositionRequest)))).build()) + .build(); + Payload response = adminServiceBlockingStub.invoke(request); + if (response.getMetadata().getType().equals(FetchPositionResponse.class.getSimpleName())) { + FetchPositionResponse fetchPositionResponse = JsonUtils.parseObject(response.getBody().getValue().toStringUtf8(), FetchPositionResponse.class); + assert fetchPositionResponse != null; + if (fetchPositionResponse.isSuccess()) { + positionStore.put(fetchPositionResponse.getRecordPosition().getRecordPartition(), fetchPositionResponse.getRecordPosition().getRecordOffset()); + } + } + } + log.info("memory record position {}", positionStore.get(partition)); + return positionStore.get(partition); + } + + @Override + public void putPosition(Map<RecordPartition, RecordOffset> positions) { + positionStore.putAll(positions); + } + + @Override + public void putPosition(RecordPartition partition, RecordOffset position) { + positionStore.put(partition, position); + } + + @Override + public void removePosition(List<RecordPartition> partitions) { + if (partitions == null) { + return; + } + for (RecordPartition partition : partitions) { + positionStore.remove(partition); + } + } + + @Override + public void initialize(OffsetStorageConfig offsetStorageConfig) { + this.dataSourceType = offsetStorageConfig.getDataSourceType(); + this.dataSinkType = offsetStorageConfig.getDataSinkType(); + + this.adminServerAddr = offsetStorageConfig.getOffsetStorageAddr(); + this.channel = ManagedChannelBuilder.forTarget(adminServerAddr) + .usePlaintext() + .build(); + this.adminServiceStub = AdminServiceGrpc.newStub(channel).withWaitForReady(); + this.adminServiceBlockingStub = AdminServiceGrpc.newBlockingStub(channel).withWaitForReady(); + + responseObserver = new StreamObserver<Payload>() { + @Override + public void onNext(Payload response) { + log.info("receive message: {} ", response); + } + + @Override + public void onError(Throwable t) { + log.error("receive error message", t); + } + + @Override + public void onCompleted() { + log.info("finished receiving messages and completed"); + } + }; + + requestObserver = adminServiceStub.invokeBiStream(responseObserver); + + this.positionStore = new MemoryBasedKeyValueStore<>(); + String offset = offsetStorageConfig.getExtensions().get("offset"); + if (offset != null) { + Map<RecordPartition, RecordOffset> initialRecordOffsetMap = JsonUtils.parseTypeReferenceObject(offset, + new TypeReference<Map<RecordPartition, RecordOffset>>() { + }); + log.info("init record offset {}", initialRecordOffsetMap); + positionStore.putAll(initialRecordOffsetMap); + } + this.jobState = JobState.RUNNING; + this.jobId = offsetStorageConfig.getExtensions().get("jobId"); + } +} diff --git a/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/src/main/resources/META-INF/eventmesh/org.apache.eventmesh.openconnect.offsetmgmt.api.storage.OffsetManagementService b/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/src/main/resources/META-INF/eventmesh/org.apache.eventmesh.openconnect.offsetmgmt.api.storage.OffsetManagementService new file mode 100644 index 0000000000..11b4466d79 --- /dev/null +++ b/eventmesh-openconnect/eventmesh-openconnect-offsetmgmt-plugin/eventmesh-openconnect-offsetmgmt-admin/src/main/resources/META-INF/eventmesh/org.apache.eventmesh.openconnect.offsetmgmt.api.storage.OffsetManagementService @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +admin=org.apache.eventmesh.openconnect.offsetmgmt.admin.AdminOffsetService \ No newline at end of file
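For context, the admin key in the mapping above is resolved through EventMesh's plugin SPI. A rough sketch of the lookup, assuming the EventMeshExtensionFactory mechanism used for other offsetMgmt plugins:

    // Hypothetical wiring; mirrors how other offsetmgmt plugins are loaded via the SPI key.
    OffsetManagementService service =
        EventMeshExtensionFactory.getExtension(OffsetManagementService.class, "admin");
    service.initialize(offsetStorageConfig);  // e.g. the admin-type OffsetStorageConfig built in CanalSourceConnector#main
    Map<RecordPartition, RecordOffset> positions = service.getPositionMap();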