From c30025e79445b769ab4d21a49c8bccc58d61cf56 Mon Sep 17 00:00:00 2001 From: flysquirrel Date: Sun, 24 Dec 2017 23:10:11 +0800 Subject: [PATCH 01/22] =?UTF-8?q?=E6=94=AF=E6=8C=81schemaType=3DDBINMultiS?= =?UTF-8?q?erver=E6=A8=A1=E5=BC=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../java/io/mycat/mycat2/MycatConfig.java | 37 ++ .../java/io/mycat/mycat2/MycatSession.java | 17 +- .../io/mycat/mycat2/beans/conf/DNBean.java | 20 +- .../mycat/mycat2/beans/conf/SchemaBean.java | 12 +- .../mycat/mycat2/beans/conf/SchemaConfig.java | 15 +- .../mycat/mycat2/beans/conf/TableDefBean.java | 15 +- .../cmds/strategy/AbstractCmdStrategy.java | 17 +- .../strategy/DBINMultiServerCmdStrategy.java | 113 ++++- .../io/mycat/mycat2/route/RouteResultset.java | 415 ++++++++++++++++++ .../mycat2/route/RouteResultsetNode.java | 303 +++++++++++++ .../io/mycat/mycat2/route/RouteStrategy.java | 13 + .../java/io/mycat/mycat2/route/SQLMerge.java | 87 ++++ .../route/impl/AbstractRouteStrategy.java | 126 ++++++ .../impl/DBInMultiServerRouteStrategy.java | 59 +++ .../mycat2/tasks/BackendSynchemaTask.java | 69 ++- .../io/mycat/proxy/MycatReactorThread.java | 5 +- .../main/java/io/mycat/util/FormatUtil.java | 154 +++++++ 17 files changed, 1434 insertions(+), 43 deletions(-) create mode 100644 source/src/main/java/io/mycat/mycat2/route/RouteResultset.java create mode 100644 source/src/main/java/io/mycat/mycat2/route/RouteResultsetNode.java create mode 100644 source/src/main/java/io/mycat/mycat2/route/RouteStrategy.java create mode 100644 source/src/main/java/io/mycat/mycat2/route/SQLMerge.java create mode 100644 source/src/main/java/io/mycat/mycat2/route/impl/AbstractRouteStrategy.java create mode 100644 source/src/main/java/io/mycat/mycat2/route/impl/DBInMultiServerRouteStrategy.java create mode 100644 source/src/main/java/io/mycat/util/FormatUtil.java diff --git a/source/src/main/java/io/mycat/mycat2/MycatConfig.java 
b/source/src/main/java/io/mycat/mycat2/MycatConfig.java index 2211af3..fcdc80b 100644 --- a/source/src/main/java/io/mycat/mycat2/MycatConfig.java +++ b/source/src/main/java/io/mycat/mycat2/MycatConfig.java @@ -3,11 +3,15 @@ import java.util.HashMap; import java.util.Map; +import org.apache.commons.lang.StringUtils; + import io.mycat.mycat2.beans.GlobalBean; import io.mycat.mycat2.beans.MySQLRepBean; +import io.mycat.mycat2.beans.conf.DNBean; import io.mycat.mycat2.beans.conf.DatasourceConfig; import io.mycat.mycat2.beans.conf.SchemaBean; import io.mycat.mycat2.beans.conf.SchemaConfig; +import io.mycat.mycat2.beans.conf.TableDefBean; import io.mycat.proxy.ConfigEnum; import io.mycat.proxy.Configurable; @@ -25,6 +29,14 @@ public class MycatConfig { * 系统中所有SchemaBean的Map */ private Map mycatSchemaMap = new HashMap(); + /** + * 系统中所有DataNode的Map + */ + private Map mycatDataNodeMap = new HashMap(); + /** + * 系统中所有TableDefBean的Map + */ + private Map mycatTableMap = new HashMap(); /** * 默认Schema,取配置文件种第一个Schema */ @@ -41,12 +53,25 @@ public void initRepMap() { public void initSchemaMap() { SchemaConfig schemaConfig = getConfig(ConfigEnum.SCHEMA); + schemaConfig.getDataNodes().forEach(dataNode -> { + mycatDataNodeMap.put(dataNode.getName(), dataNode); + }); schemaConfig.getSchemas().forEach(schema -> { if (defaultSchemaBean == null) { defaultSchemaBean = schema; } mycatSchemaMap.put(schema.getName(), schema); + String defaultDnName = schema.getDefaultDataNode(); + if (StringUtils.isNotEmpty(defaultDnName) + && mycatDataNodeMap.containsKey(defaultDnName)) { + schema.setDefaultDN(mycatDataNodeMap.get(defaultDnName)); + } + schema.getTables().forEach(table -> { + mycatTableMap.put(table.getName(), table); + }); }); + + } public MySQLRepBean getMySQLRepBean(String repName) { @@ -57,6 +82,14 @@ public SchemaBean getSchemaBean(String schemaName) { return mycatSchemaMap.get(schemaName); } + public TableDefBean getTableDefBean(String tableName) { + return 
mycatTableMap.get(tableName); + } + + public DNBean getDNBean(String dataNodeName) { + return mycatDataNodeMap.get(dataNodeName); + } + /** * 获取指定的配置对象 */ @@ -98,6 +131,10 @@ public Map getMysqlRepMap() { return mysqlRepMap; } + public Map getMycatDataNodeMap() { + return mycatDataNodeMap; + } + public SchemaBean getDefaultSchemaBean() { return defaultSchemaBean; } diff --git a/source/src/main/java/io/mycat/mycat2/MycatSession.java b/source/src/main/java/io/mycat/mycat2/MycatSession.java index d35e8c6..5c3ca94 100644 --- a/source/src/main/java/io/mycat/mycat2/MycatSession.java +++ b/source/src/main/java/io/mycat/mycat2/MycatSession.java @@ -14,11 +14,14 @@ import io.mycat.mycat2.beans.MySQLMetaBean; import io.mycat.mycat2.beans.MySQLRepBean; +import io.mycat.mycat2.beans.conf.DNBean; import io.mycat.mycat2.beans.conf.SchemaBean; import io.mycat.mycat2.cmds.strategy.AnnotateRouteCmdStrategy; import io.mycat.mycat2.cmds.strategy.DBINMultiServerCmdStrategy; import io.mycat.mycat2.cmds.strategy.DBInOneServerCmdStrategy; import io.mycat.mycat2.console.SessionKeyEnum; +import io.mycat.mycat2.route.RouteResultset; +import io.mycat.mycat2.route.RouteResultsetNode; import io.mycat.mycat2.sqlparser.BufferSQLContext; import io.mycat.mycat2.sqlparser.NewSQLContext; import io.mycat.mycat2.sqlparser.TokenHash; @@ -46,6 +49,8 @@ public class MycatSession extends AbstractMySQLSession { public MySQLSession curBackend; + public RouteResultset curRouteResultset; + //所有处理cmd中,用来向前段写数据,或者后端写数据的cmd的 public MySQLCommand curSQLCommand; @@ -86,7 +91,7 @@ public boolean matchMySqlCommand(){ case DB_IN_ONE_SERVER: return DBInOneServerCmdStrategy.INSTANCE.matchMySqlCommand(this); case DB_IN_MULTI_SERVER: - DBINMultiServerCmdStrategy.INSTANCE.matchMySqlCommand(this); + return DBINMultiServerCmdStrategy.INSTANCE.matchMySqlCommand(this); case ANNOTATION_ROUTE: AnnotateRouteCmdStrategy.INSTANCE.matchMySqlCommand(this); // case SQL_PARSE_ROUTE: @@ -366,6 +371,16 @@ private String getbackendName(){ 
case ANNOTATION_ROUTE: break; case DB_IN_MULTI_SERVER: + RouteResultsetNode[] nodes = this.curRouteResultset.getNodes(); + if (nodes != null && nodes.length > 0) { + String dataNodeName = nodes[0].getName(); + DNBean dnBean = ProxyRuntime.INSTANCE.getConfig().getDNBean(dataNodeName); + if (dnBean != null) { + backendName = dnBean.getReplica(); + } + } else { + backendName = schema.getDefaultDN().getReplica(); + } break; // case SQL_PARSE_ROUTE: // break; diff --git a/source/src/main/java/io/mycat/mycat2/beans/conf/DNBean.java b/source/src/main/java/io/mycat/mycat2/beans/conf/DNBean.java index 5990fdb..00e41e8 100644 --- a/source/src/main/java/io/mycat/mycat2/beans/conf/DNBean.java +++ b/source/src/main/java/io/mycat/mycat2/beans/conf/DNBean.java @@ -7,9 +7,18 @@ * @author: gaozhiwen */ public class DNBean { + private String name; private String database; private String replica; + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + public String getDatabase() { return database; } @@ -26,11 +35,14 @@ public void setReplica(String replica) { this.replica = replica; } + + @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((database == null) ? 0 : database.hashCode()); + result = prime * result + ((name == null) ? 0 : name.hashCode()); result = prime * result + ((replica == null) ? 
0 : replica.hashCode()); return result; } @@ -49,6 +61,11 @@ public boolean equals(Object obj) { return false; } else if (!database.equals(other.database)) return false; + if (name == null) { + if (other.name != null) + return false; + } else if (!name.equals(other.name)) + return false; if (replica == null) { if (other.replica != null) return false; @@ -59,6 +76,7 @@ public boolean equals(Object obj) { @Override public String toString() { - return "DNBean [database=" + database + ", mysqlReplica=" + replica + "]"; + return "DNBean [name=" + name + ", database=" + database + ", replica=" + replica + "]"; } + } diff --git a/source/src/main/java/io/mycat/mycat2/beans/conf/SchemaBean.java b/source/src/main/java/io/mycat/mycat2/beans/conf/SchemaBean.java index 14caab9..06ac5bf 100644 --- a/source/src/main/java/io/mycat/mycat2/beans/conf/SchemaBean.java +++ b/source/src/main/java/io/mycat/mycat2/beans/conf/SchemaBean.java @@ -1,5 +1,6 @@ package io.mycat.mycat2.beans.conf; +import java.util.ArrayList; import java.util.List; /** @@ -22,8 +23,9 @@ public enum SchemaTypeEnum { public String name; public SchemaTypeEnum schemaType; + private String defaultDataNode; private DNBean defaultDN; - private List tables; + private List tables = new ArrayList(); public String getName() { return name; @@ -41,6 +43,14 @@ public void setSchemaType(SchemaTypeEnum schemaType) { this.schemaType = schemaType; } + public String getDefaultDataNode() { + return defaultDataNode; + } + + public void setDefaultDataNode(String defaultDataNode) { + this.defaultDataNode = defaultDataNode; + } + public DNBean getDefaultDN() { return defaultDN; } diff --git a/source/src/main/java/io/mycat/mycat2/beans/conf/SchemaConfig.java b/source/src/main/java/io/mycat/mycat2/beans/conf/SchemaConfig.java index 60a5300..a97acd2 100644 --- a/source/src/main/java/io/mycat/mycat2/beans/conf/SchemaConfig.java +++ b/source/src/main/java/io/mycat/mycat2/beans/conf/SchemaConfig.java @@ -1,9 +1,10 @@ package 
io.mycat.mycat2.beans.conf; -import io.mycat.proxy.Configurable; - +import java.util.ArrayList; import java.util.List; +import io.mycat.proxy.Configurable; + /** * Desc: 对应schema.yml文件 * @@ -13,6 +14,8 @@ public class SchemaConfig implements Configurable { private List schemas; + private List dataNodes = new ArrayList(); + public List getSchemas() { return schemas; } @@ -20,4 +23,12 @@ public List getSchemas() { public void setSchemas(List schemas) { this.schemas = schemas; } + + public List getDataNodes() { + return dataNodes; + } + + public void setDataNodes(List dataNodes) { + this.dataNodes = dataNodes; + } } diff --git a/source/src/main/java/io/mycat/mycat2/beans/conf/TableDefBean.java b/source/src/main/java/io/mycat/mycat2/beans/conf/TableDefBean.java index 057f26b..8f3b986 100644 --- a/source/src/main/java/io/mycat/mycat2/beans/conf/TableDefBean.java +++ b/source/src/main/java/io/mycat/mycat2/beans/conf/TableDefBean.java @@ -16,6 +16,7 @@ public enum TableTypeEnum { private String shardingKey; private String shardingRule; private String store; + private String dataNode; public String getName() { return name; @@ -57,9 +58,19 @@ public void setStore(String store) { this.store = store; } + public String getDataNode() { + return dataNode; + } + + public void setDataNode(String dataNode) { + this.dataNode = dataNode; + } + @Override public String toString() { - return "TableDefBean [name=" + name + ", tableType=" + tableType + ", store=" + store + ", shardingKey=" + shardingKey + ", shardingRule=" - + shardingRule + "]"; + return "TableDefBean [name=" + name + ", tableType=" + tableType + ", shardingKey=" + + shardingKey + ", shardingRule=" + shardingRule + ", store=" + store + + ", dataNode=" + dataNode + "]"; } + } diff --git a/source/src/main/java/io/mycat/mycat2/cmds/strategy/AbstractCmdStrategy.java b/source/src/main/java/io/mycat/mycat2/cmds/strategy/AbstractCmdStrategy.java index 9c7220c..7ccaa35 100644 --- 
a/source/src/main/java/io/mycat/mycat2/cmds/strategy/AbstractCmdStrategy.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/strategy/AbstractCmdStrategy.java @@ -67,8 +67,20 @@ private void initStaticAnnotation(){ protected abstract void initMySqlCmdHandler(); + /** + * + * 需要做路由的子类重写该方法. + * + * @param session + * @return + * @since 1.0 + */ + protected boolean handleRoute(MycatSession session) { + return true; + }; + @Override - final public boolean matchMySqlCommand(MycatSession session) { + public boolean matchMySqlCommand(MycatSession session) { MySQLCommand command = null; if(MySQLPacket.COM_QUERY==(byte)session.curMSQLPackgInf.pkgType){ @@ -117,6 +129,9 @@ final public boolean matchMySqlCommand(MycatSession session) { .processDynamicAnno(session) .processStaticAnno(session, staticAnnontationMap) .build(); + if (!handleRoute(session)) { + return false; + } return true; } } diff --git a/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java b/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java index 31a1e0d..3ce95cd 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java @@ -1,19 +1,112 @@ package io.mycat.mycat2.cmds.strategy; +import java.io.IOException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.mycat2.MycatSession; +import io.mycat.mycat2.cmds.ComChangeUserCmd; +import io.mycat.mycat2.cmds.ComFieldListCmd; +import io.mycat.mycat2.cmds.ComInitDB; +import io.mycat.mycat2.cmds.ComPingCmd; +import io.mycat.mycat2.cmds.ComQuitCmd; +import io.mycat.mycat2.cmds.ComStatisticsCmd; +import io.mycat.mycat2.cmds.DirectPassthrouhCmd; +import io.mycat.mycat2.cmds.NotSupportCmd; +import io.mycat.mycat2.cmds.sqlCmds.SqlComBeginCmd; +import io.mycat.mycat2.cmds.sqlCmds.SqlComCommitCmd; +import io.mycat.mycat2.cmds.sqlCmds.SqlComRollBackCmd; +import 
io.mycat.mycat2.cmds.sqlCmds.SqlComShutdownCmd; +import io.mycat.mycat2.cmds.sqlCmds.SqlComStartCmd; +import io.mycat.mycat2.route.RouteResultset; +import io.mycat.mycat2.route.RouteStrategy; +import io.mycat.mycat2.route.impl.DBInMultiServerRouteStrategy; +import io.mycat.mycat2.sqlparser.BufferSQLContext; +import io.mycat.mysql.packet.MySQLPacket; +import io.mycat.util.ErrorCode; + public class DBINMultiServerCmdStrategy extends AbstractCmdStrategy { + private static final Logger logger = LoggerFactory.getLogger(DBINMultiServerCmdStrategy.class); + public static final DBINMultiServerCmdStrategy INSTANCE = new DBINMultiServerCmdStrategy(); - @Override - protected void initMyCmdHandler() { - // TODO Auto-generated method stub - - } + private RouteStrategy routeStrategy = new DBInMultiServerRouteStrategy(); + + @Override + protected void initMyCmdHandler() { + MYCOMMANDMAP.put(MySQLPacket.COM_QUIT, ComQuitCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_INIT_DB, ComInitDB.INSTANCE); +// MYCOMMANDMAP.put(MySQLPacket.COM_QUERY, DirectPassthrouhCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_FIELD_LIST, ComFieldListCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_CREATE_DB, NotSupportCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_DROP_DB, NotSupportCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_REFRESH, DirectPassthrouhCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_SHUTDOWN, NotSupportCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_STATISTICS, ComStatisticsCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_PROCESS_INFO, DirectPassthrouhCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_CONNECT, NotSupportCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_PROCESS_KILL, DirectPassthrouhCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_DEBUG, NotSupportCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_PING, ComPingCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_TIME, NotSupportCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_DELAYED_INSERT, 
NotSupportCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_CHANGE_USER, ComChangeUserCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_BINLOG_DUMP, DirectPassthrouhCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_TABLE_DUMP, DirectPassthrouhCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_CONNECT_OUT, NotSupportCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_REGISTER_SLAVE, NotSupportCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_STMT_PREPARE, DirectPassthrouhCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_STMT_EXECUTE, DirectPassthrouhCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_STMT_SEND_LONG_DATA, DirectPassthrouhCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_STMT_CLOSE, DirectPassthrouhCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_STMT_RESET, DirectPassthrouhCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_SET_OPTION, DirectPassthrouhCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_STMT_FETCH, DirectPassthrouhCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_DAEMON, NotSupportCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_BINLOG_DUMP_GTID, DirectPassthrouhCmd.INSTANCE); + MYCOMMANDMAP.put(MySQLPacket.COM_RESET_CONNECTION, DirectPassthrouhCmd.INSTANCE); + } + + @Override + protected void initMySqlCmdHandler() { + MYSQLCOMMANDMAP.put(BufferSQLContext.INSERT_SQL, DirectPassthrouhCmd.INSTANCE); + MYSQLCOMMANDMAP.put(BufferSQLContext.UPDATE_SQL, DirectPassthrouhCmd.INSTANCE); + MYSQLCOMMANDMAP.put(BufferSQLContext.COMMIT_SQL, SqlComCommitCmd.INSTANCE); + MYSQLCOMMANDMAP.put(BufferSQLContext.ROLLBACK_SQL, SqlComRollBackCmd.INSTANCE); + MYSQLCOMMANDMAP.put(BufferSQLContext.SELECT_SQL, DirectPassthrouhCmd.INSTANCE); + MYSQLCOMMANDMAP.put(BufferSQLContext.BEGIN_SQL, SqlComBeginCmd.INSTANCE); + MYSQLCOMMANDMAP.put(BufferSQLContext.START_SQL, SqlComStartCmd.INSTANCE); + MYSQLCOMMANDMAP.put(BufferSQLContext.USE_SQL, SqlComStartCmd.INSTANCE); + MYSQLCOMMANDMAP.put(BufferSQLContext.SHUTDOWN_SQL, SqlComShutdownCmd.INSTANCE); + } + + - @Override - 
protected void initMySqlCmdHandler() { - // TODO Auto-generated method stub - - } + @Override + protected boolean handleRoute(MycatSession session) { + byte sqltype = session.sqlContext.getSQLType() != 0 ? session.sqlContext.getSQLType() + : session.sqlContext.getCurSQLType(); + RouteResultset rrs = routeStrategy.route(session.schema, sqltype, + session.sqlContext.getRealSQL(0), null, session); + if (rrs.getNodes() != null && rrs.getNodes().length > 1) { + session.curRouteResultset = null; + try { + logger.error( + "Multi node error! Not allowed to execute SQL statement across data nodes in DB_IN_MULTI_SERVER schemaType.\n" + + "Original SQL:[{}]", + session.sqlContext.getRealSQL(0)); + session.sendErrorMsg(ErrorCode.ERR_MULTI_NODE_FAILED, + "Not allowed to execute SQL statement across data nodes in DB_IN_MULTI_SERVER schemaType."); + } catch (IOException e) { + session.close(false, e.getMessage()); + } + return false; + } else { + session.curRouteResultset = rrs; + } + return true; + } } diff --git a/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java b/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java new file mode 100644 index 0000000..71ed7dd --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java @@ -0,0 +1,415 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.mycat2.route; + +import java.io.Serializable; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import io.mycat.mycat2.sqlengine.mpp.HavingCols; +import io.mycat.util.FormatUtil; + +/** + * @author mycat + */ +public final class RouteResultset implements Serializable { + private String statement; // 原始语句 + private final int sqlType; + private RouteResultsetNode[] nodes; // 路由结果节点 + private Set subTables; + + + private int limitStart; + private boolean cacheAble; + // used to store table's ID->datanodes cache + // format is table.primaryKey + private String primaryKey; + // limit output total + private int limitSize; + private SQLMerge sqlMerge; + + private boolean callStatement = false; // 处理call关键字 + + // 是否为全局表,只有在insert、update、delete、ddl里会判断并修改。默认不是全局表,用于修正全局表修改数据的反馈。 + private boolean globalTableFlag = false; + + //是否完成了路由 + private boolean isFinishedRoute = false; + + //是否自动提交,此属性主要用于记录ServerConnection上的autocommit状态 + private boolean autocommit = true; + + private boolean isLoadData=false; + + //是否可以在从库运行,此属性主要供RouteResultsetNode获取 + private Boolean canRunInReadDB; + + // 强制走 master,可以通过 RouteResultset的属性canRunInReadDB=false + // 传给 RouteResultsetNode 来实现,但是 强制走 slave需要增加一个属性来实现: + private Boolean runOnSlave = null; // 默认null表示不施加影响 + + //key=dataNode value=slot + private Map dataNodeSlotMap=new HashMap<>(); + + private boolean selectForUpdate; + + public boolean 
isSelectForUpdate() { + return selectForUpdate; + } + + public void setSelectForUpdate(boolean selectForUpdate) { + this.selectForUpdate = selectForUpdate; + } + + + private List tables; + + public List getTables() { + return tables; + } + + public void setTables(List tables) { + this.tables = tables; + } + + public Map getDataNodeSlotMap() { + return dataNodeSlotMap; + } + + public void setDataNodeSlotMap(Map dataNodeSlotMap) { + this.dataNodeSlotMap = dataNodeSlotMap; + } + + public Boolean getRunOnSlave() { + return runOnSlave; + } + + public void setRunOnSlave(Boolean runOnSlave) { + this.runOnSlave = runOnSlave; + } + // TODO 待支持存储过程 by zhangsiwei + /* + * private Procedure procedure; + * + * public Procedure getProcedure() { return procedure; } + * + * public void setProcedure(Procedure procedure) { this.procedure = procedure; } + */ + + public boolean isLoadData() + { + return isLoadData; + } + + public void setLoadData(boolean isLoadData) + { + this.isLoadData = isLoadData; + } + + public boolean isFinishedRoute() { + return isFinishedRoute; + } + + public void setFinishedRoute(boolean isFinishedRoute) { + this.isFinishedRoute = isFinishedRoute; + } + + public boolean isGlobalTable() { + return globalTableFlag; + } + + public void setGlobalTable(boolean globalTableFlag) { + this.globalTableFlag = globalTableFlag; + } + + public RouteResultset(String stmt, int sqlType) { + this.statement = stmt; + this.limitSize = -1; + this.sqlType = sqlType; + } + + public void resetNodes() { + if (nodes != null) { + for (RouteResultsetNode node : nodes) { + node.resetStatement(); + } + } + } + + public void copyLimitToNodes() { + + if(nodes!=null) + { + for (RouteResultsetNode node : nodes) + { + if(node.getLimitSize()==-1&&node.getLimitStart()==0) + { + node.setLimitStart(limitStart); + node.setLimitSize(limitSize); + } + } + + } + } + + + public SQLMerge getSqlMerge() { + return sqlMerge; + } + + public boolean isCacheAble() { + return cacheAble; + } + + public void 
setCacheAble(boolean cacheAble) { + this.cacheAble = cacheAble; + } + + public boolean needMerge() { + return limitSize > 0 || sqlMerge != null; + } + + public int getSqlType() { + return sqlType; + } + + public boolean isHasAggrColumn() { + return (sqlMerge != null) && sqlMerge.isHasAggrColumn(); + } + + public int getLimitStart() { + return limitStart; + } + + public String[] getGroupByCols() { + return (sqlMerge != null) ? sqlMerge.getGroupByCols() : null; + } + + private SQLMerge createSQLMergeIfNull() { + if (sqlMerge == null) { + sqlMerge = new SQLMerge(); + } + return sqlMerge; + } + + public Map getMergeCols() { + return (sqlMerge != null) ? sqlMerge.getMergeCols() : null; + } + + public void setLimitStart(int limitStart) { + this.limitStart = limitStart; + } + + public String getPrimaryKey() { + return primaryKey; + } + + public boolean hasPrimaryKeyToCache() { + return primaryKey != null; + } + + public void setPrimaryKey(String primaryKey) { + if (!primaryKey.contains(".")) { + throw new java.lang.IllegalArgumentException( + "must be table.primarykey fomat :" + primaryKey); + } + this.primaryKey = primaryKey; + } + + /** + * return primary key items ,first is table name ,seconds is primary key + * + * @return + */ + public String[] getPrimaryKeyItems() { + return primaryKey.split("\\."); + } + + public void setOrderByCols(LinkedHashMap orderByCols) { + if (orderByCols != null && !orderByCols.isEmpty()) { + createSQLMergeIfNull().setOrderByCols(orderByCols); + } + } + + public void setHasAggrColumn(boolean hasAggrColumn) { + if (hasAggrColumn) { + createSQLMergeIfNull().setHasAggrColumn(true); + } + } + + public void setGroupByCols(String[] groupByCols) { + if (groupByCols != null && groupByCols.length > 0) { + createSQLMergeIfNull().setGroupByCols(groupByCols); + } + } + + public void setMergeCols(Map mergeCols) { + if (mergeCols != null && !mergeCols.isEmpty()) { + createSQLMergeIfNull().setMergeCols(mergeCols); + } + + } + + public LinkedHashMap 
getOrderByCols() { + return (sqlMerge != null) ? sqlMerge.getOrderByCols() : null; + + } + + public String getStatement() { + return statement; + } + + public RouteResultsetNode[] getNodes() { + return nodes; + } + + public void setNodes(RouteResultsetNode[] nodes) { + if(nodes!=null) + { + int nodeSize=nodes.length; + for (RouteResultsetNode node : nodes) + { + node.setTotalNodeSize(nodeSize); + } + + } + this.nodes = nodes; + } + + /** + * @return -1 if no limit + */ + public int getLimitSize() { + return limitSize; + } + + public void setLimitSize(int limitSize) { + this.limitSize = limitSize; + } + + public void setStatement(String statement) { + this.statement = statement; + } + + public boolean isCallStatement() { + return callStatement; + } + + public void setCallStatement(boolean callStatement) { + this.callStatement = callStatement; + if(nodes!=null) + { + for (RouteResultsetNode node : nodes) + { + node.setCallStatement(callStatement); + } + + } + } + + /* + * public void changeNodeSqlAfterAddLimit(SchemaConfig schemaConfig, String sourceDbType, String + * sql, int offset, int count, boolean isNeedConvert) { if (nodes != null) { + * + * Map dataNodeDbTypeMap = schemaConfig.getDataNodeDbTypeMap(); Map sqlMapCache = new HashMap<>(); for (RouteResultsetNode node : nodes) { String dbType + * = dataNodeDbTypeMap.get(node.getName()); if (dbType.equalsIgnoreCase("mysql")) { + * node.setStatement(sql); //mysql之前已经加好limit } else if (sqlMapCache.containsKey(dbType)) { + * node.setStatement(sqlMapCache.get(dbType)); } else if(isNeedConvert) { String nativeSql = + * PageSQLUtil.convertLimitToNativePageSql(dbType, sql, offset, count); sqlMapCache.put(dbType, + * nativeSql); node.setStatement(nativeSql); } else { node.setStatement(sql); } + * + * node.setLimitStart(offset); node.setLimitSize(count); } + * + * + * } } + */ + + public boolean isAutocommit() { + return autocommit; + } + + public void setAutocommit(boolean autocommit) { + this.autocommit = autocommit; + } 
+ + public Boolean getCanRunInReadDB() { + return canRunInReadDB; + } + + public void setCanRunInReadDB(Boolean canRunInReadDB) { + this.canRunInReadDB = canRunInReadDB; + } + + public HavingCols getHavingCols() { + return (sqlMerge != null) ? sqlMerge.getHavingCols() : null; + } + + public void setSubTables(Set subTables) { + this.subTables = subTables; + } + + public void setHavings(HavingCols havings) { + if (havings != null) { + createSQLMergeIfNull().setHavingCols(havings); + } + } + + // Added by winbill, 20160314, for having clause, Begin ==> + public void setHavingColsName(Object[] names) { + if (names != null && names.length > 0) { + createSQLMergeIfNull().setHavingColsName(names); + } + } + // Added by winbill, 20160314, for having clause, End <== + + public Set getSubTables() { + return this.subTables; + } + + public boolean isDistTable(){ + if(this.getSubTables()!=null && !this.getSubTables().isEmpty() ){ + return true; + } + return false; + } + + @Override + public String toString() { + StringBuilder s = new StringBuilder(); + s.append(statement).append(", route={"); + if (nodes != null) { + for (int i = 0; i < nodes.length; ++i) { + s.append("\n ").append(FormatUtil.format(i + 1, 3)); + s.append(" -> ").append(nodes[i]); + } + } + s.append("\n}"); + return s.toString(); + } + +} diff --git a/source/src/main/java/io/mycat/mycat2/route/RouteResultsetNode.java b/source/src/main/java/io/mycat/mycat2/route/RouteResultsetNode.java new file mode 100644 index 0000000..f34e0d0 --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/route/RouteResultsetNode.java @@ -0,0 +1,303 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.mycat2.route; + +import java.io.Serializable; +import java.util.Map; + +import io.mycat.mycat2.sqlparser.BufferSQLContext; + +/** + * @author mycat + */ +public final class RouteResultsetNode implements Serializable , Comparable { + /** + * + */ + private static final long serialVersionUID = 1L; + private final String name; // 数据节点名称 + private String statement; // 执行的语句 + private final String srcStatement; + private final int sqlType; + private volatile boolean canRunInReadDB; + private final boolean hasBlanceFlag; + private boolean callStatement = false; // 处理call关键字 + private int limitStart; + private int limitSize; + private int totalNodeSize =0; //方便后续jdbc批量获取扩展 + // TODO 待支持存储过程 by zhangsiwei + // private Procedure procedure; + // TODO 待支持loadData by zhangsiwei + // private LoadData loadData; + private RouteResultset source; + + // 强制走 master,可以通过 RouteResultset的属性canRunInReadDB(false) + // 传给 RouteResultsetNode 来实现,但是 强制走 slave需要增加一个属性来实现: + private Boolean runOnSlave = null; // 默认null表示不施加影响, true走slave,false走master + + private String subTableName; // 分表的表名 + + //迁移算法用 -2代表不是slot分片 ,-1代表扫描所有分片 + 
private int slot=-2; + + public RouteResultsetNode(String name, int sqlType, String srcStatement) { + this.name = name; + limitStart=0; + this.limitSize = -1; + this.sqlType = sqlType; + this.srcStatement = srcStatement; + this.statement = srcStatement; + canRunInReadDB = + (sqlType == BufferSQLContext.SELECT_SQL || sqlType == BufferSQLContext.SHOW_SQL); + // TODO 待处理balance静态注解 /* mycat:balance type=master*/ by zhangsiwei + hasBlanceFlag = (statement != null) + && statement.startsWith("/*balance*/"); + } + + public Boolean getRunOnSlave() { + return runOnSlave; + } + public boolean isUpdateSql() { + int type=sqlType; + return BufferSQLContext.INSERT_SQL == type || BufferSQLContext.UPDATE_SQL == type + || BufferSQLContext.DELETE_SQL == type || BufferSQLContext.CREATE_SQL == type + || BufferSQLContext.ALTER_SQL == type || BufferSQLContext.DROP_SQL == type + || BufferSQLContext.TRUNCATE_SQL == type; + } + public void setRunOnSlave(Boolean runOnSlave) { + this.runOnSlave = runOnSlave; + } + private Map hintMap; + + public Map getHintMap() + { + return hintMap; + } + + public void setHintMap(Map hintMap) + { + this.hintMap = hintMap; + } + + public void setStatement(String statement) { + this.statement = statement; + } + + public void setCanRunInReadDB(boolean canRunInReadDB) { + this.canRunInReadDB = canRunInReadDB; + } + + public boolean getCanRunInReadDB() { + return this.canRunInReadDB; + } + + public void resetStatement() { + this.statement = srcStatement; + } + + /** + * 这里的逻辑是为了优化,实现:非业务sql可以在负载均衡走slave的效果。因为业务sql一般是非自动提交, + * 而非业务sql一般默认是自动提交,比如mysql client,还有SQLJob, heartbeat都可以使用 + * 了Leader-us优化的query函数,该函数实现为自动提交; + * + * 在非自动提交的情况下(有事物),除非使用了 balance 注解的情况下,才可以走slave. 
+ * + * 当然还有一个大前提,必须是 select 或者 show 语句(canRunInReadDB=true) + * @param autocommit + * @return + */ + public boolean canRunnINReadDB(boolean autocommit) { + return canRunInReadDB && ( autocommit || (!autocommit && hasBlanceFlag) ); + } + +// public boolean canRunnINReadDB(boolean autocommit) { +// return canRunInReadDB && autocommit && !hasBlanceFlag +// || canRunInReadDB && !autocommit && hasBlanceFlag; +// } + /* + * public Procedure getProcedure() { return procedure; } + */ + + public int getSlot() { + return slot; + } + + public void setSlot(int slot) { + this.slot = slot; + } + + /* + * public void setProcedure(Procedure procedure) { this.procedure = procedure; } + */ + + public boolean isCallStatement() + { + return callStatement; + } + + public void setCallStatement(boolean callStatement) + { + this.callStatement = callStatement; + } + public String getName() { + return name; + } + + public int getSqlType() { + return sqlType; + } + + public String getStatement() { + return statement; + } + + public int getLimitStart() + { + return limitStart; + } + + public void setLimitStart(int limitStart) + { + this.limitStart = limitStart; + } + + public int getLimitSize() + { + return limitSize; + } + + public void setLimitSize(int limitSize) + { + this.limitSize = limitSize; + } + + public int getTotalNodeSize() + { + return totalNodeSize; + } + + public void setTotalNodeSize(int totalNodeSize) + { + this.totalNodeSize = totalNodeSize; + } + + /* + * public LoadData getLoadData() { return loadData; } + * + * public void setLoadData(LoadData loadData) { this.loadData = loadData; } + */ + + @Override + public int hashCode() { + return name.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj instanceof RouteResultsetNode) { + RouteResultsetNode rrn = (RouteResultsetNode) obj; + if(subTableName!=null){ + if (equals(name, rrn.getName()) && equals(subTableName, rrn.getSubTableName())) { + return true; + } + 
}else{ + if (equals(name, rrn.getName())) { + return true; + } + } + } + return false; + } + + @Override + public String toString() { + StringBuilder s = new StringBuilder(); + s.append(name); + s.append('{').append(statement).append('}'); + return s.toString(); + } + + private static boolean equals(String str1, String str2) { + if (str1 == null) { + return str2 == null; + } + return str1.equals(str2); + } + + public String getSubTableName() { + return this.subTableName; + } + + public void setSubTableName(String subTableName) { + this.subTableName = subTableName; + } + + public boolean isModifySQL() { + return !canRunInReadDB; + } + public boolean isDisctTable() { + if(subTableName!=null && !subTableName.equals("")){ + return true; + }; + return false; + } + + + @Override + public int compareTo(RouteResultsetNode obj) { + if(obj == null) { + return 1; + } + if(this.name == null) { + return -1; + } + if(obj.name == null) { + return 1; + } + int c = this.name.compareTo(obj.name); + if(!this.isDisctTable()){ + return c; + }else{ + if(c==0){ + return this.subTableName.compareTo(obj.subTableName); + } + return c; + } + } + + public boolean isHasBlanceFlag() { + return hasBlanceFlag; + } + + public RouteResultset getSource() { + return source; + } + + public void setSource(RouteResultset source) { + this.source = source; + } +} diff --git a/source/src/main/java/io/mycat/mycat2/route/RouteStrategy.java b/source/src/main/java/io/mycat/mycat2/route/RouteStrategy.java new file mode 100644 index 0000000..6d6ccb4 --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/route/RouteStrategy.java @@ -0,0 +1,13 @@ +package io.mycat.mycat2.route; + +import io.mycat.mycat2.MycatSession; +import io.mycat.mycat2.beans.conf.SchemaBean; + +/** + * 路由策略接口 + * + */ +public interface RouteStrategy { + public RouteResultset route(SchemaBean schema, int sqlType, String origSQL, String charset, + MycatSession mycatSession); +} diff --git 
a/source/src/main/java/io/mycat/mycat2/route/SQLMerge.java b/source/src/main/java/io/mycat/mycat2/route/SQLMerge.java new file mode 100644 index 0000000..0ee0489 --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/route/SQLMerge.java @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.mycat2.route; + +import java.io.Serializable; +import java.util.LinkedHashMap; +import java.util.Map; + +import io.mycat.mycat2.sqlengine.mpp.HavingCols; + +public class SQLMerge implements Serializable { + private LinkedHashMap orderByCols; + private HavingCols havingCols; + private Object[] havingColsName; // Added by winbill, 20160314, for having clause + private Map mergeCols; + private String[] groupByCols; + private boolean hasAggrColumn; + + public LinkedHashMap getOrderByCols() { + return orderByCols; + } + + public void setOrderByCols(LinkedHashMap orderByCols) { + this.orderByCols = orderByCols; + } + + public Map getMergeCols() { + return mergeCols; + } + + public void setMergeCols(Map mergeCols) { + this.mergeCols = mergeCols; + } + + public String[] getGroupByCols() { + return groupByCols; + } + + public void setGroupByCols(String[] groupByCols) { + this.groupByCols = groupByCols; + } + + public boolean isHasAggrColumn() { + return hasAggrColumn; + } + + public void setHasAggrColumn(boolean hasAggrColumn) { + this.hasAggrColumn = hasAggrColumn; + } + + public HavingCols getHavingCols() { + return havingCols; + } + + public void setHavingCols(HavingCols havingCols) { + this.havingCols = havingCols; + } + + public Object[] getHavingColsName() { + return havingColsName; + } + + public void setHavingColsName(Object[] havingColsName) { + this.havingColsName = havingColsName; + } +} \ No newline at end of file diff --git a/source/src/main/java/io/mycat/mycat2/route/impl/AbstractRouteStrategy.java b/source/src/main/java/io/mycat/mycat2/route/impl/AbstractRouteStrategy.java new file mode 100644 index 0000000..951307a --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/route/impl/AbstractRouteStrategy.java @@ -0,0 +1,126 @@ +package io.mycat.mycat2.route.impl; + +import java.sql.SQLNonTransientException; +import java.sql.SQLSyntaxErrorException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import 
io.mycat.mycat2.MycatSession; +import io.mycat.mycat2.beans.conf.SchemaBean; +import io.mycat.mycat2.beans.conf.SchemaConfig; +import io.mycat.mycat2.route.RouteResultset; +import io.mycat.mycat2.route.RouteStrategy; +import io.mycat.mycat2.sqlparser.BufferSQLContext; + +public abstract class AbstractRouteStrategy implements RouteStrategy { + + private static final Logger LOGGER = LoggerFactory.getLogger(AbstractRouteStrategy.class); + + @Override + public RouteResultset route(SchemaBean schema, int sqlType, String origSQL, String charset, + MycatSession mycatSession) { + + // TODO 待处理checkSQLSchema by zhangsiwei + //对应schema标签checkSQLschema属性,把表示schema的字符去掉 + /* + * if (schema.isCheckSQLSchema()) { origSQL = RouterUtil.removeSchema(origSQL, + * schema.getName()); } + */ + + /** + * 处理一些路由之前的逻辑 + * 全局序列号,父子表插入 + */ + /* + * if (beforeRouteProcess(schema, sqlType, origSQL, mycatSession)) { return null; } + */ + + // TODO 待处理全局表DML by zhangsiwei + /** + * SQL 语句拦截 + */ + /* + * String stmt = MycatServer.getInstance().getSqlInterceptor().interceptSQL(origSQL, + * sqlType); if (!origSQL.equals(stmt) && LOGGER.isDebugEnabled()) { + * LOGGER.debug("sql intercepted to " + stmt + " from " + origSQL); } + */ + + String stmt = origSQL; + RouteResultset rrs = new RouteResultset(stmt, sqlType); + + /** + * 优化debug loaddata输出cache的日志会极大降低性能 + */ + /* + * if (LOGGER.isDebugEnabled() && origSQL.startsWith(LoadData.loadDataHint)) { + * rrs.setCacheAble(false); } + */ + + /** + * rrs携带ServerConnection的autocommit状态用于在sql解析的时候遇到 + * select ... 
for update的时候动态设定RouteResultsetNode的canRunInReadDB属性 + */ + /* + * if (sc != null ) { rrs.setAutocommit(sc.isAutocommit()); } + */ + + /** + * DDL 语句的路由 + */ + if (BufferSQLContext.ALTER_SQL == sqlType) { + // return RouterUtil.routeToDDLNode(rrs, sqlType, stmt, schema); + return null; + } + + /** + * 检查是否有分片 + */ + if ((schema.getTables() == null || schema.getTables().isEmpty()) + && BufferSQLContext.SHOW_SQL != sqlType) { + // rrs = RouterUtil.routeToSingleNode(rrs, schema.getDataNode(), stmt); + rrs = null; + } else { + // RouteResultset returnedSet = routeSystemInfo(schema, sqlType, stmt, rrs); + // if (returnedSet == null) { + // rrs = routeNormalSqlWithAST(schema, stmt, rrs, charset, sqlType, mycatSession); + // } + } + + return rrs; + } + + // TODO by zhangsiwei + /** + * 路由之前必要的处理 + * 主要是全局序列号插入,还有子表插入 + */ + /* + * private boolean beforeRouteProcess(SchemaConfig schema, int sqlType, String origSQL, + * ServerConnection sc) throws SQLNonTransientException { + * + * return RouterUtil.processWithMycatSeq(schema, sqlType, origSQL, sc) || (sqlType == + * ServerParse.INSERT && RouterUtil.processERChildTable(schema, origSQL, sc)) || (sqlType == + * ServerParse.INSERT && RouterUtil.processInsert(schema, sqlType, origSQL, sc)); } + */ + + /** + * 通过解析AST语法树类来寻找路由 + */ + public abstract RouteResultset routeNormalSqlWithAST(SchemaBean schema, String stmt, + RouteResultset rrs, String charset, int sqlType, MycatSession mycatSession) + throws SQLNonTransientException; + + /** + * 路由信息指令, 如 SHOW、SELECT@@、DESCRIBE + */ + public abstract RouteResultset routeSystemInfo(SchemaBean schema, int sqlType, String stmt, + RouteResultset rrs) throws SQLSyntaxErrorException; + + /** + * 解析 Show 之类的语句 + */ + public abstract RouteResultset analyseShowSQL(SchemaConfig schema, RouteResultset rrs, String stmt) + throws SQLNonTransientException; + +} diff --git a/source/src/main/java/io/mycat/mycat2/route/impl/DBInMultiServerRouteStrategy.java 
b/source/src/main/java/io/mycat/mycat2/route/impl/DBInMultiServerRouteStrategy.java new file mode 100644 index 0000000..efc7df8 --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/route/impl/DBInMultiServerRouteStrategy.java @@ -0,0 +1,59 @@ +package io.mycat.mycat2.route.impl; + +import java.util.HashSet; +import java.util.Iterator; +import java.util.Set; + +import io.mycat.mycat2.MycatConfig; +import io.mycat.mycat2.MycatSession; +import io.mycat.mycat2.beans.conf.SchemaBean; +import io.mycat.mycat2.beans.conf.TableDefBean; +import io.mycat.mycat2.route.RouteResultset; +import io.mycat.mycat2.route.RouteResultsetNode; +import io.mycat.mycat2.route.RouteStrategy; +import io.mycat.proxy.ProxyRuntime; + +/** + * DBInMultiServerRouteStrategy + *

+ * DBInMultiServer模式下的路由策略,该模式下不允许跨库. + *

+ * Creation Time: 2017-12-24 + * + * @author zhangsiwei + * @since 2.0 + */ +public class DBInMultiServerRouteStrategy implements RouteStrategy { + + @Override + public RouteResultset route(SchemaBean schema, int sqlType, String origSQL, String charset, + MycatSession mycatSession) { + + Set dataNodes = new HashSet<>(); + + MycatConfig config = ProxyRuntime.INSTANCE.getConfig(); + + for (int i = 0; i < mycatSession.sqlContext.getTableCount(); i++) { + String tableName = mycatSession.sqlContext.getTableName(i); + + TableDefBean tableDefBean = config.getTableDefBean(tableName); + if (tableDefBean != null) { + dataNodes.add(tableDefBean.getDataNode()); + } + } + RouteResultset rrs = new RouteResultset(origSQL, sqlType); + + if (dataNodes.size() >= 1) { + RouteResultsetNode[] nodes = new RouteResultsetNode[dataNodes.size()]; + int i = 0; + for (Iterator it = dataNodes.iterator(); it.hasNext();) { + nodes[i++] = new RouteResultsetNode(it.next(), sqlType, origSQL); + } + rrs.setNodes(nodes); + return rrs; + } + + return rrs; + } + +} diff --git a/source/src/main/java/io/mycat/mycat2/tasks/BackendSynchemaTask.java b/source/src/main/java/io/mycat/mycat2/tasks/BackendSynchemaTask.java index 2f9df93..051ca04 100644 --- a/source/src/main/java/io/mycat/mycat2/tasks/BackendSynchemaTask.java +++ b/source/src/main/java/io/mycat/mycat2/tasks/BackendSynchemaTask.java @@ -2,15 +2,18 @@ import java.io.IOException; import java.nio.channels.ClosedChannelException; +import java.util.Map; +import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.mycat.mycat2.MySQLSession; -import io.mycat.mycat2.beans.heartbeat.DBHeartbeat; +import io.mycat.mycat2.beans.conf.DNBean; import io.mycat.mysql.packet.CommandPacket; import io.mycat.mysql.packet.ErrorPacket; import io.mycat.mysql.packet.MySQLPacket; +import io.mycat.proxy.ProxyRuntime; import io.mycat.util.ErrorCode; public class BackendSynchemaTask extends AbstractBackendIOTask { @@ -19,7 
+22,7 @@ public class BackendSynchemaTask extends AbstractBackendIOTask { public BackendSynchemaTask(MySQLSession session) throws IOException{ super(session,true); - String databases = session.getMycatSession().schema.getDefaultDN().getDatabase(); + String databases = findDatabase(session); logger.debug("the Backend Synchema Task begin "); logger.debug(" use "+databases); session.proxyBuffer.reset(); @@ -56,9 +59,9 @@ public void onSocketRead(MySQLSession session) throws IOException { session.proxyBuffer.reset(); try { - if (!session.readFromChannel()){ - return; - } + if (!session.readFromChannel()) { + return; + } }catch(ClosedChannelException e){ session.close(false, e.getMessage()); return; @@ -75,24 +78,44 @@ public void onSocketRead(MySQLSession session) throws IOException { return; } - switch (session.resolveMySQLPackage(session.proxyBuffer, session.curMSQLPackgInf, false)) { - case Full: - if(session.curMSQLPackgInf.pkgType == MySQLPacket.OK_PACKET){ - String database = session.getMycatSession().schema.getDefaultDN().getDatabase(); - session.setDatabase(database ); - logger.debug("the Backend Synchema Task end "); - this.finished(true); - }else if(session.curMSQLPackgInf.pkgType == MySQLPacket.ERROR_PACKET){ - errPkg = new ErrorPacket(); - errPkg.read(session.proxyBuffer); - logger.debug("the Backend Synchema Task end "); - logger.warn("backend state sync Error.Err No. 
" + errPkg.errno + "," + errPkg.message); - this.finished(false); - } - break; - default: - return; - } + switch (session.resolveMySQLPackage(session.proxyBuffer, session.curMSQLPackgInf, false)) { + case Full: + if (session.curMSQLPackgInf.pkgType == MySQLPacket.OK_PACKET) { + String database = findDatabase(session); + session.setDatabase(database); + logger.debug("the Backend Synchema Task end "); + this.finished(true); + } else if (session.curMSQLPackgInf.pkgType == MySQLPacket.ERROR_PACKET) { + errPkg = new ErrorPacket(); + errPkg.read(session.proxyBuffer); + logger.debug("the Backend Synchema Task end "); + logger.warn("backend state sync Error.Err No. " + errPkg.errno + "," + + errPkg.message); + this.finished(false); + } + break; + default: + return; + } + } + + private String findDatabase(MySQLSession session) { + String replicaName = + session.getMySQLMetaBean().getRepBean().getReplicaBean().getName(); + Map dataNodeMap = ProxyRuntime.INSTANCE.getConfig().getMycatDataNodeMap(); + String databases = ""; + if (dataNodeMap != null) { + DNBean dataNode = dataNodeMap.values().stream().filter(dn -> { + return dn.getReplica().equalsIgnoreCase(replicaName); + }).findFirst().orElse(null); + if (dataNode != null) { + databases = dataNode.getDatabase(); + } + } + if (StringUtils.isEmpty(databases)) { + databases = session.getMycatSession().schema.getDefaultDN().getDatabase(); + } + return databases; } } diff --git a/source/src/main/java/io/mycat/proxy/MycatReactorThread.java b/source/src/main/java/io/mycat/proxy/MycatReactorThread.java index 3f032cf..74c110c 100644 --- a/source/src/main/java/io/mycat/proxy/MycatReactorThread.java +++ b/source/src/main/java/io/mycat/proxy/MycatReactorThread.java @@ -6,6 +6,7 @@ import java.util.Map; import java.util.stream.Stream; +import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -252,8 +253,8 @@ public void syncAndExecute(MySQLSession mysqlSession,AsynTaskCallBack callback) 
throws IOException{ - if(mysqlSession.getMycatSession().schema!=null - &&!mysqlSession.getMycatSession().schema.getDefaultDN().getDatabase().equals(mysqlSession.getDatabase())){ + + if (StringUtils.isEmpty(mysqlSession.getDatabase())) { MycatSession mycatSession = mysqlSession.getMycatSession(); BackendSynchemaTask backendSynchemaTask = new BackendSynchemaTask(mysqlSession); backendSynchemaTask.setCallback((optSession, sender, exeSucces, rv) -> { diff --git a/source/src/main/java/io/mycat/util/FormatUtil.java b/source/src/main/java/io/mycat/util/FormatUtil.java new file mode 100644 index 0000000..8354e9f --- /dev/null +++ b/source/src/main/java/io/mycat/util/FormatUtil.java @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.util; + +/** + * 格式化工具 + * + * @author mycat + * @version 2008-11-24 下午12:58:17 + */ +public final class FormatUtil { + + // 右对齐格式化字符串 + public static final int ALIGN_RIGHT = 0; + + // 左对齐格式化字符串 + public static final int ALIGN_LEFT = 1; + + private static final char defaultSplitChar = ' '; + + private static final String[] timeFormat = new String[] { "d ", "h ", "m ", "s ", "ms" }; + + /** + * 格式化后返回的字符串 + * + * @param s + * 需要格式化的原始字符串,默认按左对齐。 + * @param fillLength + * 填充长度 + * @return String + */ + public static String format(String s, int fillLength) { + return format(s, fillLength, defaultSplitChar, ALIGN_LEFT); + } + + /** + * 格式化后返回的字符串 + * + * @param i + * 需要格式化的数字类型,默认按右对齐。 + * @param fillLength + * 填充长度 + * @return String + */ + public static String format(int i, int fillLength) { + return format(Integer.toString(i), fillLength, defaultSplitChar, ALIGN_RIGHT); + } + + /** + * 格式化后返回的字符串 + * + * @param l + * 需要格式化的数字类型,默认按右对齐。 + * @param fillLength + * 填充长度 + * @return String + */ + public static String format(long l, int fillLength) { + return format(Long.toString(l), fillLength, defaultSplitChar, ALIGN_RIGHT); + } + + /** + * @param s + * 需要格式化的原始字符串 + * @param fillLength + * 填充长度 + * @param fillChar + * 填充的字符 + * @param align + * 填充方式(左边填充还是右边填充) + * @return String + */ + public static String format(String s, int fillLength, char fillChar, int align) { + if (s == null) { + s = ""; + } else { + s = s.trim(); + } + int charLen = fillLength - s.length(); + if (charLen > 0) { + char[] fills = new char[charLen]; + for (int i = 0; i < charLen; i++) { + fills[i] = fillChar; + } + StringBuilder str = new StringBuilder(s); + switch (align) { + case ALIGN_RIGHT: + str.insert(0, fills); + break; + case ALIGN_LEFT: + str.append(fills); + break; + default: + str.append(fills); + } + return str.toString(); + } else { + return s; + } + } + + /** + * 格式化时间输出 + *

+ * 1d 15h 4m 15s 987ms + *

+ */ + public static String formatTime(long millis, int precision) { + long[] la = new long[5]; + la[0] = (millis / 86400000);// days + la[1] = (millis / 3600000) % 24;// hours + la[2] = (millis / 60000) % 60;// minutes + la[3] = (millis / 1000) % 60;// seconds + la[4] = (millis % 1000);// ms + + int index = 0; + for (int i = 0; i < la.length; i++) { + if (la[i] != 0) { + index = i; + break; + } + } + + StringBuilder buf = new StringBuilder(); + int validLength = la.length - index; + for (int i = 0; (i < validLength && i < precision); i++) { + buf.append(la[index]).append(timeFormat[index]); + index++; + } + return buf.toString(); + } + +} \ No newline at end of file From b412ce7a865f8579aa2cfcceadafe72822322e74 Mon Sep 17 00:00:00 2001 From: flysqrlboy Date: Mon, 8 Jan 2018 16:25:05 +0800 Subject: [PATCH 02/22] configs for DBINMultiServer --- source/src/main/resources/datasource.yml | 36 ++++++++++++++------- source/src/main/resources/replica-index.yml | 3 +- source/src/main/resources/schema.yml | 20 +++++++++--- source/src/main/resources/user.yml | 2 +- 4 files changed, 43 insertions(+), 18 deletions(-) diff --git a/source/src/main/resources/datasource.yml b/source/src/main/resources/datasource.yml index 5224e6a..05f2738 100644 --- a/source/src/main/resources/datasource.yml +++ b/source/src/main/resources/datasource.yml @@ -1,22 +1,36 @@ replicas: - - name: test # 复制组 名称 必须唯一 + - name: repli # 复制组 名称 必须唯一 repType: MASTER_SLAVE # 复制类型 switchType: SWITCH # 切换类型 balanceType: BALANCE_ALL_READ # 读写分离类型 mysqls: - - hostName: test # mysql 主机名 + - hostName: mytest3307 # mysql 主机名 ip: 127.0.0.1 # ip - port: 3306 # port + port: 3307 # port user: root # 用户名 password: 123456 # 密码 minCon: 1 # 最小连接 maxCon: 10 # 最大连接 maxRetryCount: 3 # 连接重试次数 - - hostName: test1 - ip: 127.0.0.1 - port: 3307 - user: root - password: 123456 - minCon: 1 - maxCon: 10 - maxRetryCount: 3 + - hostName: mytest3308 # mysql 主机名 + ip: 127.0.0.1 # ip + port: 3308 # port + user: root # 用户名 + password: 
123456 # 密码 + minCon: 1 # 最小连接 + maxCon: 10 # 最大连接 + maxRetryCount: 3 # 连接重试次数 + + - name: repli2 + repType: MASTER_SLAVE # 复制类型 + switchType: SWITCH # 切换类型 + balanceType: BALANCE_ALL_READ # 读写分离类型 + mysqls: + - hostName: mytest3306 # mysql 主机名 + ip: 127.0.0.1 # ip + port: 3306 # port + user: root # 用户名 + password: 123456 # 密码 + minCon: 1 # 最小连接 + maxCon: 10 # 最大连接 + maxRetryCount: 3 # 连接重试次数 \ No newline at end of file diff --git a/source/src/main/resources/replica-index.yml b/source/src/main/resources/replica-index.yml index bb4dfd8..a68c1ff 100644 --- a/source/src/main/resources/replica-index.yml +++ b/source/src/main/resources/replica-index.yml @@ -1,2 +1,3 @@ replicaIndexes: - test: 0 \ No newline at end of file + repli: 0 + repli2: 0 \ No newline at end of file diff --git a/source/src/main/resources/schema.yml b/source/src/main/resources/schema.yml index c946f87..024c4ff 100644 --- a/source/src/main/resources/schema.yml +++ b/source/src/main/resources/schema.yml @@ -1,6 +1,16 @@ schemas: - - name: test - schemaType: DB_IN_ONE_SERVER - defaultDN: - database: test - replica: test \ No newline at end of file + - name: myfly + schemaType: DB_IN_MULTI_SERVER + defaultDataNode: dn1 + tables: + - name: tb_fly + dataNode: dn1 + - name: tb_boy + dataNode: dn2 +dataNodes: + - name: dn1 + database: mytest + replica: repli + - name: dn2 + database: mytest2 + replica: repli2 \ No newline at end of file diff --git a/source/src/main/resources/user.yml b/source/src/main/resources/user.yml index d031d64..ab79c8b 100644 --- a/source/src/main/resources/user.yml +++ b/source/src/main/resources/user.yml @@ -2,7 +2,7 @@ users: - name: root password: 123456 schemas: - - test + - myfly firewall: enable: false From 27cb2f3aa0f922bac13a71d09bc47b28f53cc87b Mon Sep 17 00:00:00 2001 From: flysquirrel Date: Mon, 8 Jan 2018 18:03:46 +0800 Subject: [PATCH 03/22] change name of method "handleRoute" to "delegateRoute" --- .../mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java b/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java index 3ce95cd..f7fee23 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java @@ -85,7 +85,7 @@ protected void initMySqlCmdHandler() { @Override - protected boolean handleRoute(MycatSession session) { + protected boolean delegateRoute(MycatSession session) { byte sqltype = session.sqlContext.getSQLType() != 0 ? session.sqlContext.getSQLType() : session.sqlContext.getCurSQLType(); From 6b0858012701ae06b73c8e700fa2743c89a3bc8a Mon Sep 17 00:00:00 2001 From: flysquirrel Date: Sat, 20 Jan 2018 23:16:56 +0800 Subject: [PATCH 04/22] =?UTF-8?q?schemaType=3DDBINMultiServer=E6=A8=A1?= =?UTF-8?q?=E5=BC=8F=E6=94=AF=E6=8C=81=E5=85=A8=E5=B1=80=E8=A1=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../java/io/mycat/mycat2/MycatConfig.java | 9 + .../java/io/mycat/mycat2/MycatSession.java | 62 ++-- .../mycat/mycat2/beans/conf/TableDefBean.java | 30 +- ...ExeCmd.java => AbstractMultiDNExeCmd.java} | 2 +- .../cmds/interceptor/SQLAnnotationChain.java | 32 ++ .../cmds/multinode/DbInMultiServerCmd.java | 191 +++++++++++ .../cmds/strategy/AbstractCmdStrategy.java | 18 +- .../strategy/DBINMultiServerCmdStrategy.java | 9 +- .../strategy/DBInOneServerCmdStrategy.java | 1 + .../net/DefaultMycatSessionHandler.java | 1 + .../io/mycat/mycat2/route/RouteResultset.java | 102 +----- .../io/mycat/mycat2/route/RouteStrategy.java | 2 +- .../java/io/mycat/mycat2/route/SQLMerge.java | 87 ----- .../route/impl/AbstractRouteStrategy.java | 2 +- .../impl/DBInMultiServerRouteStrategy.java | 41 ++- .../mycat2/sqlparser/BufferSQLContext.java | 5 + .../main/java/io/mycat/util/SplitUtil.java | 312 
++++++++++++++++++ source/src/main/resources/schema.yml | 3 + 18 files changed, 674 insertions(+), 235 deletions(-) rename source/src/main/java/io/mycat/mycat2/cmds/{AbstractMutiDNExeCmd.java => AbstractMultiDNExeCmd.java} (95%) create mode 100644 source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java delete mode 100644 source/src/main/java/io/mycat/mycat2/route/SQLMerge.java create mode 100644 source/src/main/java/io/mycat/util/SplitUtil.java diff --git a/source/src/main/java/io/mycat/mycat2/MycatConfig.java b/source/src/main/java/io/mycat/mycat2/MycatConfig.java index fcdc80b..0bf4fe4 100644 --- a/source/src/main/java/io/mycat/mycat2/MycatConfig.java +++ b/source/src/main/java/io/mycat/mycat2/MycatConfig.java @@ -14,6 +14,7 @@ import io.mycat.mycat2.beans.conf.TableDefBean; import io.mycat.proxy.ConfigEnum; import io.mycat.proxy.Configurable; +import io.mycat.util.SplitUtil; public class MycatConfig { // 当前节点所用的配置文件的版本 @@ -67,6 +68,14 @@ public void initSchemaMap() { schema.setDefaultDN(mycatDataNodeMap.get(defaultDnName)); } schema.getTables().forEach(table -> { + String theDataNodes[] = SplitUtil.split(table.getDataNode(), ',', '$', '-'); + if (theDataNodes == null || theDataNodes.length <= 0) { + throw new IllegalArgumentException( + "invalid table dataNodes: " + table.getDataNode()); + } + for (String dn : theDataNodes) { + table.getDataNodes().add(dn); + } mycatTableMap.put(table.getName(), table); }); }); diff --git a/source/src/main/java/io/mycat/mycat2/MycatSession.java b/source/src/main/java/io/mycat/mycat2/MycatSession.java index 5c3ca94..1f72e2e 100644 --- a/source/src/main/java/io/mycat/mycat2/MycatSession.java +++ b/source/src/main/java/io/mycat/mycat2/MycatSession.java @@ -9,6 +9,7 @@ import java.util.List; import java.util.concurrent.ConcurrentHashMap; +import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -23,7 +24,6 @@ import io.mycat.mycat2.route.RouteResultset; import 
io.mycat.mycat2.route.RouteResultsetNode; import io.mycat.mycat2.sqlparser.BufferSQLContext; -import io.mycat.mycat2.sqlparser.NewSQLContext; import io.mycat.mycat2.sqlparser.TokenHash; import io.mycat.mycat2.tasks.AsynTaskCallBack; import io.mycat.mysql.AutoCommit; @@ -50,6 +50,8 @@ public class MycatSession extends AbstractMySQLSession { public MySQLSession curBackend; public RouteResultset curRouteResultset; + + public RouteResultsetNode curRouteResultsetNode; //所有处理cmd中,用来向前段写数据,或者后端写数据的cmd的 public MySQLCommand curSQLCommand; @@ -65,22 +67,25 @@ public class MycatSession extends AbstractMySQLSession { private static List masterSqlList = new ArrayList<>(); - static{ - masterSqlList.add(NewSQLContext.INSERT_SQL); - masterSqlList.add(NewSQLContext.UPDATE_SQL); - masterSqlList.add(NewSQLContext.DELETE_SQL); - masterSqlList.add(NewSQLContext.REPLACE_SQL); - masterSqlList.add(NewSQLContext.SELECT_INTO_SQL); - masterSqlList.add(NewSQLContext.SELECT_FOR_UPDATE_SQL); - //TODO select lock in share mode 。 也需要走主节点 需要完善sql 解析器。 - masterSqlList.add(NewSQLContext.LOAD_SQL); - masterSqlList.add(NewSQLContext.CALL_SQL); - masterSqlList.add(NewSQLContext.TRUNCATE_SQL); - - masterSqlList.add(NewSQLContext.BEGIN_SQL); - masterSqlList.add(NewSQLContext.START_SQL); //TODO 需要完善sql 解析器。 将 start transaction 分离出来。 - masterSqlList.add(NewSQLContext.SET_AUTOCOMMIT_SQL); - } + static { + masterSqlList.add(BufferSQLContext.INSERT_SQL); + masterSqlList.add(BufferSQLContext.UPDATE_SQL); + masterSqlList.add(BufferSQLContext.DELETE_SQL); + masterSqlList.add(BufferSQLContext.REPLACE_SQL); + masterSqlList.add(BufferSQLContext.SELECT_INTO_SQL); + masterSqlList.add(BufferSQLContext.SELECT_FOR_UPDATE_SQL); + masterSqlList.add(BufferSQLContext.CREATE_SQL); + masterSqlList.add(BufferSQLContext.DROP_SQL); + // TODO select lock in share mode 。 也需要走主节点 需要完善sql 解析器。 + masterSqlList.add(BufferSQLContext.LOAD_SQL); + masterSqlList.add(BufferSQLContext.CALL_SQL); + 
masterSqlList.add(BufferSQLContext.TRUNCATE_SQL); + + masterSqlList.add(BufferSQLContext.BEGIN_SQL); + masterSqlList.add(BufferSQLContext.START_SQL); // TODO 需要完善sql 解析器。 将 start transaction + // 分离出来。 + masterSqlList.add(BufferSQLContext.SET_AUTOCOMMIT_SQL); + } /** * 获取sql 类型 @@ -372,13 +377,17 @@ private String getbackendName(){ break; case DB_IN_MULTI_SERVER: RouteResultsetNode[] nodes = this.curRouteResultset.getNodes(); - if (nodes != null && nodes.length > 0) { - String dataNodeName = nodes[0].getName(); - DNBean dnBean = ProxyRuntime.INSTANCE.getConfig().getDNBean(dataNodeName); - if (dnBean != null) { - backendName = dnBean.getReplica(); - } - } else { + String dataNodeName = ""; + if (nodes != null && nodes.length == 1) { + dataNodeName = nodes[0].getName(); + } else if (nodes != null && nodes.length > 1 && curRouteResultsetNode != null) { + dataNodeName = curRouteResultsetNode.getName(); + } + DNBean dnBean = ProxyRuntime.INSTANCE.getConfig().getDNBean(dataNodeName); + if (dnBean != null) { + backendName = dnBean.getReplica(); + } + if (StringUtils.isEmpty(backendName)) { backendName = schema.getDefaultDN().getReplica(); } break; @@ -571,8 +580,9 @@ public MySQLSession getCurrCachedSession(MySQLMetaBean targetMetaBean, boolean r */ private boolean canRunOnSlave(){ //静态注解情况下 走读写分离 - if(NewSQLContext.ANNOTATION_BALANCE==sqlContext.getAnnotationType()){ - final long balancevalue = sqlContext.getAnnotationValue(NewSQLContext.ANNOTATION_BALANCE); + if (BufferSQLContext.ANNOTATION_BALANCE == sqlContext.getAnnotationType()) { + final long balancevalue = + sqlContext.getAnnotationValue(BufferSQLContext.ANNOTATION_BALANCE); if(TokenHash.MASTER == balancevalue){ return false; }else if(TokenHash.SLAVE == balancevalue){ diff --git a/source/src/main/java/io/mycat/mycat2/beans/conf/TableDefBean.java b/source/src/main/java/io/mycat/mycat2/beans/conf/TableDefBean.java index 8f3b986..44d9d2f 100644 --- a/source/src/main/java/io/mycat/mycat2/beans/conf/TableDefBean.java 
+++ b/source/src/main/java/io/mycat/mycat2/beans/conf/TableDefBean.java @@ -1,5 +1,8 @@ package io.mycat.mycat2.beans.conf; +import java.util.ArrayList; +import java.util.List; + /** * Desc: * @@ -11,12 +14,21 @@ public enum TableTypeEnum { MASTER, SLAVE; } + public enum TypeEnum { + global; + } + private String name; private TableTypeEnum tableType; private String shardingKey; private String shardingRule; private String store; private String dataNode; + /** + * type=global为全局表,否则为普通表 + */ + private TypeEnum type; + private List dataNodes = new ArrayList(); public String getName() { return name; @@ -66,11 +78,27 @@ public void setDataNode(String dataNode) { this.dataNode = dataNode; } + public TypeEnum getType() { + return type; + } + + public void setType(TypeEnum type) { + this.type = type; + } + + public List getDataNodes() { + return dataNodes; + } + + public void setDataNodes(List dataNodes) { + this.dataNodes = dataNodes; + } + @Override public String toString() { return "TableDefBean [name=" + name + ", tableType=" + tableType + ", shardingKey=" + shardingKey + ", shardingRule=" + shardingRule + ", store=" + store - + ", dataNode=" + dataNode + "]"; + + ", dataNode=" + dataNode + ", type=" + type + "]"; } } diff --git a/source/src/main/java/io/mycat/mycat2/cmds/AbstractMutiDNExeCmd.java b/source/src/main/java/io/mycat/mycat2/cmds/AbstractMultiDNExeCmd.java similarity index 95% rename from source/src/main/java/io/mycat/mycat2/cmds/AbstractMutiDNExeCmd.java rename to source/src/main/java/io/mycat/mycat2/cmds/AbstractMultiDNExeCmd.java index b8acc1b..694e2a7 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/AbstractMutiDNExeCmd.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/AbstractMultiDNExeCmd.java @@ -14,7 +14,7 @@ * @author wuzhihui * */ -public class AbstractMutiDNExeCmd implements MySQLCommand{ +public class AbstractMultiDNExeCmd implements MySQLCommand{ @Override public boolean procssSQL(MycatSession session) throws IOException { diff --git 
a/source/src/main/java/io/mycat/mycat2/cmds/interceptor/SQLAnnotationChain.java b/source/src/main/java/io/mycat/mycat2/cmds/interceptor/SQLAnnotationChain.java index 9f7fd64..9fbd5dd 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/interceptor/SQLAnnotationChain.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/interceptor/SQLAnnotationChain.java @@ -1,5 +1,6 @@ package io.mycat.mycat2.cmds.interceptor; +import java.security.InvalidParameterException; import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; @@ -8,6 +9,7 @@ import io.mycat.mycat2.MySQLCommand; import io.mycat.mycat2.MycatSession; +import io.mycat.mycat2.cmds.multinode.DbInMultiServerCmd; import io.mycat.mycat2.sqlannotations.AnnotationProcessor; import io.mycat.mycat2.sqlannotations.SQLAnnotation; import io.mycat.mycat2.sqlparser.BufferSQLContext; @@ -41,6 +43,36 @@ public SQLAnnotationChain setTarget(MySQLCommand target){ this.target = target; return this; } + + /** + * 处理路由. + * + * @param session + * @return + * @since 2.0 + */ + public SQLAnnotationChain processRoute(MycatSession session) { + + switch (session.schema.schemaType) { + case DB_IN_ONE_SERVER: + break; + case DB_IN_MULTI_SERVER: + if (session.curRouteResultset != null + && session.curRouteResultset.getNodes().length > 1) { + // DB_IN_MULTI_SERVER 模式下 + this.target = DbInMultiServerCmd.INSTANCE; + } + break; + case ANNOTATION_ROUTE: + break; +// case SQL_PARSE_ROUTE: +// AnnotateRouteCmdStrategy.INSTANCE.matchMySqlCommand(this); + default: + throw new InvalidParameterException("schema type is invalid "); + } + return this; + + } /** * 2. 
处理动态注解 diff --git a/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java b/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java new file mode 100644 index 0000000..2a9c1ec --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java @@ -0,0 +1,191 @@ +package io.mycat.mycat2.cmds.multinode; + +import java.io.IOException; +import java.nio.channels.SelectionKey; +import java.util.concurrent.locks.ReentrantLock; + +import io.mycat.mycat2.MySQLSession; +import io.mycat.mycat2.MycatSession; +import io.mycat.mycat2.cmds.AbstractMultiDNExeCmd; +import io.mycat.mycat2.console.SessionKeyEnum; +import io.mycat.mycat2.route.RouteResultsetNode; +import io.mycat.mysql.packet.ErrorPacket; +import io.mycat.mysql.packet.MySQLPacket; +import io.mycat.proxy.ProxyBuffer; + +/** + * + * DbInMultiServerCmd + *

+ * DbInMultiServer模式下的多节点执行Command类 + *

+ * Creation Time: 2018-01-20 + * + * @author zhangsiwei + * @since 2.0 + */ +public class DbInMultiServerCmd extends AbstractMultiDNExeCmd { + + public static final DbInMultiServerCmd INSTANCE = new DbInMultiServerCmd(); + + private int backendWritedCount = 0, executeCount = 0; + + private boolean isFirst = true; + + private final ReentrantLock lock = new ReentrantLock(); + + @Override + public boolean procssSQL(MycatSession session) throws IOException { + RouteResultsetNode[] nodes = session.curRouteResultset.getNodes(); + for (int i = 0; i < nodes.length; i++) { + RouteResultsetNode node = nodes[i]; + session.curRouteResultsetNode = node; + /* + * 获取后端连接可能涉及到异步处理,这里需要先取消前端读写事件 + */ + session.clearReadWriteOpts(); + + session.getBackend((mysqlsession, sender, success, result) -> { + + ProxyBuffer curBuffer = session.proxyBuffer; + // 切换 buffer 读写状态 + curBuffer.flip(); + + if (success) { + // 没有读取,直接透传时,需要指定 透传的数据 截止位置 + curBuffer.readIndex = curBuffer.writeIndex; + // 改变 owner,对端Session获取,并且感兴趣写事件 + session.giveupOwner(SelectionKey.OP_WRITE); + if (isFirst) { + isFirst = false; + } else { + // 因为第一次把报文透传到mysql后端后,readmark指针会来到readIndex的位置, + // 所以第一次之后再要透传同样的指令,就要先把readmark重置回原来的位置。 + curBuffer.readMark = + curBuffer.readIndex - session.curMSQLPackgInf.pkgLength; + } + try { + mysqlsession.writeToChannel(); + } catch (IOException e) { + session.closeBackendAndResponseError(mysqlsession, success, + ((ErrorPacket) result)); + } + } else { + session.closeBackendAndResponseError(mysqlsession, success, + ((ErrorPacket) result)); + } + }); + } + return false; + } + + @Override + public boolean onBackendResponse(MySQLSession session) throws IOException { + lock.lock(); + try { + ++executeCount; + // 首先进行一次报文的读取操作 + if (!session.readFromChannel()) { + return false; + } + // 进行报文处理的流程化 + boolean nextReadFlag = false; + do { + // 进行报文的处理流程 + nextReadFlag = session.getMycatSession().commandHandler.procss(session); + } while (nextReadFlag); + + // 获取当前是否结束标识 + Boolean check 
= (Boolean) session.getSessionAttrMap() + .get(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey()); + + MycatSession mycatSession = session.getMycatSession(); + ProxyBuffer buffer = session.getProxyBuffer(); + + if (executeCount < session.getMycatSession().curRouteResultset.getNodes().length) { + // DbInMultiServer模式下,不考虑show tables等DSL语句的话,只有对全局表的操作才会跨节点,也就是对全局表的DDL,DML语句, + // 而对每个节点的全局表操作完后返回的报文都是一样的,因此只需要拿最后一次的报文返回给客户端即可 + if (session.curMSQLPackgInf.pkgType == MySQLPacket.OK_PACKET) { + // 因为不是最后一个节点的返回报文,所以这里讲readmark设为readIndex,也就是丢弃掉这次报文(仅考虑全局表的DDL, DML返回报文) + // TODO show tables类的DSL语句就不适用,这个后续考虑时再优化 + session.getProxyBuffer().readMark = session.getProxyBuffer().readIndex; + } + return false; + } + + // 检查到当前已经完成,执行添加操作 + if (null != check && check) { + // 当知道操作完成后,前段的注册感兴趣事件为读取 + mycatSession.takeOwner(SelectionKey.OP_READ); + } + // 未完成执行继续读取操作 + else { + // 直接透传报文 + mycatSession.takeOwner(SelectionKey.OP_WRITE); + } + buffer.flip(); + executeCount = 0; + mycatSession.writeToChannel(); + } finally { + lock.unlock(); + } + return false; + } + + @Override + public boolean onBackendClosed(MySQLSession session, boolean normal) throws IOException { + // TODO Auto-generated method stub + return super.onBackendClosed(session, normal); + } + + @Override + public boolean onFrontWriteFinished(MycatSession session) throws IOException { + // 判断是否结果集传输完成,决定命令是否结束,切换到前端读取数据 + // 检查当前已经结束,进行切换 + // 检查如果存在传输的标识,说明后传数据向前传传输未完成,注册后端的读取事件 + if (session.getSessionAttrMap() + .containsKey(SessionKeyEnum.SESSION_KEY_TRANSFER_OVER_FLAG.getKey())) { + session.proxyBuffer.flip(); + session.giveupOwner(SelectionKey.OP_READ); + return false; + } + // 当传输标识不存在,则说已经结束,则切换到前端的读取 + else { + session.proxyBuffer.flip(); + // session.chnageBothReadOpts(); + session.takeOwner(SelectionKey.OP_READ); + return true; + } + } + + @Override + public boolean onBackendWriteFinished(MySQLSession session) throws IOException { + + ++backendWritedCount; + session.proxyBuffer.flip(); + 
session.change2ReadOpts(); + if (backendWritedCount >= session.getMycatSession().curRouteResultset.getNodes().length) { + isFirst = true; + backendWritedCount = 0; + // 绝大部分情况下,前端把数据写完后端发送出去后,就等待后端返回数据了, + // 向后端写入完成数据后,则从后端读取数据 + // session.proxyBuffer.flip(); + // 由于单工模式,在向后端写入完成后,需要从后端进行数据读取 +// session.change2ReadOpts(); + } + return false; + } + + @Override + public void clearFrontResouces(MycatSession session, boolean sessionCLosed) { + // TODO Auto-generated method stub + super.clearFrontResouces(session, sessionCLosed); + } + + @Override + public void clearBackendResouces(MySQLSession session, boolean sessionCLosed) { + // TODO Auto-generated method stub + super.clearBackendResouces(session, sessionCLosed); + } + +} diff --git a/source/src/main/java/io/mycat/mycat2/cmds/strategy/AbstractCmdStrategy.java b/source/src/main/java/io/mycat/mycat2/cmds/strategy/AbstractCmdStrategy.java index 7ccaa35..e13d39d 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/strategy/AbstractCmdStrategy.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/strategy/AbstractCmdStrategy.java @@ -73,9 +73,9 @@ private void initStaticAnnotation(){ * * @param session * @return - * @since 1.0 + * @since 2.0 */ - protected boolean handleRoute(MycatSession session) { + protected boolean delegateRoute(MycatSession session) { return true; }; @@ -117,6 +117,10 @@ public boolean matchMySqlCommand(MycatSession session) { command = DirectPassthrouhCmd.INSTANCE; } + if (!delegateRoute(session)) { + return false; + } + /** * 设置原始处理命令 * 1. 设置目标命令 @@ -125,13 +129,9 @@ public boolean matchMySqlCommand(MycatSession session) { * 4. 
构建命令或者注解链。 如果没有注解链,直接返回目标命令 */ SQLAnnotationChain chain = new SQLAnnotationChain(); - session.curSQLCommand = chain.setTarget(command) - .processDynamicAnno(session) - .processStaticAnno(session, staticAnnontationMap) - .build(); - if (!handleRoute(session)) { - return false; - } + session.curSQLCommand = + chain.setTarget(command).processRoute(session).processDynamicAnno(session) + .processStaticAnno(session, staticAnnontationMap).build(); return true; } } diff --git a/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java b/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java index f7fee23..fb154b3 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java @@ -27,10 +27,10 @@ import io.mycat.util.ErrorCode; public class DBINMultiServerCmdStrategy extends AbstractCmdStrategy { - + private static final Logger logger = LoggerFactory.getLogger(DBINMultiServerCmdStrategy.class); - public static final DBINMultiServerCmdStrategy INSTANCE = new DBINMultiServerCmdStrategy(); + public static final DBINMultiServerCmdStrategy INSTANCE = new DBINMultiServerCmdStrategy(); private RouteStrategy routeStrategy = new DBInMultiServerRouteStrategy(); @@ -73,6 +73,7 @@ protected void initMyCmdHandler() { protected void initMySqlCmdHandler() { MYSQLCOMMANDMAP.put(BufferSQLContext.INSERT_SQL, DirectPassthrouhCmd.INSTANCE); MYSQLCOMMANDMAP.put(BufferSQLContext.UPDATE_SQL, DirectPassthrouhCmd.INSTANCE); + MYSQLCOMMANDMAP.put(BufferSQLContext.DROP_SQL, DirectPassthrouhCmd.INSTANCE); MYSQLCOMMANDMAP.put(BufferSQLContext.COMMIT_SQL, SqlComCommitCmd.INSTANCE); MYSQLCOMMANDMAP.put(BufferSQLContext.ROLLBACK_SQL, SqlComRollBackCmd.INSTANCE); MYSQLCOMMANDMAP.put(BufferSQLContext.SELECT_SQL, DirectPassthrouhCmd.INSTANCE); @@ -91,7 +92,9 @@ protected boolean delegateRoute(MycatSession session) { : 
session.sqlContext.getCurSQLType(); RouteResultset rrs = routeStrategy.route(session.schema, sqltype, session.sqlContext.getRealSQL(0), null, session); - if (rrs.getNodes() != null && rrs.getNodes().length > 1) { + + if (rrs.getNodes() != null && rrs.getNodes().length > 1 && !rrs.isGlobalTable()) { + session.curRouteResultset = null; try { logger.error( diff --git a/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBInOneServerCmdStrategy.java b/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBInOneServerCmdStrategy.java index e02d36f..e8871b7 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBInOneServerCmdStrategy.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBInOneServerCmdStrategy.java @@ -59,6 +59,7 @@ protected void initMyCmdHandler() { protected void initMySqlCmdHandler() { MYSQLCOMMANDMAP.put(BufferSQLContext.INSERT_SQL, DirectPassthrouhCmd.INSTANCE); MYSQLCOMMANDMAP.put(BufferSQLContext.UPDATE_SQL, DirectPassthrouhCmd.INSTANCE); + MYSQLCOMMANDMAP.put(BufferSQLContext.DROP_SQL, DirectPassthrouhCmd.INSTANCE); MYSQLCOMMANDMAP.put(BufferSQLContext.COMMIT_SQL, SqlComCommitCmd.INSTANCE); MYSQLCOMMANDMAP.put(BufferSQLContext.ROLLBACK_SQL, SqlComRollBackCmd.INSTANCE); MYSQLCOMMANDMAP.put(BufferSQLContext.SELECT_SQL, DirectPassthrouhCmd.INSTANCE); diff --git a/source/src/main/java/io/mycat/mycat2/net/DefaultMycatSessionHandler.java b/source/src/main/java/io/mycat/mycat2/net/DefaultMycatSessionHandler.java index c526b5c..8d37f0f 100644 --- a/source/src/main/java/io/mycat/mycat2/net/DefaultMycatSessionHandler.java +++ b/source/src/main/java/io/mycat/mycat2/net/DefaultMycatSessionHandler.java @@ -102,6 +102,7 @@ private void onBackendRead(MySQLSession session) throws IOException { MySQLCommand curCmd = session.getMycatSession().curSQLCommand; try { if (curCmd.onBackendResponse(session)) { + System.out.println("---------------" + session.isClosed()); curCmd.clearBackendResouces(session, session.isClosed()); } } catch 
(ClosedChannelException ex) { diff --git a/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java b/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java index 71ed7dd..47d1ed7 100644 --- a/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java +++ b/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java @@ -25,12 +25,10 @@ import java.io.Serializable; import java.util.HashMap; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; -import io.mycat.mycat2.sqlengine.mpp.HavingCols; import io.mycat.util.FormatUtil; /** @@ -38,7 +36,7 @@ */ public final class RouteResultset implements Serializable { private String statement; // 原始语句 - private final int sqlType; + private final byte sqlType; private RouteResultsetNode[] nodes; // 路由结果节点 private Set subTables; @@ -50,7 +48,6 @@ public final class RouteResultset implements Serializable { private String primaryKey; // limit output total private int limitSize; - private SQLMerge sqlMerge; private boolean callStatement = false; // 处理call关键字 @@ -146,7 +143,7 @@ public void setGlobalTable(boolean globalTableFlag) { this.globalTableFlag = globalTableFlag; } - public RouteResultset(String stmt, int sqlType) { + public RouteResultset(String stmt, byte sqlType) { this.statement = stmt; this.limitSize = -1; this.sqlType = sqlType; @@ -177,10 +174,6 @@ public void copyLimitToNodes() { } - public SQLMerge getSqlMerge() { - return sqlMerge; - } - public boolean isCacheAble() { return cacheAble; } @@ -189,37 +182,14 @@ public void setCacheAble(boolean cacheAble) { this.cacheAble = cacheAble; } - public boolean needMerge() { - return limitSize > 0 || sqlMerge != null; - } - - public int getSqlType() { + public byte getSqlType() { return sqlType; } - public boolean isHasAggrColumn() { - return (sqlMerge != null) && sqlMerge.isHasAggrColumn(); - } - public int getLimitStart() { return limitStart; } - public String[] getGroupByCols() { - return (sqlMerge != 
null) ? sqlMerge.getGroupByCols() : null; - } - - private SQLMerge createSQLMergeIfNull() { - if (sqlMerge == null) { - sqlMerge = new SQLMerge(); - } - return sqlMerge; - } - - public Map getMergeCols() { - return (sqlMerge != null) ? sqlMerge.getMergeCols() : null; - } - public void setLimitStart(int limitStart) { this.limitStart = limitStart; } @@ -249,36 +219,6 @@ public String[] getPrimaryKeyItems() { return primaryKey.split("\\."); } - public void setOrderByCols(LinkedHashMap orderByCols) { - if (orderByCols != null && !orderByCols.isEmpty()) { - createSQLMergeIfNull().setOrderByCols(orderByCols); - } - } - - public void setHasAggrColumn(boolean hasAggrColumn) { - if (hasAggrColumn) { - createSQLMergeIfNull().setHasAggrColumn(true); - } - } - - public void setGroupByCols(String[] groupByCols) { - if (groupByCols != null && groupByCols.length > 0) { - createSQLMergeIfNull().setGroupByCols(groupByCols); - } - } - - public void setMergeCols(Map mergeCols) { - if (mergeCols != null && !mergeCols.isEmpty()) { - createSQLMergeIfNull().setMergeCols(mergeCols); - } - - } - - public LinkedHashMap getOrderByCols() { - return (sqlMerge != null) ? 
sqlMerge.getOrderByCols() : null; - - } - public String getStatement() { return statement; } @@ -331,24 +271,6 @@ public void setCallStatement(boolean callStatement) { } } - /* - * public void changeNodeSqlAfterAddLimit(SchemaConfig schemaConfig, String sourceDbType, String - * sql, int offset, int count, boolean isNeedConvert) { if (nodes != null) { - * - * Map dataNodeDbTypeMap = schemaConfig.getDataNodeDbTypeMap(); Map sqlMapCache = new HashMap<>(); for (RouteResultsetNode node : nodes) { String dbType - * = dataNodeDbTypeMap.get(node.getName()); if (dbType.equalsIgnoreCase("mysql")) { - * node.setStatement(sql); //mysql之前已经加好limit } else if (sqlMapCache.containsKey(dbType)) { - * node.setStatement(sqlMapCache.get(dbType)); } else if(isNeedConvert) { String nativeSql = - * PageSQLUtil.convertLimitToNativePageSql(dbType, sql, offset, count); sqlMapCache.put(dbType, - * nativeSql); node.setStatement(nativeSql); } else { node.setStatement(sql); } - * - * node.setLimitStart(offset); node.setLimitSize(count); } - * - * - * } } - */ - public boolean isAutocommit() { return autocommit; } @@ -365,28 +287,10 @@ public void setCanRunInReadDB(Boolean canRunInReadDB) { this.canRunInReadDB = canRunInReadDB; } - public HavingCols getHavingCols() { - return (sqlMerge != null) ? 
sqlMerge.getHavingCols() : null; - } - public void setSubTables(Set subTables) { this.subTables = subTables; } - public void setHavings(HavingCols havings) { - if (havings != null) { - createSQLMergeIfNull().setHavingCols(havings); - } - } - - // Added by winbill, 20160314, for having clause, Begin ==> - public void setHavingColsName(Object[] names) { - if (names != null && names.length > 0) { - createSQLMergeIfNull().setHavingColsName(names); - } - } - // Added by winbill, 20160314, for having clause, End <== - public Set getSubTables() { return this.subTables; } diff --git a/source/src/main/java/io/mycat/mycat2/route/RouteStrategy.java b/source/src/main/java/io/mycat/mycat2/route/RouteStrategy.java index 6d6ccb4..b4ae4e8 100644 --- a/source/src/main/java/io/mycat/mycat2/route/RouteStrategy.java +++ b/source/src/main/java/io/mycat/mycat2/route/RouteStrategy.java @@ -8,6 +8,6 @@ * */ public interface RouteStrategy { - public RouteResultset route(SchemaBean schema, int sqlType, String origSQL, String charset, + public RouteResultset route(SchemaBean schema, byte sqlType, String origSQL, String charset, MycatSession mycatSession); } diff --git a/source/src/main/java/io/mycat/mycat2/route/SQLMerge.java b/source/src/main/java/io/mycat/mycat2/route/SQLMerge.java deleted file mode 100644 index 0ee0489..0000000 --- a/source/src/main/java/io/mycat/mycat2/route/SQLMerge.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.mycat2.route; - -import java.io.Serializable; -import java.util.LinkedHashMap; -import java.util.Map; - -import io.mycat.mycat2.sqlengine.mpp.HavingCols; - -public class SQLMerge implements Serializable { - private LinkedHashMap orderByCols; - private HavingCols havingCols; - private Object[] havingColsName; // Added by winbill, 20160314, for having clause - private Map mergeCols; - private String[] groupByCols; - private boolean hasAggrColumn; - - public LinkedHashMap getOrderByCols() { - return orderByCols; - } - - public void setOrderByCols(LinkedHashMap orderByCols) { - this.orderByCols = orderByCols; - } - - public Map getMergeCols() { - return mergeCols; - } - - public void setMergeCols(Map mergeCols) { - this.mergeCols = mergeCols; - } - - public String[] getGroupByCols() { - return groupByCols; - } - - public void setGroupByCols(String[] groupByCols) { - this.groupByCols = groupByCols; - } - - public boolean isHasAggrColumn() { - return hasAggrColumn; - } - - public void setHasAggrColumn(boolean hasAggrColumn) { - this.hasAggrColumn = hasAggrColumn; - } - - public HavingCols getHavingCols() { - return havingCols; - } - - public void setHavingCols(HavingCols havingCols) { - this.havingCols = havingCols; - } - - public Object[] getHavingColsName() { - return 
havingColsName; - } - - public void setHavingColsName(Object[] havingColsName) { - this.havingColsName = havingColsName; - } -} \ No newline at end of file diff --git a/source/src/main/java/io/mycat/mycat2/route/impl/AbstractRouteStrategy.java b/source/src/main/java/io/mycat/mycat2/route/impl/AbstractRouteStrategy.java index 951307a..2f42c00 100644 --- a/source/src/main/java/io/mycat/mycat2/route/impl/AbstractRouteStrategy.java +++ b/source/src/main/java/io/mycat/mycat2/route/impl/AbstractRouteStrategy.java @@ -18,7 +18,7 @@ public abstract class AbstractRouteStrategy implements RouteStrategy { private static final Logger LOGGER = LoggerFactory.getLogger(AbstractRouteStrategy.class); @Override - public RouteResultset route(SchemaBean schema, int sqlType, String origSQL, String charset, + public RouteResultset route(SchemaBean schema, byte sqlType, String origSQL, String charset, MycatSession mycatSession) { // TODO 待处理checkSQLSchema by zhangsiwei diff --git a/source/src/main/java/io/mycat/mycat2/route/impl/DBInMultiServerRouteStrategy.java b/source/src/main/java/io/mycat/mycat2/route/impl/DBInMultiServerRouteStrategy.java index efc7df8..79aae41 100644 --- a/source/src/main/java/io/mycat/mycat2/route/impl/DBInMultiServerRouteStrategy.java +++ b/source/src/main/java/io/mycat/mycat2/route/impl/DBInMultiServerRouteStrategy.java @@ -8,9 +8,11 @@ import io.mycat.mycat2.MycatSession; import io.mycat.mycat2.beans.conf.SchemaBean; import io.mycat.mycat2.beans.conf.TableDefBean; +import io.mycat.mycat2.beans.conf.TableDefBean.TypeEnum; import io.mycat.mycat2.route.RouteResultset; import io.mycat.mycat2.route.RouteResultsetNode; import io.mycat.mycat2.route.RouteStrategy; +import io.mycat.mycat2.sqlparser.BufferSQLContext; import io.mycat.proxy.ProxyRuntime; /** @@ -26,23 +28,44 @@ public class DBInMultiServerRouteStrategy implements RouteStrategy { @Override - public RouteResultset route(SchemaBean schema, int sqlType, String origSQL, String charset, + public RouteResultset 
route(SchemaBean schema, byte sqlType, String origSQL, String charset, MycatSession mycatSession) { Set dataNodes = new HashSet<>(); - + Set globalDataNodes = new HashSet<>(); // 全局表的datanode MycatConfig config = ProxyRuntime.INSTANCE.getConfig(); - + boolean existGlobalTable = false; for (int i = 0; i < mycatSession.sqlContext.getTableCount(); i++) { String tableName = mycatSession.sqlContext.getTableName(i); TableDefBean tableDefBean = config.getTableDefBean(tableName); if (tableDefBean != null) { - dataNodes.add(tableDefBean.getDataNode()); + if (tableDefBean.getType() == TypeEnum.global) { + if (!existGlobalTable) { + existGlobalTable = true; + } + globalDataNodes.addAll(tableDefBean.getDataNodes()); + } else { + dataNodes.addAll(tableDefBean.getDataNodes()); + } + } else { + dataNodes.add(schema.getDefaultDataNode()); + } + } + // 就全局表而言,只有查询操作不需要跨节点,其他都要 + if (sqlType != BufferSQLContext.SELECT_SQL + && sqlType != BufferSQLContext.SELECT_FOR_UPDATE_SQL + && sqlType != BufferSQLContext.SELECT_INTO_SQL) { + dataNodes.addAll(globalDataNodes); + } else { + if (!globalDataNodes.isEmpty()) { + dataNodes.add(globalDataNodes.stream().findFirst().get()); } } RouteResultset rrs = new RouteResultset(origSQL, sqlType); - + if (existGlobalTable) { + rrs.setGlobalTable(true); + } if (dataNodes.size() >= 1) { RouteResultsetNode[] nodes = new RouteResultsetNode[dataNodes.size()]; int i = 0; @@ -51,9 +74,13 @@ public RouteResultset route(SchemaBean schema, int sqlType, String origSQL, Stri } rrs.setNodes(nodes); return rrs; + } else { + // 使用默认datanode + RouteResultsetNode[] nodes = new RouteResultsetNode[1]; + nodes[0] = new RouteResultsetNode(schema.getDefaultDataNode(), sqlType, origSQL); + rrs.setNodes(nodes); + return rrs; } - - return rrs; } } diff --git a/source/src/main/java/io/mycat/mycat2/sqlparser/BufferSQLContext.java b/source/src/main/java/io/mycat/mycat2/sqlparser/BufferSQLContext.java index 6a5b5f2..2e5b0b3 100644 --- 
a/source/src/main/java/io/mycat/mycat2/sqlparser/BufferSQLContext.java +++ b/source/src/main/java/io/mycat/mycat2/sqlparser/BufferSQLContext.java @@ -283,6 +283,11 @@ public void setSQLType(byte sqlType) { this.sqlType = sqlType; } + public boolean isDDL() { + return sqlType == CREATE_SQL || sqlType == ALTER_SQL || sqlType == DROP_SQL + || sqlType == TRUNCATE_SQL; + } + public void setSQLIdx(int sqlIdx) { curSQLIdx = sqlIdx; } diff --git a/source/src/main/java/io/mycat/util/SplitUtil.java b/source/src/main/java/io/mycat/util/SplitUtil.java new file mode 100644 index 0000000..22865dd --- /dev/null +++ b/source/src/main/java/io/mycat/util/SplitUtil.java @@ -0,0 +1,312 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.util; + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; + +/** + * @author mycat + */ +public class SplitUtil { + private static final String[] EMPTY_STRING_ARRAY = new String[0]; + + /** + * 解析字符串
+ * 比如:c1='$',c2='-' 输入字符串:mysql_db$0-2
+ * 输出array:mysql_db[0],mysql_db[1],mysql_db[2] + */ + public static String[] split2(String src, char c1, char c2) { + if (src == null) { + return null; + } + int length = src.length(); + if (length == 0) { + return EMPTY_STRING_ARRAY; + } + List list = new LinkedList(); + String[] p = split(src, c1, true); + if (p.length > 1) { + String[] scope = split(p[1], c2, true); + int min = Integer.parseInt(scope[0]); + int max = Integer.parseInt(scope[scope.length - 1]); + for (int x = min; x <= max; x++) { + list.add(new StringBuilder(p[0]).append('[').append(x).append(']').toString()); + } + } else { + list.add(p[0]); + } + return list.toArray(new String[list.size()]); + } + + public static String[] split(String src) { + return split(src, null, -1); + } + + public static String[] split(String src, char separatorChar) { + if (src == null) { + return null; + } + int length = src.length(); + if (length == 0) { + return EMPTY_STRING_ARRAY; + } + List list = new LinkedList(); + int i = 0; + int start = 0; + boolean match = false; + while (i < length) { + if (src.charAt(i) == separatorChar) { + if (match) { + list.add(src.substring(start, i)); + match = false; + } + start = ++i; + continue; + } + match = true; + i++; + } + if (match) { + list.add(src.substring(start, i)); + } + return list.toArray(new String[list.size()]); + } + + public static String[] split(String src, char separatorChar, boolean trim) { + if (src == null) { + return null; + } + int length = src.length(); + if (length == 0) { + return EMPTY_STRING_ARRAY; + } + List list = new LinkedList(); + int i = 0; + int start = 0; + boolean match = false; + while (i < length) { + if (src.charAt(i) == separatorChar) { + if (match) { + if (trim) { + list.add(src.substring(start, i).trim()); + } else { + list.add(src.substring(start, i)); + } + match = false; + } + start = ++i; + continue; + } + match = true; + i++; + } + if (match) { + if (trim) { + list.add(src.substring(start, i).trim()); + } else { + 
list.add(src.substring(start, i)); + } + } + return list.toArray(new String[list.size()]); + } + + public static String[] split(String str, String separatorChars) { + return split(str, separatorChars, -1); + } + + public static String[] split(String src, String separatorChars, int max) { + if (src == null) { + return null; + } + int length = src.length(); + if (length == 0) { + return EMPTY_STRING_ARRAY; + } + List list = new LinkedList(); + int sizePlus1 = 1; + int i = 0; + int start = 0; + boolean match = false; + if (separatorChars == null) {// null表示使用空白作为分隔符 + while (i < length) { + if (Character.isWhitespace(src.charAt(i))) { + if (match) { + if (sizePlus1++ == max) { + i = length; + } + list.add(src.substring(start, i)); + match = false; + } + start = ++i; + continue; + } + match = true; + i++; + } + } else if (separatorChars.length() == 1) {// 优化分隔符长度为1的情形 + char sep = separatorChars.charAt(0); + while (i < length) { + if (src.charAt(i) == sep) { + if (match) { + if (sizePlus1++ == max) { + i = length; + } + list.add(src.substring(start, i)); + match = false; + } + start = ++i; + continue; + } + match = true; + i++; + } + } else {// 一般情形 + while (i < length) { + if (separatorChars.indexOf(src.charAt(i)) >= 0) { + if (match) { + if (sizePlus1++ == max) { + i = length; + } + list.add(src.substring(start, i)); + match = false; + } + start = ++i; + continue; + } + match = true; + i++; + } + } + if (match) { + list.add(src.substring(start, i)); + } + return list.toArray(new String[list.size()]); + } + + /** + * 解析字符串,比如:
+ * 1. c1='$',c2='-',c3='[',c4=']' 输入字符串:mysql_db$0-2
+ * 输出mysql_db[0],mysql_db[1],mysql_db[2]
+ * 2. c1='$',c2='-',c3='#',c4='0' 输入字符串:mysql_db$0-2
+ * 输出mysql_db#0,mysql_db#1,mysql_db#2
+ * 3. c1='$',c2='-',c3='0',c4='0' 输入字符串:mysql_db$0-2
+ * 输出mysql_db0,mysql_db1,mysql_db2
+ */ + public static String[] split(String src, char c1, char c2, char c3, char c4) { + if (src == null) { + return null; + } + int length = src.length(); + if (length == 0) { + return EMPTY_STRING_ARRAY; + } + List list = new LinkedList(); + if (src.indexOf(c1) == -1) { + list.add(src.trim()); + } else { + String[] s = split(src, c1, true); + String[] scope = split(s[1], c2, true); + int min = Integer.parseInt(scope[0]); + int max = Integer.parseInt(scope[scope.length - 1]); + if (c3 == '0') { + for (int x = min; x <= max; x++) { + list.add(new StringBuilder(s[0]).append(x).toString()); + } + } else if (c4 == '0') { + for (int x = min; x <= max; x++) { + list.add(new StringBuilder(s[0]).append(c3).append(x).toString()); + } + } else { + for (int x = min; x <= max; x++) { + list.add(new StringBuilder(s[0]).append(c3).append(x).append(c4).toString()); + } + } + } + return list.toArray(new String[list.size()]); + } + + public static String[] split(String src, char fi, char se, char th) { + return split(src, fi, se, th, '0', '0'); + } + + public static String[] split(String src, char fi, char se, char th, char left, char right) { + List list = new LinkedList(); + String[] pools = split(src, fi, true); + for (int i = 0; i < pools.length; i++) { + if (pools[i].indexOf(se) == -1) { + list.add(pools[i]); + continue; + } + String[] s = split(pools[i], se, th, left, right); + for (int j = 0; j < s.length; j++) { + list.add(s[j]); + } + } + return list.toArray(new String[list.size()]); + } + + public static String[] splitByByteSize(String string, int size) { + if (size < 2) + { + return new String[]{string}; + } + byte[] bytes = string.getBytes(); + if (bytes.length <= size) { + return new String[]{string}; + } + // 分成的条数不确定(整除的情况下也许会多出一条),所以先用list再转化为array + List list = new ArrayList(); + int offset = 0;// 偏移量,也就是截取的字符串的首字节的位置 + int length = 0;// 截取的字符串的长度,可能是size,可能是size-1 + int position = 0;// 可能的截取点,根据具体情况判断是不是在此截取 + while (position < bytes.length) { + position = offset 
+ size; + if (position > bytes.length) { + // 最后一条 + String s = new String(bytes, offset, bytes.length - offset); + list.add(s); + break; + } + if (bytes[position - 1] > 0 + || (bytes[position - 1] < 0 && bytes[position - 2] < 0)){ + // 截断点是字母,或者是汉字 + length = size; + } else { + // 截断点在汉字中间 + length = size - 1; + } + String s = new String(bytes, offset, length); + list.add(s); + offset += length; + } + String[] array = new String[list.size()]; + for (int i = 0; i < array.length; i++) { + array[i] = (String) list.get(i); + } + return array; + } + +} \ No newline at end of file diff --git a/source/src/main/resources/schema.yml b/source/src/main/resources/schema.yml index 024c4ff..3398761 100644 --- a/source/src/main/resources/schema.yml +++ b/source/src/main/resources/schema.yml @@ -7,6 +7,9 @@ schemas: dataNode: dn1 - name: tb_boy dataNode: dn2 + - name: tb_paw + type: global + dataNode: dn$1-2 dataNodes: - name: dn1 database: mytest From 18e174b4315eed7078b30463a62af833d34b63fe Mon Sep 17 00:00:00 2001 From: flysquirrel Date: Sat, 20 Jan 2018 23:17:40 +0800 Subject: [PATCH 05/22] =?UTF-8?q?=E6=95=B4=E7=90=86=E6=B3=A8=E9=87=8A?= =?UTF-8?q?=E7=9A=84=E6=A0=BC=E5=BC=8F=EF=BC=8C=E6=96=B9=E4=BE=BF=E9=98=85?= =?UTF-8?q?=E8=AF=BB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../main/java/io/mycat/proxy/ProxyBuffer.java | 35 ++++++++++++------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/source/src/main/java/io/mycat/proxy/ProxyBuffer.java b/source/src/main/java/io/mycat/proxy/ProxyBuffer.java index 7b9a93e..37dc504 100644 --- a/source/src/main/java/io/mycat/proxy/ProxyBuffer.java +++ b/source/src/main/java/io/mycat/proxy/ProxyBuffer.java @@ -6,22 +6,33 @@ import org.slf4j.LoggerFactory; /** + *
  * 可重用的Buffer,连续读或者写,当空间不够时Compact擦除之前用过的空间, 处于写状态或者读状态之一,不能同时读写,
  * 只有数据被操作完成(读完或者写完)后State才能被改变(flip方法或手工切换状态),同时可能要改变Owner,chanageOwn
  *
- * 需要外部 关心的状态为 writeIndex 写入buffer 开始位置 readIndex 读取开始位置 inReading 当前buffer 读写状态
- * frontUsing owner 不需要外部关心的状态为 readMark 向channel 中写入数据时的开始位置, 该状态由
- * writeToChannel 自动维护,不需要外部显式指定 preUsing 上一个owner 仅在 write==0
- * 或只写了一部分数据的情况下,需要临时改变 owner .本次写入完成后,需要根据preUsing 自动切换回来 使用流程
- * 一、透传、只前端读写、只后端读写场景 1. 从channel 向 buffer 写入数据 始终从 writeIndex 开始写入 , inReading
- * 状态一定为 false 写入状态 2. 读取 buffer 中数据 读取的数据范围是 readIndex --- writeIndex 之间的数据. 3.
- * 向 channel 写入数据前, flip 切换读写状态 4. 数据全部透传完成(例如:整个结果集透传完成)后 changeOwner,否则 owner
- * 不变. 5. 从 buffer 向 channel 写入数据时,写入 readMark--readIndex 之间的数据. 6. 写完成后 flip
- * 切换读写状态。同时 如果 readIndex > buffer.capacity() * 2 / 3 进行一次压缩 7. 从 channel
- * 向buffer 写入数据时,如果 writeIndex > buffer.capacity() * 1 / 3 进行一次压缩
+ * 需要外部关心的状态
+ * writeIndex 写入buffer开始位置;
+ * readIndex 读取开始位置;
+ * inReading 当前buffer读写状态,inReading=true表示读状态,inReading=false表示写状态。
+ * 不需要外部关心的状态为 
+ * readMark 向channel 中写入数据时的开始位置, 该状态由writeToChannel 自动维护,不需要外部显式指定;
+ * 
+ * 使用流程
+ * 一、透传、只前端读写、只后端读写场景 
+ * 1. 从channel 向 buffer 写入数据 
+ * 始终从 writeIndex 开始写入 , inReading状态一定为 false 写入状态 
+ * 2. 读取 buffer 中数据 
+ * 读取的数据范围是 readIndex --- writeIndex 之间的数据. 
+ * 3. 向 channel 写入数据前, flip 切换读写状态 
+ * 4. 数据全部透传完成(例如:整个结果集透传完成)后 changeOwner,否则 owner不变. 
+ * 5. 从 buffer 向 channel 写入数据时,写入 readMark--readIndex 之间的数据. 
+ * 6. 写完成后 flip切换读写状态。同时 如果 readIndex > buffer.capacity() * 2 / 3 进行一次压缩 
+ * 7. 从 channel向buffer 写入数据时,如果 writeIndex > buffer.capacity() * 1 / 3 进行一次压缩
  *
- * 二、没有读取数据,向buffer中写入数据后 直接 write 到 channel的场景 1. 在写入到 channel 时 ,需要显式 指定
- * readIndex = writeIndex; 2. 其他步骤 同 (透传、只前端读写、只后端读写场景)场景
+ * 二、没有读取数据,向buffer中写入数据后 直接 write 到 channel的场景 
+ * 1. 在写入到 channel 时 ,需要显式 指定readIndex = writeIndex; 
+ * 2. 其他步骤 同 (透传、只前端读写、只后端读写场景)场景
+ * 
* * @author yanjunli * From e4875ddc9f4eed0215abf1bb991a229a92ca58b8 Mon Sep 17 00:00:00 2001 From: flysquirrel Date: Sat, 20 Jan 2018 23:37:10 +0800 Subject: [PATCH 06/22] =?UTF-8?q?DBInMultiServer=E5=AE=9E=E7=8E=B0?= =?UTF-8?q?=E6=80=9D=E8=B7=AF.md=20=E6=B7=BB=E5=8A=A0=E5=85=A8=E5=B1=80?= =?UTF-8?q?=E8=A1=A8=E7=9A=84=E8=AF=B4=E6=98=8E?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...36\347\216\260\346\200\235\350\267\257.md" | 24 +++++++++++++++---- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git "a/doc/discussions/DBInMultiServer\345\256\236\347\216\260\346\200\235\350\267\257.md" "b/doc/discussions/DBInMultiServer\345\256\236\347\216\260\346\200\235\350\267\257.md" index 9990add..bf912be 100644 --- "a/doc/discussions/DBInMultiServer\345\256\236\347\216\260\346\200\235\350\267\257.md" +++ "b/doc/discussions/DBInMultiServer\345\256\236\347\216\260\346\200\235\350\267\257.md" @@ -1,12 +1,14 @@ -| version | date | participants | -|:---------:|------|:--------------:| -| 1.0 | 2017-12-29 | 鼯鼠| +| version | date | participants | content | +|:---------:|------|:--------------:|:------------:| +| 1.0 | 2017-12-29 | 鼯鼠|新建文档| +| 1.1 | 2018-01-20 | 鼯鼠|支持全局表| ### 1. DBInMultiServer目标 * 支持DBInMultiServer模式,即表在不同的MySQL Server上,但不分片 -* 暂不允许跨库DML ,DDL语句(给出错误提示)(已支持) +* 暂不允许跨节点DML ,DDL语句(给出错误提示)(已支持) * 兼容MyCAT 动态注解,静态注解。(待测试) +* 支持全局表 ### 2. 
实现思路 @@ -25,13 +27,15 @@ schemas: dataNode: dn1 - name: tb_boy dataNode: dn2 + - name: tb_paw + type: global + dataNode: dn$1-2 dataNodes: - name: dn1 database: mytest replica: repli - name: dn2 database: mytest2 - replica: repli2 ``` @@ -131,6 +135,16 @@ private String getbackendName(){ } ``` +#### 2.3 全局表 + +尽管DBInMultiServer模式一般情况下不允许跨库DML ,DDL语句,但全局表是个例外,仅针对全局表的DDL,DML语句可以跨节点执行。 + +全局表具有如下特性: + +* 全局表的插入、更新操作会实时在所有节点上执行,保持各个分片的数据一致性 +* 全局表的查询操作,只从一个节点获取 +* 全局表可以跟任何一个表进行JOIN 操作 + ### 3.讨论点 #### 3.1 数据库管理语句支持到什么程度 From 6e12bd7360b5ca0043dc6a68858b523bf810035b Mon Sep 17 00:00:00 2001 From: flysquirrel Date: Wed, 24 Jan 2018 22:54:23 +0800 Subject: [PATCH 07/22] =?UTF-8?q?=E5=8E=BB=E6=8E=89=E5=90=8C=E6=AD=A5?= =?UTF-8?q?=E9=94=81=EF=BC=8C=E5=9B=A0=E4=B8=BA=E6=B2=A1=E6=9C=89=E5=BF=85?= =?UTF-8?q?=E8=A6=81=EF=BC=8Cmycatsession=E5=92=8C=E7=BB=91=E5=AE=9A?= =?UTF-8?q?=E7=9A=84mysqlsession=E9=83=BD=E5=9C=A8=E5=90=8C=E4=B8=80?= =?UTF-8?q?=E4=B8=AA=E7=BA=BF=E7=A8=8B=E4=B8=8B=E5=81=9Aselector?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../cmds/multinode/DbInMultiServerCmd.java | 84 +++++++++---------- .../net/DefaultMycatSessionHandler.java | 1 - 2 files changed, 39 insertions(+), 46 deletions(-) diff --git a/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java b/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java index 2a9c1ec..065d54f 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java @@ -81,54 +81,48 @@ public boolean procssSQL(MycatSession session) throws IOException { @Override public boolean onBackendResponse(MySQLSession session) throws IOException { - lock.lock(); - try { - ++executeCount; - // 首先进行一次报文的读取操作 - if (!session.readFromChannel()) { - return false; - } - // 进行报文处理的流程化 - boolean nextReadFlag = false; - do { - // 进行报文的处理流程 - nextReadFlag = 
session.getMycatSession().commandHandler.procss(session); - } while (nextReadFlag); - - // 获取当前是否结束标识 - Boolean check = (Boolean) session.getSessionAttrMap() - .get(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey()); - - MycatSession mycatSession = session.getMycatSession(); - ProxyBuffer buffer = session.getProxyBuffer(); - - if (executeCount < session.getMycatSession().curRouteResultset.getNodes().length) { - // DbInMultiServer模式下,不考虑show tables等DSL语句的话,只有对全局表的操作才会跨节点,也就是对全局表的DDL,DML语句, - // 而对每个节点的全局表操作完后返回的报文都是一样的,因此只需要拿最后一次的报文返回给客户端即可 - if (session.curMSQLPackgInf.pkgType == MySQLPacket.OK_PACKET) { - // 因为不是最后一个节点的返回报文,所以这里讲readmark设为readIndex,也就是丢弃掉这次报文(仅考虑全局表的DDL, DML返回报文) - // TODO show tables类的DSL语句就不适用,这个后续考虑时再优化 - session.getProxyBuffer().readMark = session.getProxyBuffer().readIndex; - } - return false; + // 首先进行一次报文的读取操作 + if (!session.readFromChannel()) { + return false; + } + // 进行报文处理的流程化 + boolean nextReadFlag = false; + do { + // 进行报文的处理流程 + nextReadFlag = session.getMycatSession().commandHandler.procss(session); + } while (nextReadFlag); + + // 获取当前是否结束标识 + Boolean check = (Boolean) session.getSessionAttrMap() + .get(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey()); + + MycatSession mycatSession = session.getMycatSession(); + ProxyBuffer buffer = session.getProxyBuffer(); + + if (++executeCount < session.getMycatSession().curRouteResultset.getNodes().length) { + // DbInMultiServer模式下,不考虑show tables等DSL语句的话,只有对全局表的操作才会跨节点,也就是对全局表的DDL,DML语句, + // 而对每个节点的全局表操作完后返回的报文都是一样的,因此只需要拿最后一次的报文返回给客户端即可 + if (session.curMSQLPackgInf.pkgType == MySQLPacket.OK_PACKET) { + // 因为不是最后一个节点的返回报文,所以这里讲readmark设为readIndex,也就是丢弃掉这次报文(仅考虑全局表的DDL, DML返回报文) + // TODO show tables类的DSL语句就不适用,这个后续考虑时再优化 + session.getProxyBuffer().readMark = session.getProxyBuffer().readIndex; } + return false; + } - // 检查到当前已经完成,执行添加操作 - if (null != check && check) { - // 当知道操作完成后,前段的注册感兴趣事件为读取 - mycatSession.takeOwner(SelectionKey.OP_READ); - } - // 未完成执行继续读取操作 - else { - // 直接透传报文 - 
mycatSession.takeOwner(SelectionKey.OP_WRITE); - } - buffer.flip(); - executeCount = 0; - mycatSession.writeToChannel(); - } finally { - lock.unlock(); + // 检查到当前已经完成,执行添加操作 + if (null != check && check) { + // 当知道操作完成后,前段的注册感兴趣事件为读取 + mycatSession.takeOwner(SelectionKey.OP_READ); + } + // 未完成执行继续读取操作 + else { + // 直接透传报文 + mycatSession.takeOwner(SelectionKey.OP_WRITE); } + buffer.flip(); + executeCount = 0; + mycatSession.writeToChannel(); return false; } diff --git a/source/src/main/java/io/mycat/mycat2/net/DefaultMycatSessionHandler.java b/source/src/main/java/io/mycat/mycat2/net/DefaultMycatSessionHandler.java index 8d37f0f..c526b5c 100644 --- a/source/src/main/java/io/mycat/mycat2/net/DefaultMycatSessionHandler.java +++ b/source/src/main/java/io/mycat/mycat2/net/DefaultMycatSessionHandler.java @@ -102,7 +102,6 @@ private void onBackendRead(MySQLSession session) throws IOException { MySQLCommand curCmd = session.getMycatSession().curSQLCommand; try { if (curCmd.onBackendResponse(session)) { - System.out.println("---------------" + session.isClosed()); curCmd.clearBackendResouces(session, session.isClosed()); } } catch (ClosedChannelException ex) { From b557847db4c882ec33af23bf6264ce53f8f2652d Mon Sep 17 00:00:00 2001 From: gaulzhw Date: Mon, 5 Mar 2018 23:29:34 +0800 Subject: [PATCH 08/22] =?UTF-8?q?=E5=B0=86=E5=91=BD=E4=BB=A4=E8=A1=8C?= =?UTF-8?q?=E5=8F=82=E6=95=B0=E4=BD=BF=E7=94=A8apache-commons-cli=20jar?= =?UTF-8?q?=E5=8C=85=E5=AE=9E=E7=8E=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- source/pom.xml | 5 ++ .../main/java/io/mycat/mycat2/MycatCore.java | 81 +++++++++++-------- .../java/io/mycat/mycat2/beans/ArgsBean.java | 14 ++-- 3 files changed, 61 insertions(+), 39 deletions(-) diff --git a/source/pom.xml b/source/pom.xml index 976a3a8..0705ccf 100644 --- a/source/pom.xml +++ b/source/pom.xml @@ -43,6 +43,11 @@ slf4j-log4j12 1.7.2 + + commons-cli + commons-cli + 1.4 + diff --git 
a/source/src/main/java/io/mycat/mycat2/MycatCore.java b/source/src/main/java/io/mycat/mycat2/MycatCore.java index 0094165..11b1523 100644 --- a/source/src/main/java/io/mycat/mycat2/MycatCore.java +++ b/source/src/main/java/io/mycat/mycat2/MycatCore.java @@ -24,6 +24,7 @@ package io.mycat.mycat2; import java.io.IOException; +import java.util.Arrays; import io.mycat.mycat2.beans.ArgsBean; import io.mycat.mycat2.beans.conf.BalancerBean; @@ -34,12 +35,18 @@ import io.mycat.proxy.MycatReactorThread; import io.mycat.proxy.ProxyRuntime; +import org.apache.commons.cli.*; +import org.apache.commons.lang.ArrayUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * @author wuzhihui */ public class MycatCore { - public static void main(String[] args) throws IOException { + private static final Logger LOGGER = LoggerFactory.getLogger(MycatCore.class); + + public static void main(String[] args) throws IOException, ParseException { ProxyRuntime runtime = ProxyRuntime.INSTANCE; runtime.setConfig(new MycatConfig()); @@ -61,44 +68,54 @@ public static void main(String[] args) throws IOException { ProxyStarter.INSTANCE.start(); } - private static void solveArgs(String[] args) { - int lenght = args.length; + private static void solveArgs(String[] args) throws ParseException { + Options options = new Options(); + options.addOption(null, ArgsBean.PROXY_PORT,true,"proxy port"); + options.addOption(null, ArgsBean.CLUSTER_ENABLE,true,"cluster enable"); + options.addOption(null, ArgsBean.CLUSTER_PORT,true,"cluster port"); + options.addOption(null, ArgsBean.CLUSTER_MY_NODE_ID,true,"cluster my node id"); + options.addOption(null, ArgsBean.BALANCER_ENABLE,true,"balancer enable"); + options.addOption(null, ArgsBean.BALANCER_PORT,true,"balancer port"); + options.addOption(null, ArgsBean.BALANCER_STRATEGY,true,"balancer strategy"); + + CommandLineParser parser = new DefaultParser(); + CommandLine cmd = parser.parse(options,args); MycatConfig conf = 
ProxyRuntime.INSTANCE.getConfig(); ProxyConfig proxyConfig = conf.getConfig(ConfigEnum.PROXY); ClusterConfig clusterConfig = conf.getConfig(ConfigEnum.CLUSTER); BalancerConfig balancerConfig= conf.getConfig(ConfigEnum.BALANCER); - for (int i = 0; i < lenght; i++) { - switch(args[i]) { - case ArgsBean.PROXY_PORT: - proxyConfig.getProxy().setPort(Integer.parseInt(args[++i])); - break; - case ArgsBean.CLUSTER_ENABLE: - clusterConfig.getCluster().setEnable(Boolean.parseBoolean(args[++i])); - break; - case ArgsBean.CLUSTER_PORT: - clusterConfig.getCluster().setPort(Integer.parseInt(args[++i])); - break; - case ArgsBean.CLUSTER_MY_NODE_ID: - clusterConfig.getCluster().setMyNodeId(args[++i]); - break; - case ArgsBean.BALANCER_ENABLE: - balancerConfig.getBalancer().setEnable(Boolean.parseBoolean(args[++i])); - break; - case ArgsBean.BALANCER_PORT: - balancerConfig.getBalancer().setPort(Integer.parseInt(args[++i])); - break; - case ArgsBean.BALANCER_STRATEGY: - BalancerBean.BalancerStrategyEnum strategy = BalancerBean.BalancerStrategyEnum.getEnum(args[++i]); - if (strategy == null) { - throw new IllegalArgumentException("no such balancer strategy"); - } - balancerConfig.getBalancer().setStrategy(strategy); - break; - default: - break; + if (cmd.hasOption(ArgsBean.PROXY_PORT)) { + proxyConfig.getProxy().setPort(Integer.parseInt(cmd.getOptionValue(ArgsBean.PROXY_PORT))); + } + + if (cmd.hasOption(ArgsBean.CLUSTER_ENABLE)){ + clusterConfig.getCluster().setEnable(Boolean.parseBoolean(ArgsBean.CLUSTER_ENABLE)); + } + if (cmd.hasOption(ArgsBean.CLUSTER_PORT)){ + clusterConfig.getCluster().setPort(Integer.parseInt(cmd.getOptionValue(ArgsBean.CLUSTER_PORT))); + } + if (cmd.hasOption(ArgsBean.CLUSTER_MY_NODE_ID)){ + clusterConfig.getCluster().setMyNodeId(cmd.getOptionValue(ArgsBean.CLUSTER_MY_NODE_ID)); + } + + if (cmd.hasOption(ArgsBean.BALANCER_ENABLE)){ + balancerConfig.getBalancer().setEnable(Boolean.parseBoolean(cmd.getOptionValue(ArgsBean.BALANCER_ENABLE))); + } + if 
(cmd.hasOption(ArgsBean.BALANCER_PORT)){ + balancerConfig.getBalancer().setPort(Integer.parseInt(cmd.getOptionValue(ArgsBean.BALANCER_PORT))); + } + if (cmd.hasOption(ArgsBean.BALANCER_STRATEGY)){ + BalancerBean.BalancerStrategyEnum strategy = BalancerBean.BalancerStrategyEnum.getEnum(cmd.getOptionValue(ArgsBean.BALANCER_STRATEGY)); + if (strategy == null) { + throw new IllegalArgumentException("no such balancer strategy"); } + balancerConfig.getBalancer().setStrategy(strategy); + } + // 防止配置错误,做提示 + if (ArrayUtils.isNotEmpty(cmd.getArgs())) { + LOGGER.warn("please check if param is correct"); } } } diff --git a/source/src/main/java/io/mycat/mycat2/beans/ArgsBean.java b/source/src/main/java/io/mycat/mycat2/beans/ArgsBean.java index 47824c4..8243b53 100644 --- a/source/src/main/java/io/mycat/mycat2/beans/ArgsBean.java +++ b/source/src/main/java/io/mycat/mycat2/beans/ArgsBean.java @@ -7,13 +7,13 @@ * @author: gaozhiwen */ public class ArgsBean { - public static final String PROXY_PORT = "-mycat.proxy.port"; + public static final String PROXY_PORT = "mycat.proxy.port"; - public static final String CLUSTER_ENABLE = "-mycat.cluster.enable"; - public static final String CLUSTER_PORT = "-mycat.cluster.port"; - public static final String CLUSTER_MY_NODE_ID = "-mycat.cluster.myNodeId"; + public static final String CLUSTER_ENABLE = "mycat.cluster.enable"; + public static final String CLUSTER_PORT = "mycat.cluster.port"; + public static final String CLUSTER_MY_NODE_ID = "mycat.cluster.myNodeId"; - public static final String BALANCER_ENABLE = "-mycat.balancer.enable"; - public static final String BALANCER_PORT = "-mycat.balancer.port"; - public static final String BALANCER_STRATEGY = "-mycat.proxy.strategy"; + public static final String BALANCER_ENABLE = "mycat.balancer.enable"; + public static final String BALANCER_PORT = "mycat.balancer.port"; + public static final String BALANCER_STRATEGY = "mycat.proxy.strategy"; } From b818cebd92ece1a8f2b26ed009ed82f41c8a0b58 Mon Sep 17 
00:00:00 2001 From: gaulzhw Date: Tue, 6 Mar 2018 20:56:18 +0800 Subject: [PATCH 09/22] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E8=8E=B7=E5=8F=96?= =?UTF-8?q?=E9=85=8D=E7=BD=AE=E5=8F=82=E6=95=B0=E7=9A=84=E6=96=B9=E6=B3=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- source/src/main/java/io/mycat/mycat2/MycatCore.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/src/main/java/io/mycat/mycat2/MycatCore.java b/source/src/main/java/io/mycat/mycat2/MycatCore.java index 11b1523..2624c58 100644 --- a/source/src/main/java/io/mycat/mycat2/MycatCore.java +++ b/source/src/main/java/io/mycat/mycat2/MycatCore.java @@ -91,7 +91,7 @@ private static void solveArgs(String[] args) throws ParseException { } if (cmd.hasOption(ArgsBean.CLUSTER_ENABLE)){ - clusterConfig.getCluster().setEnable(Boolean.parseBoolean(ArgsBean.CLUSTER_ENABLE)); + clusterConfig.getCluster().setEnable(Boolean.parseBoolean(cmd.getOptionValue(ArgsBean.CLUSTER_ENABLE))); } if (cmd.hasOption(ArgsBean.CLUSTER_PORT)){ clusterConfig.getCluster().setPort(Integer.parseInt(cmd.getOptionValue(ArgsBean.CLUSTER_PORT))); From d22c3103f3b9690cc422a9bfa9b068ef71346df1 Mon Sep 17 00:00:00 2001 From: gaulzhw Date: Tue, 6 Mar 2018 21:01:55 +0800 Subject: [PATCH 10/22] =?UTF-8?q?=E8=B0=83=E6=95=B4=E5=8F=82=E6=95=B0?= =?UTF-8?q?=E5=A3=B0=E6=98=8E=E4=BD=8D=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- source/src/main/java/io/mycat/mycat2/ProxyStarter.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/source/src/main/java/io/mycat/mycat2/ProxyStarter.java b/source/src/main/java/io/mycat/mycat2/ProxyStarter.java index 0ed274a..fae0c26 100644 --- a/source/src/main/java/io/mycat/mycat2/ProxyStarter.java +++ b/source/src/main/java/io/mycat/mycat2/ProxyStarter.java @@ -96,15 +96,17 @@ public void startProxy(boolean isLeader) throws IOException { }); } - ClusterConfig clusterConfig = 
conf.getConfig(ConfigEnum.CLUSTER); - ClusterBean clusterBean = clusterConfig.getCluster(); // 主节点才启动心跳,非集群按主节点处理 if (isLeader) { runtime.startHeartBeatScheduler(); } + ClusterConfig clusterConfig = conf.getConfig(ConfigEnum.CLUSTER); + ClusterBean clusterBean = clusterConfig.getCluster(); + BalancerConfig balancerConfig = conf.getConfig(ConfigEnum.BALANCER); BalancerBean balancerBean = balancerConfig.getBalancer(); + // 集群模式下才开启负载均衡服务 if (clusterBean.isEnable() && balancerBean.isEnable()) { runtime.getAcceptor().startServerChannel(balancerBean.getIp(), balancerBean.getPort(), ServerType.LOAD_BALANCER); From 89f436c7ccdf4ebb6f8e15be00552ab52b5534f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=88=98=E6=A5=A0?= Date: Fri, 30 Mar 2018 11:16:32 +0800 Subject: [PATCH 11/22] =?UTF-8?q?=E5=8E=BB=E9=99=A4=E5=A4=9A=E4=BD=99combi?= =?UTF-8?q?ne?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- source/src/main/java/io/mycat/proxy/MycatReactorThread.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/src/main/java/io/mycat/proxy/MycatReactorThread.java b/source/src/main/java/io/mycat/proxy/MycatReactorThread.java index 3f032cf..df453a0 100644 --- a/source/src/main/java/io/mycat/proxy/MycatReactorThread.java +++ b/source/src/main/java/io/mycat/proxy/MycatReactorThread.java @@ -65,7 +65,7 @@ private int getUsingBackendConCounts(MySQLMetaBean mySQLMetaBean) { MycatSession mycatSession = (MycatSession) session; return mycatSession.getBackendConCounts(mySQLMetaBean); }) - .reduce(0, (sum, count) -> sum += count, (sum1, sum2) -> sum1 + sum2); + .reduce(0, (sum, count) -> sum += count); } From 3a4b3d9ac4769862cd3368e24136717a7c8ef861 Mon Sep 17 00:00:00 2001 From: jwc Date: Tue, 3 Apr 2018 09:30:53 +0800 Subject: [PATCH 12/22] travis support --- .travis.yml | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..f555952 
--- /dev/null +++ b/.travis.yml @@ -0,0 +1,8 @@ +language: java + +jdk: + - oraclejdk8 + +install: cd source && mvn install -DskipTests=true -Dmaven.javadoc.skip=true + +script: mvn test From b7947ca052480088af7fef4742951552607b44c4 Mon Sep 17 00:00:00 2001 From: jwc Date: Tue, 3 Apr 2018 13:06:17 +0800 Subject: [PATCH 13/22] add travis status image --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e79c175..9974c6d 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# tcp-proxy +# tcp-proxy[![Build Status](https://www.travis-ci.org/MyCATApache/Mycat2.svg?branch=master)](https://www.travis-ci.org/MyCATApache/Mycat2) Mycat 2.0 预览版。 基于Nio实现,有效管理线程,解决高并发问题。 From 64696e7c301c7e266a37c868ffd50832e782f769 Mon Sep 17 00:00:00 2001 From: jwc Date: Tue, 3 Apr 2018 22:58:31 +0800 Subject: [PATCH 14/22] add some documents --- ...20\347\240\201\350\247\243\346\236\220.md" | 721 ++++++++++++++++ ...5\210\260\350\276\276mysql\347\232\204.md" | 780 ++++++++++++++++++ ...\346\263\250\350\247\243(\346\227\247).md" | 603 ++++++++++++++ ...06\345\222\214\345\256\214\345\226\204.md" | 250 ++++++ 4 files changed, 2354 insertions(+) create mode 100644 "doc/discussions/Mycat1.6\345\260\201\350\243\205\347\273\223\346\236\234\351\233\206\346\272\220\347\240\201\350\247\243\346\236\220.md" create mode 100644 "doc/discussions/sql\345\246\202\344\275\225\347\273\217\350\277\207mycat2.0\345\210\260\350\276\276mysql\347\232\204.md" create mode 100644 "doc/discussions/\345\212\250\346\200\201\346\263\250\350\247\243(\346\227\247).md" create mode 100644 "doc/discussions/\345\212\250\346\200\201\346\263\250\350\247\243\347\232\204\351\234\200\346\261\202\346\220\234\351\233\206\345\222\214\345\256\214\345\226\204.md" diff --git "a/doc/discussions/Mycat1.6\345\260\201\350\243\205\347\273\223\346\236\234\351\233\206\346\272\220\347\240\201\350\247\243\346\236\220.md" 
"b/doc/discussions/Mycat1.6\345\260\201\350\243\205\347\273\223\346\236\234\351\233\206\346\272\220\347\240\201\350\247\243\346\236\220.md" new file mode 100644 index 0000000..cb3d55c --- /dev/null +++ "b/doc/discussions/Mycat1.6\345\260\201\350\243\205\347\273\223\346\236\234\351\233\206\346\272\220\347\240\201\350\247\243\346\236\220.md" @@ -0,0 +1,721 @@ +### Mycat1.6封装结果集源码解析 + +王亚飞 + +#### 1.Mycat执行普通sql语句 + +1. 设置sql的隔离级别 +2. sql是否是存储过程并且数据库引擎是oracle 如果是执行 存储过程 +3. 判断sql执行类型,并根据sql、数据库引擎、特殊sql语句选择相应的方法执行sql + + + private void executeSQL(RouteResultsetNode rrn, ServerConnection sc, + boolean autocommit) throws IOException { + //获取sql语句 + String orgin = rrn.getStatement(); + // String sql = rrn.getStatement().toLowerCase(); + // LOGGER.info("JDBC SQL:"+orgin+"|"+sc.toString()); + //是否能走从库 + if (!modifiedSQLExecuted && rrn.isModifySQL()) { + modifiedSQLExecuted = true; + } + try { + //设置数据库隔离级别 + syncIsolation(sc.getTxIsolation()) ; + if (!this.schema.equals(this.oldSchema)) { + con.setCatalog(schema); + this.oldSchema = schema; + } + if (!this.isSpark) { + con.setAutoCommit(autocommit); + } + //获取数据库sql类型SELECT/SHOW/INSERT/UPDATE/DELETE语句 + int sqlType = rrn.getSqlType(); + //判断sql是否是存储语句并且引擎是oracle + if(rrn.isCallStatement()&&"oracle".equalsIgnoreCase(getDbType())) + { + //存储过程暂时只支持oracle + ouputCallStatement(rrn,sc,orgin); + } + //如果是SELETE或者SHOW语句 + else if (sqlType == ServerParse.SELECT || sqlType == ServerParse.SHOW) { + //如果是SHOW语句并且数据库引擎是mysql + if ((sqlType == ServerParse.SHOW) && (!dbType.equals("MYSQL"))) { + // showCMD(sc, orgin); + //ShowVariables.execute(sc, orgin); + ShowVariables.execute(sc, orgin,this); + } else if ("SELECT CONNECTION_ID()".equalsIgnoreCase(orgin)) {//如果是特殊语句 + //ShowVariables.justReturnValue(sc,String.valueOf(sc.getId())); + ShowVariables.justReturnValue(sc,String.valueOf(sc.getId()),this); + } else { + ouputResultSet(sc, orgin); + } + } else {//如果是INSERT/UPDATE/DELETE语句 + executeddl(sc, orgin); + } + + } catch (SQLException e) 
{//如果报错 + + String msg = e.getMessage(); + ErrorPacket error = new ErrorPacket(); + error.packetId = ++packetId; + error.errno = e.getErrorCode(); + error.message = msg.getBytes(); + this.respHandler.errorResponse(error.writeToBytes(sc), this); + }catch (Exception e) {//如果报错 + String msg = e.getMessage(); + ErrorPacket error = new ErrorPacket(); + error.packetId = ++packetId; + error.errno = ErrorCode.ER_UNKNOWN_ERROR; + error.message = ((msg == null) ? e.toString().getBytes() : msg.getBytes()); + String err = null; + if(error.message!=null){ + err = new String(error.message); + } + LOGGER.error("sql execute error, "+ err , e); + this.respHandler.errorResponse(error.writeToBytes(sc), this); + } + finally { + this.running = false; + } + } + +#### Mycat解析sql结果 + +##### 执行sql并将执行返回的头、元数据、尾、数据等添加到对应的解析结果的byteBuf中 + + private void ouputResultSet(ServerConnection sc, String sql) + throws SQLException { + ResultSet rs = null; + Statement stmt = null; + + try { + //获取连接 + stmt = con.createStatement(); + //执行sql + rs = stmt.executeQuery(sql); + //初始化list 用来存储sql执行结果元数据 + List fieldPks = new LinkedList(); + //添加sql执行结果元数据 + ResultSetUtil.resultSetToFieldPacket(sc.getCharset(), fieldPks, rs, + this.isSpark); + //获取list大小即查询出数据的条数 + int colunmCount = fieldPks.size(); + //创建一个bytebuffer + ByteBuffer byteBuf = sc.allocate(); + //头信息 + ResultSetHeaderPacket headerPkg = new ResultSetHeaderPacket(); + headerPkg.fieldCount = fieldPks.size(); + headerPkg.packetId = ++packetId; + //将headerPkg数据写到bytebuf中 + byteBuf = headerPkg.write(byteBuf, sc, true); + //刷新(切换读写模式) + byteBuf.flip(); + //创建一个byte数组 + byte[] header = new byte[byteBuf.limit()]; + //将bytebuf中的数据刷新到header数组中 + byteBuf.get(header); + //清空bytebuf + byteBuf.clear(); + //初始化list + List fields = new ArrayList(fieldPks.size()); + //迭代fieldPks + Iterator itor = fieldPks.iterator(); + //将结果信息放到fields集合中 + while (itor.hasNext()) { + //获取FieldPacket数据 + FieldPacket curField = itor.next(); + //序号 + curField.packetId = ++packetId; + 
//将FieldPacket信息写入bytebuf中 + byteBuf = curField.write(byteBuf, sc, false); + //切换读写模式 + byteBuf.flip(); + //创建byte数组 + byte[] field = new byte[byteBuf.limit()]; + //将bytebuf中的数据刷新到byte数组中 + byteBuf.get(field); + //清空byteBuf + byteBuf.clear(); + //将field数据添加到集合中 + fields.add(field); + } + //添加数据尾数据 + EOFPacket eofPckg = new EOFPacket(); + //设置packetId + eofPckg.packetId = ++packetId; + //将数据刷新到byteBuf中 + byteBuf = eofPckg.write(byteBuf, sc, false); + //切换读写模式 + byteBuf.flip(); + //创建数组 + byte[] eof = new byte[byteBuf.limit()]; + //将数据写到数组中 + byteBuf.get(eof); + //清空byteBuf + byteBuf.clear(); + //根据数据集的类型(respHandler的子类) 将header、fields、eof数据写到byteBuf中去 + this.respHandler.fieldEofResponse(header, fields, eof, this); + + // output row + //解析数据(将结果添加到待解析buf中) + while (rs.next()) { + //创建RowDataPacket(行数据信息)对象 + RowDataPacket curRow = new RowDataPacket(colunmCount); + for (int i = 0; i < colunmCount; i++) { + int j = i + 1; + if(MysqlDefs.isBianry((byte) fieldPks.get(i).type)) { + curRow.add(rs.getBytes(j)); + } else if(fieldPks.get(i).type == MysqlDefs.FIELD_TYPE_DECIMAL || + fieldPks.get(i).type == (MysqlDefs.FIELD_TYPE_NEW_DECIMAL - 256)) { // field type is unsigned byte + // ensure that do not use scientific notation format + BigDecimal val = rs.getBigDecimal(j); + curRow.add(StringUtil.encode(val != null ? 
val.toPlainString() : null, + sc.getCharset())); + } else { + curRow.add(StringUtil.encode(rs.getString(j), + sc.getCharset())); + } + + } + curRow.packetId = ++packetId; + byteBuf = curRow.write(byteBuf, sc, false); + byteBuf.flip(); + byte[] row = new byte[byteBuf.limit()]; + byteBuf.get(row); + byteBuf.clear(); + this.respHandler.rowResponse(row, this); + } + + fieldPks.clear(); + + // end row + //添加尾数据 + eofPckg = new EOFPacket(); + eofPckg.packetId = ++packetId; + byteBuf = eofPckg.write(byteBuf, sc, false); + byteBuf.flip(); + eof = new byte[byteBuf.limit()]; + byteBuf.get(eof); + sc.recycle(byteBuf); + this.respHandler.rowEofResponse(eof, this); + } finally { + if (rs != null) { + try { + rs.close(); + } catch (SQLException e) { + + } + } + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException e) { + + } + } + } + } + + + + + + +##### 单节点拼接结果集(SingleNodeHandler) +##### 单节点拼接结果集(SingleNodeHandler) + +###### SELECT + +1. fieldEofResponse():元数据返回时触发,将header和元数据内容依次写入缓冲区中; + + + public void fieldEofResponse(byte[] header, List fields, + byte[] eof, BackendConnection conn) { + this.header = header; + this.fields = fields; + MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler(); + if(null !=middlerResultHandler ){ + return; + } + this.netOutBytes += header.length; + for (int i = 0, len = fields.size(); i < len; ++i) { + byte[] field = fields.get(i); + this.netOutBytes += field.length; + } + + header[3] = ++packetId; + ServerConnection source = session.getSource(); + buffer = source.writeToBuffer(header, allocBuffer()); + for (int i = 0, len = fields.size(); i < len; ++i) { + byte[] field = fields.get(i); + field[3] = ++packetId; + + // 保存field信息 + FieldPacket fieldPk = new FieldPacket(); + fieldPk.read(field); + fieldPackets.add(fieldPk); + + buffer = source.writeToBuffer(field, buffer); + } + + fieldCount = fieldPackets.size(); + + eof[3] = ++packetId; + buffer = source.writeToBuffer(eof, buffer); + + if 
(isDefaultNodeShowTable) { + + for (String name : shardingTablesSet) { + RowDataPacket row = new RowDataPacket(1); + row.add(StringUtil.encode(name.toLowerCase(), source.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, source, true); + } + + } else if (isDefaultNodeShowFullTable) { + + for (String name : shardingTablesSet) { + RowDataPacket row = new RowDataPacket(1); + row.add(StringUtil.encode(name.toLowerCase(), source.getCharset())); + row.add(StringUtil.encode("BASE TABLE", source.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, source, true); + } + } + } +2. rowResponse():行数据返回时触发,将行数据写入缓冲区中; + + + public void rowResponse(byte[] row, BackendConnection conn) { + + this.netOutBytes += row.length; + this.selectRows++; + + if (isDefaultNodeShowTable || isDefaultNodeShowFullTable) { + RowDataPacket rowDataPacket = new RowDataPacket(1); + rowDataPacket.read(row); + String table = StringUtil.decode(rowDataPacket.fieldValues.get(0), session.getSource().getCharset()); + if (shardingTablesSet.contains(table.toUpperCase())) { + return; + } + } + row[3] = ++packetId; + + if ( prepared ) { + RowDataPacket rowDataPk = new RowDataPacket(fieldCount); + rowDataPk.read(row); + BinaryRowDataPacket binRowDataPk = new BinaryRowDataPacket(); + binRowDataPk.read(fieldPackets, rowDataPk); + binRowDataPk.packetId = rowDataPk.packetId; + //binRowDataPk.write(session.getSource()); + /* + * [fix bug] : 这里不能直接将包写到前端连接, + * 因为在fieldEofResponse()方法结束后buffer还没写出, + * 所以这里应该将包数据顺序写入buffer(如果buffer满了就写出),然后再将buffer写出 + */ + buffer = binRowDataPk.write(buffer, session.getSource(), true); + } else { + + MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler(); + if(null ==middlerResultHandler ){ + buffer = session.getSource().writeToBuffer(row, allocBuffer()); + }else{ + if(middlerResultHandler instanceof MiddlerQueryResultHandler){ + byte[] rv = ResultSetUtil.getColumnVal(row, fields, 0); + String rowValue = rv==null?"":new 
String(rv); + middlerResultHandler.add(rowValue); + } + } + + } + + } + + +3. rowEofResponse():行结束标志返回时触发,将EOF标志写入缓冲区,最后调用source.write(buffer)将缓冲区放入前端连接的写缓冲队列中,等待NIOSocketWR将其发送给应用。 +3. rowEofResponse():行结束标志返回时触发,将EOF标志写入缓冲区,最后调用source.write(buffer)将缓冲区放入前端连接的写缓冲队列中,等待NIOSocketWR将其发送给应用。 + + + public void rowEofResponse(byte[] eof, BackendConnection conn) { + + this.netOutBytes += eof.length; + + ServerConnection source = session.getSource(); + conn.recordSql(source.getHost(), source.getSchema(), node.getStatement()); + // 判断是调用存储过程的话不能在这里释放链接 + if (!rrs.isCallStatement()||(rrs.isCallStatement()&&rrs.getProcedure().isResultSimpleValue())) + { + session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false); + endRunning(); + } + + eof[3] = ++packetId; + buffer = source.writeToBuffer(eof, allocBuffer()); + int resultSize = source.getWriteQueue().size()*MycatServer.getInstance().getConfig().getSystem().getBufferPoolPageSize(); + resultSize=resultSize+buffer.position(); + MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler(); + + if(middlerResultHandler !=null ){ + middlerResultHandler.secondEexcute(); + } else{ + source.write(buffer); + } + source.setExecuteSql(null); + //TODO: add by zhuam + //查询结果派发 + QueryResult queryResult = new QueryResult(session.getSource().getUser(), + rrs.getSqlType(), rrs.getStatement(), affectedRows, netInBytes, netOutBytes, startTime, System.currentTimeMillis(),resultSize); + QueryResultDispatcher.dispatchQuery( queryResult ); + + } + +###### UPDATE/INSERT/DELETE +调用链路:JDBCConnection.executeSQL->JDBCConnection.executeddl->SingleNodeHandler.okResponse + + + private void executeddl(ServerConnection sc, String sql) + throws SQLException { + Statement stmt = null; + try { + stmt = con.createStatement(); + int count = stmt.executeUpdate(sql); + OkPacket okPck = new OkPacket(); + okPck.affectedRows = count; + okPck.insertId = 0; + okPck.packetId = ++packetId; + okPck.message = " OK!".getBytes(); + 
this.respHandler.okResponse(okPck.writeToBytes(sc), this); + } finally { + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException e) { + + } + } + } + } + + + public void okResponse(byte[] data, BackendConnection conn) { + // + this.netOutBytes += data.length; + + boolean executeResponse = conn.syncAndExcute(); + if (executeResponse) { + ServerConnection source = session.getSource(); + OkPacket ok = new OkPacket(); + ok.read(data); + boolean isCanClose2Client =(!rrs.isCallStatement()) ||(rrs.isCallStatement() &&!rrs.getProcedure().isResultSimpleValue()); + if (rrs.isLoadData()) { + byte lastPackId = source.getLoadDataInfileHandler().getLastPackId(); + ok.packetId = ++lastPackId;// OK_PACKET + source.getLoadDataInfileHandler().clear(); + + } else if (isCanClose2Client) { + ok.packetId = ++packetId;// OK_PACKET + } + + + if (isCanClose2Client) { + session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false); + endRunning(); + } + ok.serverStatus = source.isAutocommit() ? 2 : 1; + recycleResources(); + + if (isCanClose2Client) { + source.setLastInsertId(ok.insertId); + ok.write(source); + } + + this.affectedRows = ok.affectedRows; + + source.setExecuteSql(null); + // add by lian + // 解决sql统计中写操作永远为0 + QueryResult queryResult = new QueryResult(session.getSource().getUser(), + rrs.getSqlType(), rrs.getStatement(), affectedRows, netInBytes, netOutBytes, startTime, System.currentTimeMillis(),0); + QueryResultDispatcher.dispatchQuery( queryResult ); + } + } + + +##### 多节点拼接结果集(MultiNodeQueryHandler) +##### 多节点拼接结果集(MultiNodeQueryHandler) + +1. 获取是否启用Off Heap(默认启用)isOffHeapuseOffHeapForMerge +2. 
根据是否是SELECT语句和是否需要合并和isOffHeapuseOffHeapForMerge获取dataMergeSvr + + + public MultiNodeQueryHandler(int sqlType, RouteResultset rrs, + boolean autocommit, NonBlockingSession session) { + + super(session); + this.isMiddleResultDone = new AtomicBoolean(false); + + if (rrs.getNodes() == null) { + throw new IllegalArgumentException("routeNode is null!"); + } + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("execute mutinode query " + rrs.getStatement()); + } + + this.rrs = rrs; + //获取是否启用Off Heap + isOffHeapuseOffHeapForMerge = MycatServer.getInstance(). + getConfig().getSystem().getUseOffHeapForMerge(); + //获取dataMergeSvr + if (ServerParse.SELECT == sqlType && rrs.needMerge()) { + /** + * 使用Off Heap + */ + if(isOffHeapuseOffHeapForMerge == 1){ + dataMergeSvr = new DataNodeMergeManager(this,rrs,isMiddleResultDone); + }else { + dataMergeSvr = new DataMergeService(this,rrs); + } + } else { + dataMergeSvr = null; + } + + isCallProcedure = rrs.isCallStatement(); + this.autocommit = session.getSource().isAutocommit(); + this.session = session; + this.lock = new ReentrantLock(); + // this.icHandler = new CommitNodeHandler(session); + + this.limitStart = rrs.getLimitStart(); + this.limitSize = rrs.getLimitSize(); + this.end = limitStart + rrs.getLimitSize(); + + if (this.limitStart < 0) + this.limitStart = 0; + + if (rrs.getLimitSize() < 0) + end = Integer.MAX_VALUE; + if ((dataMergeSvr != null) + && LOGGER.isDebugEnabled()) { + LOGGER.debug("has data merge logic "); + } + + if ( rrs != null && rrs.getStatement() != null) { + netInBytes += rrs.getStatement().getBytes().length; + } + } + +3. 初始化grouper和sorter + +调用链路:JDBCConnection.ouputResultSet ->MultiNodeQueryHandler.fieldEofResponse +->dataMergeSvr.onRowMetaData + +4. 
将数据放入List + +调用链路:JDBCConnection.ouputResultSet -> +dataMergeSvr.onNewRecord + + + public boolean onNewRecord(String dataNode, byte[] rowData) { + final PackWraper data = new PackWraper(); + data.dataNode = dataNode; + data.rowData = rowData; + addPack(data); + return false; + } +5. 获取数据生成RowDataPacket实例 + + + public void run() { + if(!running.compareAndSet(false, true)){ + return; + } + boolean nulpack = false; + try{ + for (; ; ) { + final PackWraper pack = packs.poll(); + if(pack == null){ + nulpack = true; + break; + } + if (pack == END_FLAG_PACK) { + final int warningCount = 0; + final EOFPacket eofp = new EOFPacket(); + final ByteBuffer eof = ByteBuffer.allocate(9); + BufferUtil.writeUB3(eof, eofp.calcPacketSize()); + eof.put(eofp.packetId); + eof.put(eofp.fieldCount); + BufferUtil.writeUB2(eof, warningCount); + BufferUtil.writeUB2(eof, eofp.status); + final ServerConnection source = multiQueryHandler.getSession().getSource(); + final byte[] array = eof.array(); + break; + } + final RowDataPacket row = new RowDataPacket(fieldCount); + row.read(pack.rowData); + if (grouper != null) { + grouper.addRow(row); + } else if (sorter != null) { + if (!sorter.addRow(row)) { + canDiscard.put(pack.dataNode,true); + } + } else { + result.get(pack.dataNode).add(row); + } + }// rof + }catch(final Exception e){ + multiQueryHandler.handleDataProcessException(e); + }finally{ + running.set(false); + } + if(nulpack && !packs.isEmpty()){ + this.run(); + } + } + +6. 调用multiQueryHandler.outputMergeResult将结果发送到前端 +6. 
调用multiQueryHandler.outputMergeResult将结果发送到前端 + + + public void outputMergeResult(final ServerConnection source, + final byte[] eof, List results) { + try { + lock.lock(); + ByteBuffer buffer = session.getSource().allocate(); + final RouteResultset rrs = this.dataMergeSvr.getRrs(); + + // 处理limit语句 + int start = rrs.getLimitStart(); + int end = start + rrs.getLimitSize(); + + if (start < 0) { + start = 0; + } + if (rrs.getLimitSize() < 0) { + end = results.size(); + } + if (end > results.size()) { + end = results.size(); + } + if(prepared) { + for (int i = start; i < end; i++) { + RowDataPacket row = results.get(i); + BinaryRowDataPacket binRowDataPk = new BinaryRowDataPacket(); + binRowDataPk.read(fieldPackets, row); + binRowDataPk.packetId = ++packetId; + buffer = binRowDataPk.write(buffer, session.getSource(), true); + } + } else { + for (int i = start; i < end; i++) { + RowDataPacket row = results.get(i); + row.packetId = ++packetId; + buffer = row.write(buffer, source, true); + } + } + eof[3] = ++packetId; + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("last packet id:" + packetId); + } + source.write(source.writeToBuffer(eof, buffer)); + + } catch (Exception e) { + handleDataProcessException(e); + } finally { + lock.unlock(); + dataMergeSvr.clear(); + } + } + +###### UPDATE/INSERT/DELETE +调用链路:JDBCConnection.executeSQL->JDBCConnection.executeddl->MultiNodeQueryHandler.okResponse + +这三类语句都会返回一个OK包,里面包含了最为核心的affectedRows,因此每得到一个MySQL节点发送回的affectedRows,就将其累加,当收到最后一个节点的数据后(通过decrementOkCountBy()方法判断),将结果返回给前端 + + + + public void okResponse(byte[] data, BackendConnection conn) { + this.netOutBytes += data.length; + + boolean executeResponse = conn.syncAndExcute(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("received ok response ,executeResponse:" + + executeResponse + " from " + conn); + } + if (executeResponse) { + + ServerConnection source = session.getSource(); + OkPacket ok = new OkPacket(); + ok.read(data); + //存储过程 + boolean isCanClose2Client 
=(!rrs.isCallStatement()) ||(rrs.isCallStatement() &&!rrs.getProcedure().isResultSimpleValue());; + if(!isCallProcedure) + { + if (clearIfSessionClosed(session)) + { + return; + } else if (canClose(conn, false)) + { + return; + } + } + lock.lock(); + try { + // 判断是否是全局表,如果是,执行行数不做累加,以最后一次执行的为准。 + if (!rrs.isGlobalTable()) { + affectedRows += ok.affectedRows; + } else { + affectedRows = ok.affectedRows; + } + if (ok.insertId > 0) { + insertId = (insertId == 0) ? ok.insertId : Math.min( + insertId, ok.insertId); + } + } finally { + lock.unlock(); + } + // 对于存储过程,其比较特殊,查询结果返回EndRow报文以后,还会再返回一个OK报文,才算结束 + boolean isEndPacket = isCallProcedure ? decrementOkCountBy(1): decrementCountBy(1); + if (isEndPacket && isCanClose2Client) { + + if (this.autocommit && !session.getSource().isLocked()) {// clear all connections + session.releaseConnections(false); + } + + if (this.isFail() || session.closed()) { + tryErrorFinished(true); + return; + } + + lock.lock(); + try { + if (rrs.isLoadData()) { + byte lastPackId = source.getLoadDataInfileHandler() + .getLastPackId(); + ok.packetId = ++lastPackId;// OK_PACKET + ok.message = ("Records: " + affectedRows + " Deleted: 0 Skipped: 0 Warnings: 0") + .getBytes();// 此处信息只是为了控制台给人看的 + source.getLoadDataInfileHandler().clear(); + } else { + ok.packetId = ++packetId;// OK_PACKET + } + + ok.affectedRows = affectedRows; + ok.serverStatus = source.isAutocommit() ? 
2 : 1; + if (insertId > 0) { + ok.insertId = insertId; + source.setLastInsertId(insertId); + } + + ok.write(source); + } catch (Exception e) { + handleDataProcessException(e); + } finally { + lock.unlock(); + } + } + + + + + diff --git "a/doc/discussions/sql\345\246\202\344\275\225\347\273\217\350\277\207mycat2.0\345\210\260\350\276\276mysql\347\232\204.md" "b/doc/discussions/sql\345\246\202\344\275\225\347\273\217\350\277\207mycat2.0\345\210\260\350\276\276mysql\347\232\204.md" new file mode 100644 index 0000000..2560e46 --- /dev/null +++ "b/doc/discussions/sql\345\246\202\344\275\225\347\273\217\350\277\207mycat2.0\345\210\260\350\276\276mysql\347\232\204.md" @@ -0,0 +1,780 @@ +sql如何经过mycat2.0到达mysql的 +==== + +朱玉烁 + +Mycat使用的线程模型是基于Reactor的设计模式, +先说几个概念: + +1.NIOAcceptor,这个类继承于ProxyReactorThread, 在Reactor模式中扮演Acceptor与主Reactor角色,主要承担客户端的连接事件(accept) + +2.MycatReactorThread, 同样继承于ProxyReactorThread,在acceptor监听客户端连接后,交于MycatReactorThread处理 + +3.ProxyReactorThread,NIOAcceptor和MycatReactorThread的父类,是一个继承了Thread的线程类 + +4.ProxyRuntime,我理解的为一个运行时容器 + +5.MycatSession,前端连接会话(client连mycat) + +6.MySQLSession,后端连接会话(mycat连Mysql) + +7.MySQLCommand 用来向前段写数据,或者后端写数据的cmd + + +下面开始流程: +程序的入口是io.mycat.mycat2.MycatCore. 
在main 方法中 首先取得ProxyRuntime的实例,该类是一个单例模式 +初始化时: + + public static void main(String[] args) throws IOException { + ProxyRuntime runtime = ProxyRuntime.INSTANCE; + //设置负责读取配置文件的类 + runtime.setConfig(new MycatConfig()); + + //加载配置文件 + ConfigLoader.INSTANCE.loadCore(); + solveArgs(args); + + int cpus = Runtime.getRuntime().availableProcessors(); + runtime.setNioReactorThreads(cpus); + runtime.setReactorThreads(new MycatReactorThread[cpus]); + + // runtime.setNioProxyHandler(new DefaultMySQLProxyHandler()); + // runtime.setNioProxyHandler(new DefaultDirectProxyHandler()); + // runtime.setSessionManager(new DefaultTCPProxySessionManager()); + // Debug观察MySQL协议用 + // runtime.setSessionManager(new MySQLStudySessionManager()); + runtime.setSessionManager(new MycatSessionManager()); + runtime.init(); + + ProxyStarter.INSTANCE.start(); + } +我们展开ProxyStarter.INSTANCE.start();集群这里不做详细说明 + + public void start() throws IOException { + ProxyRuntime runtime = ProxyRuntime.INSTANCE; + MycatConfig conf = runtime.getConfig(); + ProxyConfig proxyConfig = conf.getConfig(ConfigEnum.PROXY); + ProxyBean proxybean = proxyConfig.getProxy(); + // 启动NIO Acceptor + NIOAcceptor acceptor = new NIOAcceptor(new DirectByteBufferPool(proxybean.getBufferPoolPageSize(), + proxybean.getBufferPoolChunkSize(), + proxybean.getBufferPoolPageNumber())); + acceptor.start();(1) + runtime.setAcceptor(acceptor); (2) + + ClusterConfig clusterConfig = conf.getConfig(ConfigEnum.CLUSTER); + ClusterBean clusterBean = clusterConfig.getCluster(); + if (clusterBean.isEnable()) { + // 启动集群 + startCluster(runtime, clusterBean, acceptor); + } else { + // 未配置集群,直接启动 + startProxy(true); + } + } + +NIOAcceptor是一个线程,这里我们展开NIOAcceptor中的run方法 + + public void run() { + long ioTimes = 0; + ReactorEnv reactorEnv = new ReactorEnv(); + while (true) { + try { + selector.select(SELECTOR_TIMEOUT); + final Set keys = selector.selectedKeys(); + // logger.info("handler keys ,total " + selected); + if (keys.isEmpty()) { + if 
(!pendingJobs.isEmpty()) { + ioTimes = 0; + this.processNIOJob(); + } + continue; + } else if ((ioTimes > 5) & !pendingJobs.isEmpty()) { + ioTimes = 0; + this.processNIOJob(); + } + ioTimes++; + for (final SelectionKey key : keys) { + try { + int readdyOps = key.readyOps(); + reactorEnv.curSession = null; + // 如果当前收到连接请求 + if ((readdyOps & SelectionKey.OP_ACCEPT) != 0) { + processAcceptKey(reactorEnv, key); + } + // 如果当前连接事件 + else if ((readdyOps & SelectionKey.OP_CONNECT) != 0) { + this.processConnectKey(reactorEnv, key); + } else if ((readdyOps & SelectionKey.OP_READ) != 0) { + this.processReadKey(reactorEnv, key); + + } else if ((readdyOps & SelectionKey.OP_WRITE) != 0) { + this.processWriteKey(reactorEnv, key); + } + } catch (Exception e) { + logger.warn("Socket IO err :", e); + key.cancel(); + if (reactorEnv.curSession != null) { + reactorEnv.curSession.close(false, "Socket IO err:" + e); + this.allSessions.remove(reactorEnv.curSession); + reactorEnv.curSession = null; + } + } + } + keys.clear(); + } catch (IOException e) { + logger.warn("caugh error ", e); + } + + } + + } +(1)NIOAcceptor里面我们看到它通过一个死循环不断的监听事件,获取事件的超时时间为100ms. +如果没有事件要处理,并且pendingJobs不为空则进行任务处理. +如果ioTimes大于5并且pendingJobs不为空则进行任务处理. 
+否则就对事件进行处理.这里重点关注processAcceptKey(reactorEnv, key);我们发现processAcceptKey中调用了accept()方法。 + + private void accept(ReactorEnv reactorEnv,SocketChannel socketChannel,ServerType serverType) throws IOException { + // 找到一个可用的NIO Reactor Thread,交付托管 + ProxyReactorThread nioReactor = getProxyReactor(reactorEnv); + // 将通道注册到reactor对象上 + nioReactor.acceptNewSocketChannel(serverType, socketChannel); + } +getProxyReactor这里可以理解为从工厂工获取一个可用的Reactor,这里我们的sessionManager为MycatSessionManager + + public void acceptNewSocketChannel(Object keyAttachement, final SocketChannel socketChannel) throws IOException { + pendingJobs.offer(() -> { + try { + T session = sessionMan.createSession(keyAttachement, this.bufPool, selector, socketChannel, true); + allSessions.add(session); + } catch (Exception e) { + logger.warn("regist new connection err " + e); + } + }); + } + + private void processNIOJob() { + Runnable nioJob = null; + while ((nioJob = pendingJobs.poll()) != null) { + try { + nioJob.run(); + } catch (Exception e) { + logger.warn("run nio job err ", e); + } + } + + } +这里有一个ConcurrentLinkedQueue的队列,在acceptNewSocketChannel添加,processNIOJob执行,同时,processNIOJob在run方法中一直死循环的执行。相当于阻塞。 +展开createSession + + public MycatSession createSession(Object keyAttachment, BufferPool bufPool, Selector nioSelector, + SocketChannel frontChannel, boolean isAcceptCon) throws IOException { + logger.info("MySQL client connected ." 
+ frontChannel); + MycatSession session = new MycatSession(bufPool, nioSelector, frontChannel);(1) + // 第一个IO处理器为Client Authorware + session.setCurNIOHandler(MySQLClientAuthHandler.INSTANCE); + // 默认为透传命令模式 + //session.curSQLCommand = DirectPassthrouhCmd.INSTANCE; + // 向MySQL Client发送认证报文 + session.sendAuthPackge(); + session.setSessionManager(this); + allSessions.add(session); + return session; + } +我们将(1)展开会发现,在这里socketChannel注册到了nioReactor,并且为OP_READ,同时attach了当前的MycatSession + + public AbstractMySQLSession(BufferPool bufferPool, Selector selector, SocketChannel channel) throws IOException { + this(bufferPool, selector, channel, SelectionKey.OP_READ); + + } + + public AbstractSession(BufferPool bufferPool, Selector selector, SocketChannel channel, int socketOpt) + throws IOException { + this.bufPool = bufferPool; + this.nioSelector = selector; + this.channel = channel; + InetSocketAddress clientAddr = (InetSocketAddress) channel.getRemoteAddress(); + this.addr = clientAddr.getHostString() + ":" + clientAddr.getPort(); + this.host = clientAddr.getHostString(); + SelectionKey socketKey = channel.register(nioSelector, socketOpt, this); + this.channelKey = socketKey; + this.proxyBuffer = new ProxyBuffer(this.bufPool.allocate()); + this.sessionId = ProxyRuntime.INSTANCE.genSessionId(); + this.startTime =System.currentTimeMillis(); + } + +(2)下面我们展开startProxy(true); +(2)下面我们展开startProxy(true); +(2)下面我们展开startProxy(true); + + public void startProxy(boolean isLeader) throws IOException { + ProxyRuntime runtime = ProxyRuntime.INSTANCE; + MycatConfig conf = runtime.getConfig(); + NIOAcceptor acceptor = runtime.getAcceptor(); + + ProxyConfig proxyConfig = conf.getConfig(ConfigEnum.PROXY); + ProxyBean proxyBean = proxyConfig.getProxy(); + if (acceptor.startServerChannel(proxyBean.getIp(), proxyBean.getPort(), ServerType.MYCAT)){(1) + startReactor();(2) + + // 加载配置文件信息 + ConfigLoader.INSTANCE.loadAll(); + + ProxyRuntime.INSTANCE.getConfig().initRepMap(); + 
ProxyRuntime.INSTANCE.getConfig().initSchemaMap(); + + conf.getMysqlRepMap().forEach((repName, repBean) -> { + repBean.initMaster(); + repBean.getMetaBeans().forEach(metaBean -> metaBean.prepareHeartBeat(repBean, repBean.getDataSourceInitStatus())); + }); + } + + ClusterConfig clusterConfig = conf.getConfig(ConfigEnum.CLUSTER); + ClusterBean clusterBean = clusterConfig.getCluster(); + // 主节点才启动心跳,非集群按主节点处理 + if (isLeader) { + runtime.startHeartBeatScheduler(); + } + + BalancerConfig balancerConfig = conf.getConfig(ConfigEnum.BALANCER); + BalancerBean balancerBean = balancerConfig.getBalancer(); + // 集群模式下才开启负载均衡服务 + if (clusterBean.isEnable() && balancerBean.isEnable()) { + runtime.getAcceptor().startServerChannel(balancerBean.getIp(), balancerBean.getPort(), ServerType.LOAD_BALANCER); + } + } + (1) startServerChannel方法这里根据serverType获取不同的serverChannel,startServerChannel中的 openServerChannel这个方法的作用,将获取到的 serverChannel注册到selector,selector在ProxyReactorThread中定义,并注册为OP_ACCEPT。 + + serverChannel.register(selector, SelectionKey.OP_ACCEPT, serverType); + + (2)startReactor + + private void startReactor() throws IOException { + // Mycat 2.0 Session Manager + MycatReactorThread[] nioThreads = (MycatReactorThread[]) MycatRuntime.INSTANCE.getReactorThreads(); + ProxyConfig proxyConfig = ProxyRuntime.INSTANCE.getConfig().getConfig(ConfigEnum.PROXY); + int cpus = nioThreads.length; + + for (int i = 0; i < cpus; i++) { + MycatReactorThread thread = new MycatReactorThread(ProxyRuntime.INSTANCE.getBufferPoolFactory().getBufferPool()); + thread.setName("NIO_Thread " + (i + 1)); + thread.start(); + nioThreads[i] = thread; + } + } +MycatReactorThread和NIOAcceptor一样继承与ProxyReactorThread,这里创建了和CPU个数相同的线程组。并开启,上面中给我们说到,MycatSession在创建的时候注册到nioReactor,并监听读。这里我们看一下MycatReactorThread读的操作。 + + protected void processReadKey(ReactorEnv reactorEnv, SelectionKey curKey) throws IOException { + // only from cluster server socket + T session = (T) curKey.attachment(); + reactorEnv.curSession = 
session; + session.getCurNIOHandler().onSocketRead(session); + } +这里的session为MycatSession,因为在注册的时候attach了。这里的CurNIOHandler为上文的MySQLClientAuthHandler。我们展开onSocketRead这个方法 + + public void onSocketRead(MycatSession session) throws IOException { + ProxyBuffer frontBuffer = session.getProxyBuffer(); + if (session.readFromChannel() == false + || CurrPacketType.Full != session.resolveMySQLPackage(frontBuffer, session.curMSQLPackgInf, false)) { + return; + } + + // 处理用户认证报文 + try { + AuthPacket auth = new AuthPacket(); + auth.read(frontBuffer); + + MycatConfig config = ProxyRuntime.INSTANCE.getConfig(); + UserConfig userConfig = config.getConfig(ConfigEnum.USER); + UserBean userBean = null; + for (UserBean user : userConfig.getUsers()) { + if (user.getName().equals(auth.user)) { + userBean = user; + break; + } + } + + // check user + if (!checkUser(session, userConfig, userBean)) { + failure(session, ErrorCode.ER_ACCESS_DENIED_ERROR, "Access denied for user '" + auth.user + "' with addr '" + session.addr + "'"); + return; + } + + // check password + if (!checkPassword(session, userBean, auth.password)) { + failure(session, ErrorCode.ER_ACCESS_DENIED_ERROR, "Access denied for user '" + auth.user + "', because password is error "); + return; + } + + // check schema + switch (checkSchema(userBean, auth.database)) { + case ErrorCode.ER_BAD_DB_ERROR: + failure(session, ErrorCode.ER_BAD_DB_ERROR, "Unknown database '" + auth.database + "'"); + break; + case ErrorCode.ER_DBACCESS_DENIED_ERROR: + String s = "Access denied for user '" + auth.user + "' to database '" + auth.database + "'"; + failure(session, ErrorCode.ER_DBACCESS_DENIED_ERROR, s); + break; + default: + // set schema + if (auth.database == null) { + session.schema = (userBean.getSchemas() == null) ? 
+ config.getDefaultSchemaBean() : config.getSchemaBean(userBean.getSchemas().get(0)); + } else { + session.schema = config.getSchemaBean(auth.database); + } + + logger.debug("set schema: {} for user: {}", session.schema, auth.user); + if (success(session, auth)) { + session.clientUser=auth.user;//设置session用户 + session.proxyBuffer.reset(); + session.answerFront(AUTH_OK); + // 认证通过,设置当前SQL Handler为默认Handler + session.setCurNIOHandler(DefaultMycatSessionHandler.INSTANCE);(1) + } + } + } catch (Throwable e) { + logger.warn("Frontend FrontendAuthenticatingState error:", e); + } + } +代码很长这里我们只看最后的session.setCurNIOHandler(DefaultMycatSessionHandler.INSTANCE);假设已经认证成功的情况下, +DefaultMycatSessionHandler中的onSocketRead,这里session为MycatSession执行所以执行onFrontRead。 + + public void onSocketRead(final AbstractMySQLSession session) throws IOException { + if (session instanceof MycatSession) { + onFrontRead((MycatSession) session); + } else { + onBackendRead((MySQLSession) session); + } + } + + private void onFrontRead(final MycatSession session) throws IOException { + boolean readed = session.readFromChannel(); + ProxyBuffer buffer = session.getProxyBuffer(); + // 在load data的情况下,SESSION_PKG_READ_FLAG会被打开,以不让进行包的完整性检查 + if (!session.getSessionAttrMap().containsKey(SessionKeyEnum.SESSION_PKG_READ_FLAG.getKey()) + && readed == false) { + return; + } + + switch (session.resolveMySQLPackage(buffer, session.curMSQLPackgInf, false)) { + case Full: + session.changeToDirectIfNeed(); + break; + case LongHalfPacket: + // 解包获取包的数据长度 + int pkgLength = session.curMSQLPackgInf.pkgLength; + ByteBuffer bytebuffer = session.proxyBuffer.getBuffer(); + if (pkgLength > bytebuffer.capacity() && !bytebuffer.hasRemaining()) { + try { + session.ensureFreeSpaceOfReadBuffer(); + } catch (RuntimeException e1) { + if (!session.curMSQLPackgInf.crossBuffer) { + session.curMSQLPackgInf.crossBuffer = true; + session.curMSQLPackgInf.remainsBytes = pkgLength + - (session.curMSQLPackgInf.endPos - 
session.curMSQLPackgInf.startPos); + session.sendErrorMsg(ErrorCode.ER_UNKNOWN_ERROR, e1.getMessage()); + } + session.proxyBuffer.readIndex = session.proxyBuffer.writeIndex; + } + } + case ShortHalfPacket: + session.proxyBuffer.readMark = session.proxyBuffer.readIndex; + return; + } + + if (session.curMSQLPackgInf.endPos < buffer.writeIndex) { + logger.warn("front contains multi package "); + } + + // 进行后端的结束报文处理的绑定 + CommandHandler adapter = HandlerParse.INSTANCE.getHandlerByType(session.curMSQLPackgInf.pkgType);(1) + if (null == adapter) { + logger.error("curr pkg Type :" + session.curMSQLPackgInf.pkgType + " is not handler proess"); + throw new IOException("curr pkgtype " + session.curMSQLPackgInf.pkgType + " not handler!"); + } + + // 指定session中的handler处理为指定的handler + session.commandHandler = adapter; + + if (!session.matchMySqlCommand()) {(1) + return; + } + + // 如果当前包需要处理,则交给对应方法处理,否则直接透传 + if (session.curSQLCommand.procssSQL(session)) { + session.curSQLCommand.clearFrontResouces(session, session.isClosed()); + } + } +这里我们重点看一下CommandHandler adapter = HandlerParse.INSTANCE.getHandlerByType(session.curMSQLPackgInf.pkgType); + +根据前台发过来的数据包类型选择不同的CommandHandler + +这里有一个重要的方法       session.matchMySqlCommand(),     根据sql类型构建CmdChain.绑定MySqlCommand,我们展开来看看 + + public boolean matchMySqlCommand(){ + switch(schema.schemaType){ + case DB_IN_ONE_SERVER: + return DBInOneServerCmdStrategy.INSTANCE.matchMySqlCommand(this); + case DB_IN_MULTI_SERVER: + DBINMultiServerCmdStrategy.INSTANCE.matchMySqlCommand(this); + case ANNOTATION_ROUTE: + AnnotateRouteCmdStrategy.INSTANCE.matchMySqlCommand(this); + //case SQL_PARSE_ROUTE: + //AnnotateRouteCmdStrategy.INSTANCE.matchMySqlCommand(this); + default: + throw new InvalidParameterException("schema type is invalid "); + } + } +schemaType可在schema.yml中进行配置,默认是DB_IN_ONE_SERVER +我们这里只考虑DB_IN_ONE_SERVER,展开DBInOneServerCmdStrategy.INSTANCE.matchMySqlCommand(this); + + final public boolean matchMySqlCommand(MycatSession session) { + + 
MySQLCommand command = null; + if(MySQLPacket.COM_QUERY==(byte)session.curMSQLPackgInf.pkgType){ + /** + * sqlparser + */ + BufferSQLParser parser = new BufferSQLParser(); + int rowDataIndex = session.curMSQLPackgInf.startPos + MySQLPacket.packetHeaderSize +1 ; + int length = session.curMSQLPackgInf.pkgLength - MySQLPacket.packetHeaderSize - 1 ; + try { + parser.parse(session.proxyBuffer.getBuffer(), rowDataIndex, length, session.sqlContext); + } catch (Exception e) { + try { + logger.error("sql parse error",e); + session.sendErrorMsg(ErrorCode.ER_PARSE_ERROR, "sql parse error : "+e.getMessage()); + } catch (IOException e1) { + session.close(false, e1.getMessage()); + } + return false; + } + + byte sqltype = session.sqlContext.getSQLType()!=0?session.sqlContext.getSQLType():session.sqlContext.getCurSQLType(); + + if(BufferSQLContext.MYCAT_SQL==sqltype){ + session.curSQLCommand = MyCatCmdDispatcher.INSTANCE.getMycatCommand(session.sqlContext); + return true; + } + + command = MYSQLCOMMANDMAP.get(sqltype); + }else{ + command = MYCOMMANDMAP.get((byte)session.curMSQLPackgInf.pkgType); + } + if(command==null){ + command = DirectPassthrouhCmd.INSTANCE; + } + + /** + * 设置原始处理命令 + * 1. 设置目标命令 + * 2. 处理动态注解 + * 3. 处理静态注解 + * 4. 
构建命令或者注解链。 如果没有注解链,直接返回目标命令 + */ + SQLAnnotationChain chain = new SQLAnnotationChain(); + session.curSQLCommand = chain.setTarget(command) + .processDynamicAnno(session) + .processStaticAnno(session, staticAnnontationMap) + .build(); + return true; + } + +这个方法首先根据mysql的报文类型生成MySQLCommand.如果是COM_QUERY,则调用BufferSQLParser进行解析.否则通过MYCOMMANDMAP生成MySQLCommand.最后,如果command等于NULL,则为DirectPassthrouhCmd.注解这块,不做详细分析。 +由于当前为DirectPassthrouhCmd,即session.curSQLCommand=DirectPassthrouhCmd, 那我们就看一下DirectPassthrouhCmd中的procssSQL + + public boolean procssSQL(MycatSession session) throws IOException { + /* + * 获取后端连接可能涉及到异步处理,这里需要先取消前端读写事件 + */ + session.clearReadWriteOpts();(1) + + session.getBackend((mysqlsession, sender, success, result) -> { + + ProxyBuffer curBuffer = session.proxyBuffer; + // 切换 buffer 读写状态 + curBuffer.flip(); + if (success) { + // 没有读取,直接透传时,需要指定 透传的数据 截止位置 + curBuffer.readIndex = curBuffer.writeIndex; + // 改变 owner,对端Session获取,并且感兴趣写事件 + session.giveupOwner(SelectionKey.OP_WRITE); + try { + mysqlsession.writeToChannel(); + } catch (IOException e) { + session.closeBackendAndResponseError(mysqlsession, success, ((ErrorPacket) result)); + } + } else { + session.closeBackendAndResponseError(mysqlsession, success, ((ErrorPacket) result)); + } + }); + return false; + } +这个方法步骤 +1.首先取消前端读写事件,因为获取后端连接可能涉及到异步处理 + +2.调用session.getBackend获取一个后端(mysql端)连接,并将命令发送给后端,我们看下getBackend + + public void getBackend(AsynTaskCallBack callback) throws IOException { + MycatReactorThread reactorThread = (MycatReactorThread) Thread.currentThread(); + + final boolean runOnSlave = canRunOnSlave(); + + MySQLRepBean repBean = getMySQLRepBean(getbackendName()); + + /** + * 本次根据读写分离策略要使用的metaBean + */ + MySQLMetaBean targetMetaBean = repBean.getBalanceMetaBean(runOnSlave); + + if(targetMetaBean==null){ + String errmsg = " the metaBean is not found,please check datasource.yml!!! 
[balance] and [type] propertie or see debug log or check heartbeat task!!"; + if(logger.isDebugEnabled()){ + logger.error(errmsg); + } + ErrorPacket error = new ErrorPacket(); + error.errno = ErrorCode.ER_BAD_DB_ERROR; + error.packetId = 1; + error.message = errmsg; + responseOKOrError(error); + return; + } +首先获取当前线程MycatReactorThread,根据 backendName获取MySQLRepBean(一組MySQL复制集群,如主从或者多主)(backendNam可在schema.yml中设置schemas.defaultDN.replica) +然后根据=读写分离策略找出要使用的metaBean,如果为null,返回错误。 + +2.canRunOnSlave方法判断后端连接 是否可以走从节点 + 静态注解情况下 走读写分离 + 事务场景下,走从节点 + + private boolean canRunOnSlave(){ + //静态注解情况下 走读写分离 + if(NewSQLContext.ANNOTATION_BALANCE==sqlContext.getAnnotationType()){ + final long balancevalue = sqlContext.getAnnotationValue(NewSQLContext.ANNOTATION_BALANCE); + if(TokenHash.MASTER == balancevalue){ + return false; + }else if(TokenHash.SLAVE == balancevalue){ + return true; + }else{ + logger.error("sql balance type is invalid, run on slave [{}]",sqlContext.getRealSQL(0)); + } + return true; + } + + //非事务场景下,走从节点 + if(AutoCommit.ON ==autoCommit){ + if(masterSqlList.contains(sqlContext.getSQLType())){ + return false; + }else{ + //走从节点 + return true; + } + }else{ + return false; + } + } + +展开getBalanceMetaBean方法 + + public MySQLMetaBean getBalanceMetaBean(boolean runOnSlave){ + if(ReplicaBean.RepTypeEnum.SINGLE_NODE == replicaBean.getRepType()||!runOnSlave){ + return getCurWriteMetaBean(); + } + + MySQLMetaBean datas = null; + + switch(replicaBean.getBalanceType()){ + case BALANCE_ALL: + datas = getLBReadWriteMetaBean(); + break; + case BALANCE_ALL_READ: + datas = getLBReadMetaBean(); + //如果从节点不可用,从主节点获取连接 + if(datas==null){ + logger.warn("all slaveNode is Unavailable. use master node for read . balance type is {}", replicaBean.getBalanceType()); + datas = getCurWriteMetaBean(); + } + break; + case BALANCE_NONE: + datas = getCurWriteMetaBean(); + break; + default: + logger.warn("current balancetype is not supported!! 
[{}], use writenode connection .", replicaBean.getBalanceType()); + datas = getCurWriteMetaBean(); + break; + } + return datas; + } + +默认配置的是BALANCE_ALL_READ,因此会调用getLBReadMetaBean,如果从节点不可用,则从主节点获取连接,这几种情况可展开单独说明,此处跳过 +我们来看一下 getLBReadMetaBean + + private MySQLMetaBean getLBReadMetaBean(){ + List result = metaBeans.stream() + .filter(f -> f.isSlaveNode() && f.canSelectAsReadNode()) + .collect(Collectors.toList()); + return result.isEmpty() ? null : result.get(ThreadLocalRandom.current().nextInt(result.size())); + } + +这个方法是去查找去读节点,放入list并返回。 +继续展开canSelectAsReadNode方法 + + public boolean canSelectAsReadNode() { + int slaveBehindMaster = heartbeat.getSlaveBehindMaster(); + int dbSynStatus = heartbeat.getDbSynStatus(); + + if (!isAlive()){ + return false; + } + + if (dbSynStatus == DBHeartbeat.DB_SYN_ERROR) { + return false; + } + boolean isSync = dbSynStatus == DBHeartbeat.DB_SYN_NORMAL; + boolean isNotDelay = (slaveThreshold >= 0) ? (slaveBehindMaster < slaveThreshold) : true; + return isSync && isNotDelay; + } +这个方法主要检查当前节点是否可用。如果不可用 +走getCurWriteMetaBean()方法。展开getCurWriteMetaBean() + + private MySQLMetaBean getCurWriteMetaBean() { + return metaBeans.get(writeIndex).isAlive() ? 
metaBeans.get(writeIndex) : null; + } +如果当前读不可用,得到写节点。从写节点读取数据 + +接下来我们看下session.sendAuthPackge(); + + public void sendAuthPackge() throws IOException { + // 生成认证数据 + byte[] rand1 = RandomUtil.randomBytes(8); + byte[] rand2 = RandomUtil.randomBytes(12); + + // 保存认证数据 + byte[] seed = new byte[rand1.length + rand2.length]; + System.arraycopy(rand1, 0, seed, 0, rand1.length); + System.arraycopy(rand2, 0, seed, rand1.length, rand2.length); + this.seed = seed; + + // 发送握手数据包 + HandshakePacket hs = new HandshakePacket(); + hs.packetId = 0; + hs.protocolVersion = Version.PROTOCOL_VERSION; + hs.serverVersion = Version.SERVER_VERSION; + hs.threadId = this.getSessionId(); + hs.seed = rand1; + hs.serverCapabilities = getServerCapabilities(); + // hs.serverCharsetIndex = (byte) (charsetIndex & 0xff); + hs.serverStatus = 2; + hs.restOfScrambleBuff = rand2; + hs.write(proxyBuffer); + // 设置frontBuffer 为读取状态 + proxyBuffer.flip(); + proxyBuffer.readIndex = proxyBuffer.writeIndex; + this.writeToChannel();(1) + } +这里我们只看最后一行,这里是proxyBuffer写入channel中,我们把它展开 + + public void writeToChannel() throws IOException { + checkBufferOwner(true); + ByteBuffer buffer = proxyBuffer.getBuffer(); + buffer.limit(proxyBuffer.readIndex); + buffer.position(proxyBuffer.readMark); + int writed = channel.write(buffer); + proxyBuffer.readMark += writed; // 记录本次磁轭如到 Channel 中的数据 + if (!buffer.hasRemaining()) { + // logger.debug("writeToChannel write {} bytes ,curChannel is {}", writed,this); + // buffer 中需要透传的数据全部写入到 channel中后,会进入到当前分支.这时 readIndex == readLimit + if (proxyBuffer.readMark != proxyBuffer.readIndex) { + logger.error("writeToChannel has finished but readIndex != readLimit, please fix it !!!"); + } + if (proxyBuffer.readIndex > buffer.capacity() * 2 / 3) { + proxyBuffer.compact(); + } else { + buffer.limit(buffer.capacity()); + } + // 切换读写状态 + // proxyBuffer.flip(); + /* + * 如果需要自动切换owner,进行切换 1. 
writed==0 或者 buffer 中数据没有写完时,注册可写事件 + * 时,会进行owner 切换 注册写事件,完成后,需要自动切换回来 + */ + // if (proxyBuf.needAutoChangeOwner()) { + // proxyBuf.changeOwner(!proxyBuf.frontUsing()).setPreUsing(null); + // } + } else { + /** + * 1. writed==0 或者 buffer 中数据没有写完时,注册可写事件 通常发生在网络阻塞或者 客户端 + * COM_STMT_FETCH 命令可能会 出现没有写完或者 writed == 0 的情况 + */ + logger.debug("register OP_WRITE selectkey .write {} bytes. current channel is {}", writed, channel); + // 需要切换 owner ,同时保存当前 owner 用于数据传输完成后,再切换回来 + // proxyBuf 读写状态不切换,会切换到相同的事件,不会重复注册 + // proxyBuf.setPreUsing(proxyBuf.frontUsing()).changeOwner(!proxyBuf.frontUsing()); + } + checkWriteFinished(); + } + +这里设计的很巧妙, +1.proxyBuffer因为不能同时进行读写,所以确保proxyBuffer是可读状态。 + +2.channel 始终从 readMark 开始 读取数据,到 readIndex 结束。 + 即:写入到 channel中的数据范围是 readMark---readIndex 之间的数据。 + +3. readMark 指针的移动 + 将数据写出到channel中后,readMark 对应写出了多少数据。即: writed = channel.write(buffer); + 每次写出数据后,readMark 增加写出数据的长度。即: readMark += writed ; + readMark默认值为0. 有可能存在 要写出的数据 writed 没有写出去,或者只写出去了一部分的情况。 + 下次channel 可写时(通常可写事件被触发),接着从readMark 开始写出数据到channel中。 + 当readMark==readIndex 时,代表 数据全部写完。 + +4. 读写状态转换 + 数据全部写完后,proxybuffer 状态 转换为 可写状态。即 inReading = false; + +5. proxybuffer 压缩。 + 每次从proxybuffer读取数据写入到channel后, + 判断当前proxybuffer 已读是否大于总容量的2/3(readIndex > buffer.capacity() * 2 / 3). 
+   如果大于 2/3 进行一次 compact。 + +最后还有一个重要的方法checkWriteFinished,进行是否写入完毕检查 + + protected void checkWriteFinished() throws IOException { + checkBufferOwner(true); + if (!this.proxyBuffer.writeFinished()) { + this.change2WriteOpts(); + } else { + writeFinished(); + // clearReadWriteOpts(); + } + } +如果写入未完成,重新注册为OP_WRITE,重新注册到selector中,因为selector一直在轮询会在一次的执行writeToChannel。 继续执行写操作。 + + public void change2WriteOpts() { + checkBufferOwner(true); + int intesOpts = this.channelKey.interestOps(); + // 事件转换时,只注册一个事件,存在可读事件没有取消注册的情况。这里把判断取消 + //if ((intesOpts & SelectionKey.OP_WRITE) != SelectionKey.OP_WRITE) { + channelKey.interestOps(SelectionKey.OP_WRITE); + //} + } +如果写入完成 + + public void writeFinished() throws IOException { + this.getCurNIOHandler().onWriteFinished(this); + + } +因为已经通过认证这里的curNIOHandler为DefaultMycatSessionHandler + + public void onWriteFinished(AbstractMySQLSession session) throws IOException { + // 交给SQLComand去处理 + if (session instanceof MycatSession) { + MycatSession mycatSs = (MycatSession) session; + if (mycatSs.curSQLCommand.onFrontWriteFinished(mycatSs)) { + mycatSs.curSQLCommand.clearFrontResouces(mycatSs, false); + } + } else { + MycatSession mycatSs = ((MySQLSession) session).getMycatSession(); + if (mycatSs.curSQLCommand.onBackendWriteFinished((MySQLSession) session)) { + mycatSs.curSQLCommand.clearBackendResouces((MySQLSession) session, false); + } + } + } diff --git "a/doc/discussions/\345\212\250\346\200\201\346\263\250\350\247\243(\346\227\247).md" "b/doc/discussions/\345\212\250\346\200\201\346\263\250\350\247\243(\346\227\247).md" new file mode 100644 index 0000000..8648a7e --- /dev/null +++ "b/doc/discussions/\345\212\250\346\200\201\346\263\250\350\247\243(\346\227\247).md" @@ -0,0 +1,603 @@ +## 动态注解 + +cjw + + + +### 如何使用 + +1. 
先写action的执行类 + + ​ + + ```java + public interface SQLAnnotation extends Function { + /* + 因为继承的是 Function,匹配回调时候执行的是apply,利用Function的andThen便于组合函数,形成责任链 + */ + boolean isDebug=true;//开发时候用的标记 + void init(Map args); + /* + Map args 是action的参数 key是键名,value是值 + */ + } + ``` + + ​ + + \source\src\main\java\io\mycat\mycat2\sqlannotations + + ```java + public class CacheResult implements SQLAnnotation{ + public CacheResult() { + if (isDebug) + System.out.println("=>CacheResult 对象本身的构造 初始化"); + /* + 因为实现是SQLAnnotation annotation =annotationClass.getConstructor().newInstance(); + */ + } + + @Override + public void init(Map args) { + System.out.println("=>CacheResult 动态注解初始化"); + if (args != null) + args.entrySet().stream().forEach((c)->System.out.format("param:%s,value:%s\n",c.getKey(),c.getValue())); + } + @Override + public BufferSQLContext apply(BufferSQLContext context) { + /* + 传入的是BufferSQLContext,用来保存匹配后或者执行action后的状态 + */ + if (isDebug) + System.out.println("=>CacheResult 动态注解被调用"); + return context; + } + } + ``` + + + + +2. 在actions.yaml写action的名字与执行类的关系 + + actions.yaml(暂定文件名) + + 这个文件保存了action的名字与动态注解执行类的关系 + + ```yaml + actions: + - monitorSQL: io.mycat.mycat2.sqlannotations.MonintorSQL + - cacheResult: io.mycat.mycat2.sqlannotations.CacheResult + - sqlCach: io.mycat.mycat2.sqlannotations.SQLCach + ``` + + ​ + +3. 写动态注解 + + annotations.yaml(暂定文件名) + + 这个文件保存了动态注解的信息 + + ```yaml + annotations: #root + - schema: #schema数组 + name: schemA #schema的名字 + matches: + - match: #macth数组 + name: aaa #macth名称 + state: open #当写上open的时候,此macth生效 + sqltype: insert #sql语句类型,INSERT, DELETE,REPLACE,SELECT, UPDATE + where: + # 匹配的条件,匹配的语法下面再描述,where不写的时候,所有sql都会匹配成功(未实现) + - and: id between 1 and 100 + - and: name = "haha" and a=? 
+ - or: name2 = "ha" + tables: + # 匹配的table,表名是否在 x1,x2,x3之内 + - x1 + - x2 + - x3 + actions: + # 当schema,sqltype,where都匹配上的时候,执行下面的action, + # 现在的代码实现是下面的参数是动态注解初始化的时候,action实例化后传入的,执行的时候并不传入 + # 因为传入的参数是Map 参数的解析是action的责任,匹配的时候不能多次解析字符串, + # 这样导致性能下降 + - monitorSQL: + - param1: 1 + - cacheResult: + - sqlCach: + - param1: 1 + - param2: 2 + - match: + name: bbb + state: open + sqltype: insert + where: + - and: id between 2 and 50 + - or: name2 = ? + tables: + - x1 + - x2 + actions: + - monitorSQL: + - param1: 3 + - cacheResult: + - sqlCach: + - param1: 2 + - param2: 1 + ``` + + action的接口,init是实例化之后的时候被调用的, + + ```java + Map args 就是上面的action的参数 key是键名,value是值 + - monitorSQL: + - param1: 3 # param1是键名 3是值,都是字符串类型 + ``` + + ##### + +4. 运行 + + +```java +DynamicAnnotationManagerImpl manager = .... +BufferSQLContext context = new BufferSQLContext(); +BufferSQLParser sqlParser = new BufferSQLParser(); +String str = "select * where id between 1 and 100 and name = \"haha\" and a=1 and name2 = \"ha\""; +System.out.println(str); +sqlParser.parse(str.getBytes(), context); +manager.process("schemA", SQLType.INSERT, new String[]{"x1"}, context).run(); +//这里有多种处理函数来运行,不满足可以自己添上 + + + +manager.prototype(new HashMap<>()).process("schemA", SQLType.INSERT, new String[]{"x1"}, context).run(); + +/*prototype函数传入一个Map用作保存查询匹配器的缓存和达到匹配器线程安全的作用, +在整个匹配过程中,只有匹配器DynamicAnnotationMatch带有条件状态的,为什么不挪出来一个单独的状态的对象,因为开发难度大,之后考虑把该状态保存在BufferSQLContext,达到对象复用 +*/ +``` + +整体上使用流程如上. + +### 条件的语法(where里面的条件) + +``` yaml + where: + - and: id between 2 and 50 + - and: name1 = 1 + - or: name2 = ? +``` + +只支持两种逻辑运算:and,or,上述条件实际可能编译为 + +```java +if (id between 2 and 50 匹配成功){ + if (name1 = 1 匹配成功){ + return true; + } +} +if (name2 = ? 匹配成功){ + return true; +} +return false;//总会在最后添上return false;表示匹配失败 +``` + +匹配语法 + +?是通配符,表示任意一个token,尤其注意的是 例如a.b,应该这样写 ?.? + +一个token可以是数字,字符串,sql关键字,标识符 + +现在实现的匹配器支持以下匹配模式 + +``` +1."b = ? and c = ? and d = ? . ? and c = ?" +2."c = ?", +3."d = ? . ?" +4."b = 1" +5."?=?" 
+注释:6." d = ? . ? and c = ? and f = 1" +匹配 +"b = 1 and c = 1 and d = a.b and c = 1" +``` + +但是这个匹配器,是用前缀树+计算回溯的位置实现的,(如果有更好的算法请告诉我) + +- 形如?=?这种匹配模式,可能需要回溯几遍导致性能下降,所以尽量不要写这种匹配模式 + +- 也尽量不要写条件前后交叠的,例如模式1和模式6是前后交叠的,也需要回溯 + +- 也尽量不要写一个条件是另一个条件的存在包含关系的,比如模式2是模式1的子集,也需要回溯. + + 某段生成的代码 + + ```java + public final int _QUESTIONMARK_3_3_25_quest(int i, final int arrayCount, BufferSQLContext context, HashArray array) { + + if ((i) < arrayCount && i > -1) { + + { + int start = i; + i = context.matchPlaceholders(i); + + if ((i) < arrayCount && i > -1) { + + if (-1697447933 == array.getIntHash(i)) {//词法分析中intHash的值 + + ++i; + if ((i) < arrayCount && i > -1) { + + if (3145729 == array.getIntHash(i)) { + + ++i; + i = _EQUAL_6_5_25(i, arrayCount, context, array); + + } + } + } + } + } + } + return i; + } + ``` + + ​ + + ```java + public final int _QUESTIONMARK_3_3_3_quest(int i, final int arrayCount, BufferSQLContext context, HashArray array) { + + if ((i) < arrayCount && i > -1) {//范围检查,如果不回溯则不需要i > -1 + + { + int start = i; + i = context.matchPlaceholders(i);//? 
跳过 + + tags[0] = 1; + pick0(start - 3, arrayCount, context, array);//回溯 + + } + } + return i; + } + ``` + + ​ + + + +考虑到之后的性能问题,可能会提供几种匹配器给用户选择 + +- 预先处理条件的,例如如果匹配上模式1的,必然会匹配上模式2、3、4、5,这是可以利用正则表达式计算出关系,存在map里面,只要匹配上模式1就可以说明已经匹配模式2、3、4、5了(已经有例子,关系保存在了DynamicAnnotationRuntime的成员成员map里面) + +- 再加上不支持前后交叠的,基本上匹配器就可以实现一遍过了。。。不知道用户是否接受 + + ​ + +### 动态注解的构建流程如下: + +​ DynamicAnnotationManager 接受"actions.yaml", "annotations.yaml"两个文件,之后在resources里面找到这两个文件加载 + +ActonFactory加载actions.yaml + +```java +public Function get(List>>> need) throws Exception { + Iterator>> iterator = pretreatmentArgs(need).entrySet().iterator(); + Function res = null; + do { + Map.Entry> action = iterator.next(); + Map args=action.getValue(); + String actionName = action.getKey(); + System.out.println(action.toString()); + Class> annotationClass = resMap.get(actionName); + SQLAnnotation annotation = annotationClass.getConstructor().newInstance();//new + annotation.init(args);//参数传入 + if (res == null) { + res = annotation; + } else { + res = res.andThen(annotation);//组合函数 + } + } while (iterator.hasNext()); + return res==null?EMPTY:res; +} +``` + +DynamicAnnotationManagerImpl完成动态注解的装配 + +```java + for (Matches matche : matchesList) { + Match match = matche.getMatch(); + String state = match.getState(); + if (state == null) continue; + if (!state.trim().toUpperCase().equals("OPEN")) continue;//OPEN的才加载 +``` + + + +```java +// //把and or 操作分类 分别编译 +List> conditionList = match.getWhere(); + Map>> map = + conditionList.stream().collect(Collectors.partitioningBy((p) -> { + String string = ConditionUtil.mappingKeyInAndOr(p).toUpperCase().trim(); + return "AND".equals(string); + })); +............................................................................................... 
+ + DynamicAnnotationRuntime runtime = DynamicAnnotationUtil.compile(resMap); +// 编译出DynamicAnnotationMatch 包含在DynamicAnnotationRuntime 里 +// DynamicAnnotationMatch是实际匹配词法分析后HashArray的对象 +``` + + +```java +DynamicAnnotation annotation = new DynamicAnnotation(key, runtime.getMatch(), actonFactory.get(match.getActions()), runtime); + table.put(key, annotation); +// 保存结果 HashMap table ; + } +``` +tables经过一系列的转换变成Map> map + +其中int[]是经过词法分析之后intHash值 + +```yaml + tables: + - x1 + - x2 +``` + +而List是动态注解的列表, + +也就是说这是把相同tables的DynamicAnnotation归类在一起 + +然后把这个结果传进去RouteMap + +```java +public RouteMap(Map map) //RouteMap的构造函数 +public List get(int[] targets); +``` + +```java +public RouteMap(Map> map) +public List> get(int[] targets); +//如果target[n]是map中所有int[]这个key的子集,就对应的List>汇聚在一个list返回 +``` + +```java +public List get(int[] targets) { + int size = targets.length; + List c = new ArrayList<>(); + for (int i = 0; i < size; i++) { + List res = map.get(targets[i]);//HashMap> map;key是一个table的hash + int s = res.size(); + for (int j = 0; j < s; j++) { + T v = res.get(j); + c.add(v); + } + } + return c; +} +``` + +RouteMap对象完成对schema,sqltype,tables,条件的路由实现 + +此时,动态注解所需要的运行时构建完成 + + + +### 动态注解的运行流程如下: + +```java +DynamicAnnotationManagerImpl manager = .... 
+BufferSQLContext context = new BufferSQLContext(); +BufferSQLParser sqlParser = new BufferSQLParser(); +String str = "select * where id between 1 and 100 and name = \"haha\" and a=1 and name2 = \"ha\""; +System.out.println(str); +sqlParser.parse(str.getBytes(), context); +manager.prototype(new HashMap<>()).process("schemA", SQLType.INSERT, new String[]{"x1"}, context).run(); +``` + +整体上使用流程如上 + +```java +/** + * 不同线程复制一个 + * + * @param cache + * @return + */ +public DynamicAnnotationManagerImpl prototype(Map cache) { + return new DynamicAnnotationManagerImpl(cache, this.route); +} +``` + +​ 因为构建出来的match(条件里的匹配器)是带有条件的状态的,所以这个cache既有充当缓冲查询结果又保存了match的副本的作用,这样消耗了更多内存,也会有人就会问为什么不把match的状态单独做成一个对象,这是可以的,现在最简单的改造是每次处理都拷贝一个match进行匹配,经测试,拷贝之后造成了gc压力,性能下降了,另一方面,如果把条件状态单独取出来(之后考虑把这个状态移到BufferSQLContext)里这样处理,暂时还没有什么办法把条件把action高效对应上,一个action对应多个条件,各个条件有and,or运算,条件和action可能是重复,可能在多个match重复出现。 + +```java + public DynamicAnnotation[] getAnnotations(int schema, int sqltype, int[] tables) throws Exception { + DynamicAnnotation res[]; + DynamicAnnotation[] proto = route.front(schema, sqltype, tables); + res = new DynamicAnnotation[proto.length]; + for (int i = 0; i < proto.length; i++) { + DynamicAnnotation it = proto[i]; + //复制带有状态的match,其他的状态共享 + res[i] = new DynamicAnnotation(it.key, it.match.newInstance(), it.actions, it.runtime); + } + return res; + } +``` + +```java +public List get(int[] targets) {//根据tables获取可能匹配的DynamicAnnotation即yaml文件中的match + int size = targets.length; + List c = new ArrayList<>(); + for (int i = 0; i < size; i++) { + List res = map.get(targets[i]);//HashMap> map;key是一个table的hash + int s = res.size(); + for (int j = 0; j < s; j++) { + T v = res.get(j); + c.add(v); + } + } + return c; +} +``` + +```java +res= routeMap.get(tables).stream().flatMap((v) -> v.stream()) +.distinct().toArray(DynamicAnnotation[]::new); +//第一次构造的时候,进行了简单的对重复的对象去重,这里的DynamicAnnotation实际上是对应yaml上的match,每个match生成的DynamicAnnotation是唯一的 +``` + +```java + public void processNow(int 
schema, int sqltype, int[] tables, BufferSQLContext context) throws Exception { + Arrays.sort(tables);//排序之后减少组合的状态 + DynamicAnnotation[] annotations; + int hash = schema << 3 + sqltype << 2 + Arrays.hashCode(tables);//这个hash值得商讨改进 + annotations = cache.get(hash); + if (annotations == null) { + cache.put(hash, annotations = getAnnotations(schema, sqltype, tables)); + } + DynamicAnnotation[] res = annotations; + doAnnotations(res, context); + } +``` + +```java + public static void doAnnotations(DynamicAnnotation[] res, BufferSQLContext context) { + int size = res.length; + for (int i = 0; i < size; i++) { + DynamicAnnotation annotation = res[i]; + try { + annotation.match.pick(0, context); + if (annotation.match.isComplete()) {//每个match逐一匹配,并执行对应的action + annotation.actions.apply(context); + } + }catch (Exception e){ + System.out.println(annotation.toString()); + e.printStackTrace(); + } + } + } + +``` + +​ 可以看到,实际上,并没有处理action在多个macth中重复的问题,如果不想多次执行完全相同的action,可以把状态保存在BufferSQLContext,即使是相同的action类,但是可能因为构造的时候赋予了不同的参数,导致不同的行为,即使参数完全相同,如果处理了多个action重复的问题,那么执行的action可能就与yaml配置里面的处理顺序不一样。用户应该要清楚自己写了什么配置,导致了什么效果。 + +```java +res= routeMap.get(tables).stream().flatMap((v) -> v.stream()) +.distinct().toArray(DynamicAnnotation[]::new); +//第一次构造的时候,进行了简单的对重复的对象去重,这里的DynamicAnnotation实际上是对应yaml上的match, +//每个match生成的DynamicAnnotation是唯一的 +``` + + + +总结: + +利用jmh进行测试, + +\source\src\test\java\io\mycat\mycat2\sqlparser\DynamicAnnotationManagerBenchmark.java + +测试字符串b = 1 and c = 1 and d = a.b and c = 1 + +```yaml +annotations: + - schema: + name: schemA + matches: + - match: + name: aaa + state: open + sqltype: insert + where: + - and: id between 1 and 100 + - and: name = "haha" and a=? + - or: name2 = "ha" + tables: + - x1 + - x2 + - x3 + actions: + - monitorSQL: + - param1: 1 + - cacheResult: + - sqlCach: + - param1: 1 + - param2: 2 + - match: + name: bbb + state: open + sqltype: insert + where: + - and: id between 2 and 50 + - or: name2 = ? 
+ tables: + - x1 + - x2 + actions: + - monitorSQL: + - param1: 3 + - cacheResult: + - sqlCach: + - param1: 2 + - param2: 1 +``` + +结果是没有触发任何action + +| Benchmark | Score | Error | Units | +| ---------------------------------------- | -----------: | ------------: | ------ | +| dynamicAnnotation | 16623623.469 | ± 1511401.496 | ops/s | +| dynamicAnnotation:·gc.alloc.rate | 169.100 | ± 15.443 | MB/sec | +| stateOneAnd | 3007990.838 | ± 140923.929 | ops/s | +| stateOneAnd:·gc.alloc.rate | ≈ 10⁻⁴ | | MB/sec | +| stateOneAndDynamicAnnotation | 2506757.003 | ± 246100.642 | | +| stateOneAndDynamicAnnotation:·gc.alloc.rate | 25.498 | ± 2.510 | MB/sec | + + + +stateOneAnd是第一阶段解析 + +stateOneAndDynamicAnnotation是第一阶段+动态注解 + +dynamicAnnotation是动态注解 + + + +测试字符串SELECT * FROM Person WHERE name2 = \\\"ha\\\" ,条件同上,触发两个match + + + +| Benchmark | Score | Error | Units | +| ---------------------------------------- | -----------: | ------------: | ------ | +| dynamicAnnotation | 12141487.691 | ± 1650276.530 | ops/s | +| dynamicAnnotation:·gc.alloc.rate | 123.484 | ± 16.809 | MB/sec | +| stateOneAnd | 4343857.198 | ± 315225.109 | ops/s | +| stateOneAnd:·gc.alloc.rate | ≈ 10⁻⁴ | | MB/sec | +| stateOneAndDynamicAnnotation | 3033563.005 | ± 210254.616 | | +| stateOneAndDynamicAnnotation:·gc.alloc.rate | 30.850 | ± 2.101 | MB/sec | + + 由于碰上多个match匹配的情况,由于是多个match逐一匹配,所以会慢,单个match本身匹配是很快的,用户要在taml写条件和tables注意一点不要出现多个match匹配的情况,也尽量注意条件不要出现有矛盾的情况(前后交叠,一个条件是包含个条件) + +现在代码中的问题: + +- schema,sqltype,tables,获得匹配器的过程是耗时的,但是如果缓存结果又可能占很多内存 +- 前缀树+回溯实现的匹配器是否稳定可靠 +- 多个match需要逐一匹配,可以多个match中的条件都一次编译,但是条件与action匹配会变得复杂 + + + +本质问题: + +- schema,sqltype,tables,条件组合出来的情况很多,如何高效路由 + +- 条件与action如何高效对应 + +- 字符串如何高效匹配 + + ​ + diff --git "a/doc/discussions/\345\212\250\346\200\201\346\263\250\350\247\243\347\232\204\351\234\200\346\261\202\346\220\234\351\233\206\345\222\214\345\256\214\345\226\204.md" 
"b/doc/discussions/\345\212\250\346\200\201\346\263\250\350\247\243\347\232\204\351\234\200\346\261\202\346\220\234\351\233\206\345\222\214\345\256\214\345\226\204.md" new file mode 100644 index 0000000..b64e9e3 --- /dev/null +++ "b/doc/discussions/\345\212\250\346\200\201\346\263\250\350\247\243\347\232\204\351\234\200\346\261\202\346\220\234\351\233\206\345\222\214\345\256\214\345\226\204.md" @@ -0,0 +1,250 @@ +动态注解的需求搜集和完善 + +cjw + +背景: +什么是动态注解? + 根据schema,table,sql type和sql的字符串模式执行一些操作并提取信息,比如缓存,SQL拦截(黑白名单) +为什么需要它? + 因为高性能的第一阶段SQL解析不提取不保存丰富的SQL语法结构信息 + +它的本质是什么? + +为匹配sql定制的正则表达式拦截器 + +动态注解已经实现(提取通配符位置还没有实现),但是可能存在使用的问题, + 1.因为支持多种匹配模式,可能产生多次回溯,导致性能下降,需要确定特性和是否开放给用户选择匹配器 + 2.对于动态注解的编写方式和匹配规则,用户可能难以理解和接受 +任务: + 1.重新搜集需求并注意动态注解的全局作用域,优先级,匹配顺序,字符串模式匹配的回溯等问题, + 要求sql字符串模式匹配器必须适配sqlparse + 2.编写设计文档 + 3.实现字符串模式匹配器 + 4.测试字符串模式匹配器 + 5.实现动态注解的配置读取和匹配规则 + 6.测试动态注解 + 7.动态注解配置文件支持动态更新加载 + 8.动态注解支持加载到集群中的功能 + +现阶段动态注解设计 + +注: 可配置(0..1),一个(1),多个(0..n) + +| 属性名 | 位置 | 其父节点下的数量限制 | +| --------------------------------- | ------------------- | -------------------- | +| annotations | 配置的根节点 | (1) | +| global | annotations的属性 | (0..1) | +| schema | annotations的属性 | (0..1) | +| name | schema和match的属性 | (1) | +| blacklist(????实际上就是actions) | schema的属性 | (0..1) | +| matches | schema的属性 | (0..1) | +| match | matches里的一个元素 | (0..n) | +| state | match的属性 | (1) | +| sqltype(sql的类型) | match的属性 | (0..1) | +| where | match的属性 | (0..1) | +| tables | match的属性 | (0..1) | +| 数据库表名 | tables的子元素 | (0..n) | +| actions | match的属性 | (0..1) | +| action名字即注解类,对应一个java类 | actions的子元素 | (0..n) | + +action收集器:保存动态注解匹配的action的结果 + +sql:动态注解匹配器的参数 + +schema:动态注解的参数 + +sqltype:动态注解的参数 + +table:动态注解的参数 + + + +字符串模式匹配需要支持通配符,匹配成功后能保存通配符对应sql的token的下标 + +逻辑匹配过程(实际实现匹配过程经过优化会可能不一样): + +1.把global的action放进action收集器 + +​ 2.根据schema的name匹配, + +​ 若匹配成功,把blacklist的action放进action收集器 + +​ 3.选择state为open的match匹配器**按照配置的顺序进行匹配** + +​ 4.如果无sqltype没有tables则匹配成功,则进入下一步匹配 + +​ 如果有sqltype没有tables且sqltype匹配成功,则进入下一步匹配 + +​ 
如果无sqltype有tables且**tables包含table**则进入下一步匹配 + +​ 如果有sqltype有tables,且sqltype和tables都匹配成功,则进入下一步匹配 + +​ 5.如果没有where属性,则把actions放进action收集器 + +​ 如果有where属性,进入下一步匹配 + +​ 6.如果第一个运算是or运算,则匹配成功,把actions放进action收集器 + +​ 如果第一个运算是and运算,**匹配字符串模式**, + +​ 如果匹配成功,继续进行下一步匹配 + +​ 7.根据以下规则匹配, + +​ 凡是or运算的字符串模式匹配成功,则匹配成功 + +​ 凡是and运算的字符串模式匹配成功,则继续往下匹配直至返回成功 + +​ 直至结束时,累积的结果是匹配成功,则把actions放进action收集器 + + + +```yaml +annotations: + - global: + - blacklist: + - selelctAllow: true # sql 拦截 属于前置处理类 + - tableCheck: true + - whitehost: + user: root + host: 127.0.0.1 + - schema: + name: test + blacklist: # 覆盖全局配置 是否存在里层的action覆盖外层的需求? + - deleteAllow : false + matches: + - match: + name: select有条件and or有tables + state: open + sqltype: select + where: + - and: WHERE + - or: id = ? + tables: + - test1 + actions: + - cacheResult: + cache_time: 1800 + access_count: 60 + - match: + name: select有条件多个tables + state: open + sqltype: select + where: + - and: FROM + tables: + - test1 + - test2 + actions: + - cacheResult: + cache_time: 1800 + access_count: 70 + - monitorSQL: + - param1: 777 + - match: + name: select有条件无tables + state: open + sqltype: select + where: + - and: FROM test1 + actions: + - cacheResult: + cache_time: 1800 + access_count: 80 + - match: + name: select无条件有tables + state: open + sqltype: select + tables: + - sbtest1 + actions: + - cacheResult: + cache_time: 1800 + access_count: 90 + - match: + name: insert无tables无条件 + state: open + sqltype: insert + actions: + - monitorSQL: + - param1: 777 + - match: + name: insert无tables无条件2 + state: open + sqltype: insert + actions: + - monitorSQL: + - param1: 777 +``` + +性能问题1:考虑用户写了很多个match,导致匹配时间长 + +性能问题2:考虑一个sql多个table,导致table越多时间变长. + +作用域问题:是否有里层的action可以覆盖外层相同名字的action的需求 + +优先级问题:是否可以设置优先级使覆盖失效 + +编写问题:如何指导用户编写正确的匹配规则 + +逻辑运算是否允许嵌套? + +现在通过分析多个match中是否有相同的table设置,生成Map>解决问题 + +功能问题:字符串模式是否需要支持以下例子: + +```java +输入:"b = 1 and c = 1 and d = a.b and c = 1" +成功的字符串匹配模式: + "b = ? and c = ? and d = ? . ? and c = ?", + "c = ?", + "d = ? . 
?", + "b = 1" +``` + +```java +输入:"b = 1 and c = 1 and d = a.b and e = 1 and f = 1" +成功的字符串匹配模式: + "b = ? and c = ? and d = ? . ? and e = ?", + "d = ? . ? and e = ? and f = 1", +``` + + + +```java +输入:"b = 1 and c = 1 and d = 4 and e = 1 and f = 1" +成功的字符串匹配模式: + "b = ? and c = ? and d = 4 and e = ?", + "d = ? and e = 1 and f = 1", +``` + +```java +输入:"b = 1 and c = 1 and d = 4 and e = 2 and f = 1" +成功的字符串匹配模式: + "b = ? and c = ? and d = ? and e = 2", + "d = 4 and e = ? and f = 1", +``` + + + +```java +输入:"b = 1 and c = 1 and d = 4 and e = a.ccc and f = 1" +成功的字符串匹配模式: + "b = ? and c = ? and d = 4 and e = a . ?", + "d = ? and e = ? . ccc and f = 1", +``` + +```java +输入:"b = 1 and c = 1 and d = s and d = x and f = 1" +成功的字符串匹配模式: + "b = ? and c = ? and d = s and d = x", + "d = s and d = x and f = 1", +``` + +难以使用? + +是否无法控制匹配上了什么样的SQL? + +需要多次回溯导致性能下降????? + +放弃支持一些匹配模式可以使用某个匹配结果是某个匹配结果的子集来代替回溯的方式实现,提高性能 \ No newline at end of file From 134da93ac8bfdc559b7c8c37d870b6296d4fcfc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=88=98=E6=A5=A0?= Date: Fri, 6 Apr 2018 12:08:22 +0800 Subject: [PATCH 15/22] =?UTF-8?q?=E7=AE=80=E5=8C=96sum=E8=A1=A8=E8=BE=BE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../main/java/io/mycat/proxy/MycatReactorThread.java | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/source/src/main/java/io/mycat/proxy/MycatReactorThread.java b/source/src/main/java/io/mycat/proxy/MycatReactorThread.java index df453a0..170ac4c 100644 --- a/source/src/main/java/io/mycat/proxy/MycatReactorThread.java +++ b/source/src/main/java/io/mycat/proxy/MycatReactorThread.java @@ -3,7 +3,9 @@ import java.io.IOException; import java.util.HashMap; import java.util.LinkedList; +import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.stream.Stream; import org.slf4j.Logger; @@ -65,15 +67,17 @@ private int getUsingBackendConCounts(MySQLMetaBean mySQLMetaBean) { 
MycatSession mycatSession = (MycatSession) session; return mycatSession.getBackendConCounts(mySQLMetaBean); }) - .reduce(0, (sum, count) -> sum += count); + .mapToInt(Integer::intValue) + .sum(); } public void createSession(MySQLMetaBean mySQLMetaBean, SchemaBean schema, AsynTaskCallBack callBack) throws IOException { int count = Stream.of(ProxyRuntime.INSTANCE.getReactorThreads()) .map(session -> ((MycatReactorThread)session).mySQLSessionMap.get(mySQLMetaBean)) - .filter(list -> list != null) - .reduce(0, (sum, list) -> sum += list.size(), (sum1, sum2) -> sum1 + sum2); + .filter(Objects::nonNull) + .mapToInt(List::size) + .sum(); int backendCounts = getUsingBackendConCounts(mySQLMetaBean); logger.debug("all session backend count is {},reactor backend count is {},metabean max con is {}",backendCounts,count,mySQLMetaBean.getDsMetaBean().getMaxCon()); if (count + backendCounts + 1 > mySQLMetaBean.getDsMetaBean().getMaxCon()) { From a5a09dbe759e3f3a31d0600d49e6b6229f033bac Mon Sep 17 00:00:00 2001 From: jwc Date: Fri, 6 Apr 2018 19:27:17 +0800 Subject: [PATCH 16/22] =?UTF-8?q?fixed=20the=20bug=20#150=20=E8=83=8C?= =?UTF-8?q?=E6=99=AF:=201.mycat.yml=E4=B8=AD=E7=9A=84bufferPoolChunkSize?= =?UTF-8?q?=E8=BF=87=E5=B0=8F,=E6=AF=94=E5=A6=8264,=E5=AF=BC=E8=87=B4?= =?UTF-8?q?=E6=9E=84=E9=80=A0AuthPackge=E7=A9=BA=E9=97=B4=E4=B8=8D?= =?UTF-8?q?=E8=B6=B3=E6=8A=A5=E9=94=99=20=E5=85=B7=E4=BD=93=E5=87=BA?= =?UTF-8?q?=E9=94=99=E4=BD=8D=E7=BD=AE=E4=B8=BAMycatSession.sendAuthPackge?= =?UTF-8?q?,=E8=A7=A3=E5=86=B3=E5=8A=9E=E6=B3=95=E4=B8=BA=E6=A0=B9?= =?UTF-8?q?=E6=8D=AE=E5=86=85=E5=AD=98=E5=A4=A7=E5=B0=8F=E5=8A=A8=E6=80=81?= =?UTF-8?q?=E8=B0=83=E6=95=B4=20=E8=AE=BE=E7=BD=AE=E7=9A=84=E5=80=BC=202.?= =?UTF-8?q?=E5=BD=93bufferPoolChunkSize=E5=B0=8F=E4=BA=8E90=E6=97=B6,Backe?= =?UTF-8?q?ndIOTaskWithResultSet=E7=9A=84onSocketRead=E5=9C=A8=E8=BF=9E?= =?UTF-8?q?=E6=8E=A5=E5=90=8E=E7=AB=AFmysql=E6=97=B6=E5=80=99=E4=B8=8D?= =?UTF-8?q?=E8=83=BD=E5=A4=84=E7=90=86ProxyBuffer=E4=B8=AD=E7=9A=84?= 
=?UTF-8?q?=E6=95=B0=E6=8D=AE,=E5=AF=BC=E8=87=B4=E4=B8=8D=E6=96=AD?= =?UTF-8?q?=E8=BE=93=E5=87=BAreaded=20zero=20bytes=20,Maybe=20a=20bug=20,p?= =?UTF-8?q?lease=20fix=20it=20!!!!?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../io/mycat/mycat2/AbstractMySQLSession.java | 598 +++++++++--------- .../io/mycat/mycat2/beans/conf/ProxyBean.java | 10 +- .../main/java/io/mycat/proxy/ProxyBuffer.java | 3 + .../io/mycat/proxy/ProxyReactorThread.java | 1 + .../io/mycat/proxy/buffer/BufferPool.java | 1 + source/src/main/resources/mycat.yml | 2 +- 6 files changed, 318 insertions(+), 297 deletions(-) diff --git a/source/src/main/java/io/mycat/mycat2/AbstractMySQLSession.java b/source/src/main/java/io/mycat/mycat2/AbstractMySQLSession.java index 17c9443..24d6cd9 100644 --- a/source/src/main/java/io/mycat/mycat2/AbstractMySQLSession.java +++ b/source/src/main/java/io/mycat/mycat2/AbstractMySQLSession.java @@ -1,11 +1,5 @@ package io.mycat.mycat2; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.SelectionKey; -import java.nio.channels.Selector; -import java.nio.channels.SocketChannel; - import io.mycat.mycat2.beans.MySQLCharset; import io.mycat.mycat2.beans.MySQLPackageInf; import io.mycat.mycat2.beans.conf.ProxyConfig; @@ -24,299 +18,313 @@ import io.mycat.util.StringUtil; import io.mycat.util.TimeUtil; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.SocketChannel; + /** * 抽象的MySQL的连接会话 - * - * @author wuzhihui * + * @author wuzhihui */ public abstract class AbstractMySQLSession extends AbstractSession { - // 当前接收到的包类型 - public enum CurrPacketType { - Full, LongHalfPacket, ShortHalfPacket - } - - /** - * 字符集 - */ - public MySQLCharset charSet = new MySQLCharset(); - /** - * 用户 - */ - public String clientUser; - - /** - * 事务隔离级别 - */ - public Isolation isolation = Isolation.REPEATED_READ; 
- - /** - * 事务提交方式 - */ - public AutoCommit autoCommit = AutoCommit.ON; - - /** - * 认证中的seed报文数据 - */ - public byte[] seed; - - protected long lastLargeMessageTime; - protected long lastReadTime; - - /** - * 当前处理中的SQL报文的信息 - */ - public MySQLPackageInf curMSQLPackgInf = new MySQLPackageInf(); - - /** - * 用来进行指定结束报文处理 - */ - public CommandHandler commandHandler = CommQueryHandler.INSTANCE; - - public AbstractMySQLSession(BufferPool bufferPool, Selector selector, SocketChannel channel) throws IOException { - this(bufferPool, selector, channel, SelectionKey.OP_READ); - - } - - public AbstractMySQLSession(BufferPool bufferPool, Selector selector, SocketChannel channel, int keyOpt) - throws IOException { - super(bufferPool, selector, channel, keyOpt); - - } - - public void setCurBufOwner(boolean curBufOwner) { - this.curBufOwner = curBufOwner; - } - - /** - * 回应客户端(front或Sever)OK 报文。 - * - * @param pkg - * ,必须要是OK报文或者Err报文 - * @throws IOException - */ - public void responseOKOrError(MySQLPacket pkg) throws IOException { - // proxyBuffer.changeOwner(true); - this.proxyBuffer.reset(); - pkg.write(this.proxyBuffer); - proxyBuffer.flip(); - proxyBuffer.readIndex = proxyBuffer.writeIndex; - this.writeToChannel(); - } - - /** - * 回应客户端(front或Sever)OK 报文。 - * - * @param pkg - * ,必须要是OK报文或者Err报文 - * @throws IOException - */ - public void responseOKOrError(byte[] pkg) throws IOException { - // proxyBuffer.changeOwner(true); - this.proxyBuffer.reset(); - proxyBuffer.writeBytes(OKPacket.OK); - proxyBuffer.flip(); - proxyBuffer.readIndex = proxyBuffer.writeIndex; - this.writeToChannel(); - } - - /** - * 解析MySQL报文,解析的结果存储在curMSQLPackgInf中,如果解析到完整的报文,就返回TRUE - * 如果解析的过程中同时要移动ProxyBuffer的readState位置,即标记为读过,后继调用开始解析下一个报文,则需要参数markReaded - * =true - * - * @param proxyBuf - * @return - * @throws IOException - */ - public CurrPacketType resolveMySQLPackage(ProxyBuffer proxyBuf, MySQLPackageInf curPackInf, boolean markReaded) - throws IOException { - - lastReadTime = 
TimeUtil.currentTimeMillis(); - - ByteBuffer buffer = proxyBuf.getBuffer(); - // 读取的偏移位置 - int offset = proxyBuf.readIndex; - // 读取的总长度 - int limit = proxyBuf.writeIndex; - // 读取当前的总长度 - int totalLen = limit - offset; - if (totalLen == 0) { // 透传情况下. 如果最后一个报文正好在buffer 最后位置,已经透传出去了.这里可能不会为零 - return CurrPacketType.ShortHalfPacket; - } - - if (curPackInf.remainsBytes == 0 && curPackInf.crossBuffer) { - curPackInf.crossBuffer = false; - } - - // 如果当前跨多个报文 - if (curPackInf.crossBuffer) { - if (curPackInf.remainsBytes <= totalLen) { - // 剩余报文结束 - curPackInf.endPos = offset + curPackInf.remainsBytes; - offset += curPackInf.remainsBytes; // 继续处理下一个报文 - proxyBuf.readIndex = offset; - curPackInf.remainsBytes = 0; - } else {// 剩余报文还没读完,等待下一次读取 - curPackInf.startPos = 0; - curPackInf.remainsBytes -= totalLen; - curPackInf.endPos = limit; - proxyBuf.readIndex = curPackInf.endPos; - return CurrPacketType.LongHalfPacket; - } - } - // 验证当前指针位置是否 - if (!ParseUtil.validateHeader(offset, limit)) { - // 收到短半包 - logger.debug("not read a whole packet ,session {},offset {} ,limit {}", getSessionId(), offset, limit); - return CurrPacketType.ShortHalfPacket; - } - - // 解包获取包的数据长度 - int pkgLength = ParseUtil.getPacketLength(buffer, offset); - - // 解析报文类型 - int packetType = -1; - - // 在包长度小于7时,作为resultSet的首包 - if (pkgLength <= 7) { - int index = offset + ParseUtil.msyql_packetHeaderSize; - - long len = proxyBuf.getInt(index, 1) & 0xff; - // 如果长度小于251,则取默认的长度 - if (len < 251) { - packetType = (int) len; - } else if (len == 0xfc) { - // 进行验证是否位数足够,作为短包处理 - if (!ParseUtil.validateResultHeader(offset, limit, 2)) { - // 收到短半包 - logger.debug("not read a whole packet ,session {},offset {} ,limit {}", getSessionId(), offset, - limit); - return CurrPacketType.ShortHalfPacket; - } - packetType = (int) proxyBuf.getInt(index + 1, 2); - } else if (len == 0xfd) { - - // 进行验证是否位数足够,作为短包处理 - if (!ParseUtil.validateResultHeader(offset, limit, 3)) { - // 收到短半包 - logger.debug("not read a whole packet ,session 
{},offset {} ,limit {}", getSessionId(), offset, - limit); - return CurrPacketType.ShortHalfPacket; - } - - packetType = (int) proxyBuf.getInt(index + 1, 3); - } else { - // 进行验证是否位数足够,作为短包处理 - if (!ParseUtil.validateResultHeader(offset, limit, 8)) { - // 收到短半包 - logger.debug("not read a whole packet ,session {},offset {} ,limit {}", getSessionId(), offset, - limit); - return CurrPacketType.ShortHalfPacket; - } - - packetType = (int) proxyBuf.getInt(index + 1, 8); - } - } else { - // 解析报文类型 - packetType = buffer.get(offset + ParseUtil.msyql_packetHeaderSize); - } - - // 包的类型 - curPackInf.pkgType = packetType; - // 设置包的长度 - curPackInf.pkgLength = pkgLength; - // 设置偏移位置 - curPackInf.startPos = offset; - - curPackInf.crossBuffer = false; - - curPackInf.remainsBytes = 0; - // 如果当前需要跨buffer处理 - if ((offset + pkgLength) > limit) { - logger.debug("Not a whole packet: required length = {} bytes, cur total length = {} bytes, limit ={}, " - + "ready to handle the next read event", pkgLength, (limit - offset), limit); - curPackInf.endPos = limit; - return CurrPacketType.LongHalfPacket; - } else { - // 读到完整报文 - curPackInf.endPos = curPackInf.pkgLength + curPackInf.startPos; - if (ProxyRuntime.INSTANCE.isTraceProtocol()) { - /** - * @todo 跨多个报文的情况下,修正错误。 - */ - final String hexs = StringUtil.dumpAsHex(buffer, curPackInf.startPos, curPackInf.pkgLength); - logger.debug( - " session {} packet: startPos={}, offset = {}, length = {}, type = {}, cur total length = {},pkg HEX\r\n {}", - getSessionId(), curPackInf.startPos, offset, pkgLength, packetType, limit, hexs); - } - if (markReaded) { - proxyBuf.readIndex = curPackInf.endPos; - } - return CurrPacketType.Full; - } - } - - public void ensureFreeSpaceOfReadBuffer() { - int pkgLength = curMSQLPackgInf.pkgLength; - ByteBuffer buffer = proxyBuffer.getBuffer(); - ProxyConfig config = ProxyRuntime.INSTANCE.getConfig().getConfig(ConfigEnum.PROXY); - // need a large buffer to hold the package - if (pkgLength > 
config.getProxy().getMax_allowed_packet()) { - throw new IllegalArgumentException("Packet size over the limit."); - } else if (buffer.capacity() < pkgLength) { - logger.debug("need a large buffer to hold the package.{}", curMSQLPackgInf); - lastLargeMessageTime = TimeUtil.currentTimeMillis(); - ByteBuffer newBuffer = bufPool.allocate(Double.valueOf(pkgLength + pkgLength * 0.1).intValue()); - resetBuffer(newBuffer); - } else { - if (proxyBuffer.writeIndex != 0) { - // compact bytebuffer only - proxyBuffer.compact(); - } else { - throw new RuntimeException(" not enough space"); - } - } - } - - /** - * 重置buffer - * - * @param newBuffer - */ - private void resetBuffer(ByteBuffer newBuffer) { - newBuffer.put(proxyBuffer.getBytes(proxyBuffer.readIndex, proxyBuffer.writeIndex - proxyBuffer.readIndex)); - proxyBuffer.resetBuffer(newBuffer); - recycleAllocedBuffer(proxyBuffer); - curMSQLPackgInf.endPos = curMSQLPackgInf.endPos - curMSQLPackgInf.startPos; - curMSQLPackgInf.startPos = 0; - } - - /** - * 检查 是否需要切换回正常大小buffer. 
- * - */ - public void changeToDirectIfNeed() { - - if (!proxyBuffer.getBuffer().isDirect()) { - - if (curMSQLPackgInf.pkgLength > bufPool.getChunkSize()) { - lastLargeMessageTime = TimeUtil.currentTimeMillis(); - return; - } - - if (lastLargeMessageTime < lastReadTime - 30 * 1000L) { - logger.info("change to direct con read buffer ,cur temp buf size : {}", - proxyBuffer.getBuffer().capacity()); - ByteBuffer bytebuffer = bufPool.allocate(); - if (!bytebuffer.isDirect()) { - bufPool.recycle(bytebuffer); - } else { - resetBuffer(bytebuffer); - } - lastLargeMessageTime = TimeUtil.currentTimeMillis(); - } - } - } + // 当前接收到的包类型 + public enum CurrPacketType { + Full, LongHalfPacket, ShortHalfPacket + } + + /** + * 字符集 + */ + public MySQLCharset charSet = new MySQLCharset(); + /** + * 用户 + */ + public String clientUser; + + /** + * 事务隔离级别 + */ + public Isolation isolation = Isolation.REPEATED_READ; + + /** + * 事务提交方式 + */ + public AutoCommit autoCommit = AutoCommit.ON; + + /** + * 认证中的seed报文数据 + */ + public byte[] seed; + + protected long lastLargeMessageTime; + protected long lastReadTime; + + /** + * 当前处理中的SQL报文的信息 + */ + public MySQLPackageInf curMSQLPackgInf = new MySQLPackageInf(); + + /** + * 用来进行指定结束报文处理 + */ + public CommandHandler commandHandler = CommQueryHandler.INSTANCE; + + public AbstractMySQLSession(BufferPool bufferPool, Selector selector, SocketChannel channel) throws IOException { + this(bufferPool, selector, channel, SelectionKey.OP_READ); + + } + + public AbstractMySQLSession(BufferPool bufferPool, Selector selector, SocketChannel channel, int keyOpt) + throws IOException { + super(bufferPool, selector, channel, keyOpt); + + } + + public void setCurBufOwner(boolean curBufOwner) { + this.curBufOwner = curBufOwner; + } + + /** + * 回应客户端(front或Sever)OK 报文。 + * + * @param pkg ,必须要是OK报文或者Err报文 + * @throws IOException + */ + public void responseOKOrError(MySQLPacket pkg) throws IOException { + // proxyBuffer.changeOwner(true); + this.proxyBuffer.reset(); + 
pkg.write(this.proxyBuffer); + proxyBuffer.flip(); + proxyBuffer.readIndex = proxyBuffer.writeIndex; + this.writeToChannel(); + } + + /** + * 回应客户端(front或Sever)OK 报文。 + * + * @param pkg ,必须要是OK报文或者Err报文 + * @throws IOException + */ + public void responseOKOrError(byte[] pkg) throws IOException { + // proxyBuffer.changeOwner(true); + this.proxyBuffer.reset(); + proxyBuffer.writeBytes(OKPacket.OK); + proxyBuffer.flip(); + proxyBuffer.readIndex = proxyBuffer.writeIndex; + this.writeToChannel(); + } + + /** + * 解析MySQL报文,解析的结果存储在curMSQLPackgInf中,如果解析到完整的报文,就返回TRUE + * 如果解析的过程中同时要移动ProxyBuffer的readState位置,即标记为读过,后继调用开始解析下一个报文,则需要参数markReaded + * =true + * + * @param proxyBuf + * @return + * @throws IOException + */ + public CurrPacketType resolveMySQLPackage(ProxyBuffer proxyBuf, MySQLPackageInf curPackInf, boolean markReaded) + throws IOException { + + lastReadTime = TimeUtil.currentTimeMillis(); + + ByteBuffer buffer = proxyBuf.getBuffer(); + // 读取的偏移位置 + int offset = proxyBuf.readIndex; + // 读取的总长度 + int limit = proxyBuf.writeIndex; + // 读取当前的总长度 + int totalLen = limit - offset; + if (totalLen == 0) { // 透传情况下. 
如果最后一个报文正好在buffer 最后位置,已经透传出去了.这里可能不会为零 + return CurrPacketType.ShortHalfPacket; + } + + if (curPackInf.remainsBytes == 0 && curPackInf.crossBuffer) { + curPackInf.crossBuffer = false; + } + + // 如果当前跨多个报文 + if (curPackInf.crossBuffer) { + if (curPackInf.remainsBytes <= totalLen) { + // 剩余报文结束 + curPackInf.endPos = offset + curPackInf.remainsBytes; + offset += curPackInf.remainsBytes; // 继续处理下一个报文 + proxyBuf.readIndex = offset; + curPackInf.remainsBytes = 0; + } else {// 剩余报文还没读完,等待下一次读取 + curPackInf.startPos = 0; + curPackInf.remainsBytes -= totalLen; + curPackInf.endPos = limit; + proxyBuf.readIndex = curPackInf.endPos; + return CurrPacketType.LongHalfPacket; + } + } + // 验证当前指针位置是否 + if (!ParseUtil.validateHeader(offset, limit)) { + // 收到短半包 + logger.debug("not read a whole packet ,session {},offset {} ,limit {}", getSessionId(), offset, limit); + return CurrPacketType.ShortHalfPacket; + } + + // 解包获取包的数据长度 + int pkgLength = ParseUtil.getPacketLength(buffer, offset); + + // 解析报文类型 + int packetType = -1; + + // 在包长度小于7时,作为resultSet的首包 + if (pkgLength <= 7) { + int index = offset + ParseUtil.msyql_packetHeaderSize; + + long len = proxyBuf.getInt(index, 1) & 0xff; + // 如果长度小于251,则取默认的长度 + if (len < 251) { + packetType = (int) len; + } else if (len == 0xfc) { + // 进行验证是否位数足够,作为短包处理 + if (!ParseUtil.validateResultHeader(offset, limit, 2)) { + // 收到短半包 + logger.debug("not read a whole packet ,session {},offset {} ,limit {}", getSessionId(), offset, + limit); + return CurrPacketType.ShortHalfPacket; + } + packetType = (int) proxyBuf.getInt(index + 1, 2); + } else if (len == 0xfd) { + + // 进行验证是否位数足够,作为短包处理 + if (!ParseUtil.validateResultHeader(offset, limit, 3)) { + // 收到短半包 + logger.debug("not read a whole packet ,session {},offset {} ,limit {}", getSessionId(), offset, + limit); + return CurrPacketType.ShortHalfPacket; + } + + packetType = (int) proxyBuf.getInt(index + 1, 3); + } else { + // 进行验证是否位数足够,作为短包处理 + if (!ParseUtil.validateResultHeader(offset, limit, 8)) { 
+ // 收到短半包 + logger.debug("not read a whole packet ,session {},offset {} ,limit {}", getSessionId(), offset, + limit); + return CurrPacketType.ShortHalfPacket; + } + + packetType = (int) proxyBuf.getInt(index + 1, 8); + } + } else { + // 解析报文类型 + packetType = buffer.get(offset + ParseUtil.msyql_packetHeaderSize); + } + + // 包的类型 + curPackInf.pkgType = packetType; + // 设置包的长度 + curPackInf.pkgLength = pkgLength; + // 设置偏移位置 + curPackInf.startPos = offset; + + curPackInf.crossBuffer = false; + + curPackInf.remainsBytes = 0; + // 如果当前需要跨buffer处理 + + + if ((offset + pkgLength) > limit) { + logger.debug("Not a whole packet: required length = {} bytes, cur total length = {} bytes, limit ={}, " + + "ready to handle the next read event", pkgLength, (limit - offset), limit); + if (offset == 0 && pkgLength > limit){ + /* + cjw 2018.4.6 + 假设整个buffer空间为88,开始位置是0,需要容纳89的数据大小,还缺一个数据没用接受完, + 之后作为LongHalfPacket返回,之后上一级处理结果的函数因为是解析所以只处理整包,之后就一直不处理数据, + 导致一直没有把数据处理,一直报错 readed zero bytes ,Maybe a bug ,please fix it !!!! 
+ 解决办法:扩容 + */ + proxyBuf.setBuffer(this.bufPool.expandBuffer(this.proxyBuffer.getBuffer())); + } + curPackInf.endPos = limit; + return CurrPacketType.LongHalfPacket; + } else { + // 读到完整报文 + curPackInf.endPos = curPackInf.pkgLength + curPackInf.startPos; + if (ProxyRuntime.INSTANCE.isTraceProtocol()) { + /** + * @todo 跨多个报文的情况下,修正错误。 + */ + final String hexs = StringUtil.dumpAsHex(buffer, curPackInf.startPos, curPackInf.pkgLength); + logger.debug( + " session {} packet: startPos={}, offset = {}, length = {}, type = {}, cur total length = {},pkg HEX\r\n {}", + getSessionId(), curPackInf.startPos, offset, pkgLength, packetType, limit, hexs); + } + if (markReaded) { + proxyBuf.readIndex = curPackInf.endPos; + } + return CurrPacketType.Full; + } + } + + public void ensureFreeSpaceOfReadBuffer() { + int pkgLength = curMSQLPackgInf.pkgLength; + ByteBuffer buffer = proxyBuffer.getBuffer(); + ProxyConfig config = ProxyRuntime.INSTANCE.getConfig().getConfig(ConfigEnum.PROXY); + // need a large buffer to hold the package + if (pkgLength > config.getProxy().getMax_allowed_packet()) { + throw new IllegalArgumentException("Packet size over the limit."); + } else if (buffer.capacity() < pkgLength) { + logger.debug("need a large buffer to hold the package.{}", curMSQLPackgInf); + lastLargeMessageTime = TimeUtil.currentTimeMillis(); + ByteBuffer newBuffer = bufPool.allocate(Double.valueOf(pkgLength + pkgLength * 0.1).intValue()); + resetBuffer(newBuffer); + } else { + if (proxyBuffer.writeIndex != 0) { + // compact bytebuffer only + proxyBuffer.compact(); + } else { + throw new RuntimeException(" not enough space"); + } + } + } + + /** + * 重置buffer + * + * @param newBuffer + */ + private void resetBuffer(ByteBuffer newBuffer) { + newBuffer.put(proxyBuffer.getBytes(proxyBuffer.readIndex, proxyBuffer.writeIndex - proxyBuffer.readIndex)); + proxyBuffer.resetBuffer(newBuffer); + recycleAllocedBuffer(proxyBuffer); + curMSQLPackgInf.endPos = curMSQLPackgInf.endPos - 
curMSQLPackgInf.startPos; + curMSQLPackgInf.startPos = 0; + } + + /** + * 检查 是否需要切换回正常大小buffer. + */ + public void changeToDirectIfNeed() { + + if (!proxyBuffer.getBuffer().isDirect()) { + + if (curMSQLPackgInf.pkgLength > bufPool.getChunkSize()) { + lastLargeMessageTime = TimeUtil.currentTimeMillis(); + return; + } + + if (lastLargeMessageTime < lastReadTime - 30 * 1000L) { + logger.info("change to direct con read buffer ,cur temp buf size : {}", + proxyBuffer.getBuffer().capacity()); + ByteBuffer bytebuffer = bufPool.allocate(); + if (!bytebuffer.isDirect()) { + bufPool.recycle(bytebuffer); + } else { + resetBuffer(bytebuffer); + } + lastLargeMessageTime = TimeUtil.currentTimeMillis(); + } + } + } } diff --git a/source/src/main/java/io/mycat/mycat2/beans/conf/ProxyBean.java b/source/src/main/java/io/mycat/mycat2/beans/conf/ProxyBean.java index 117cc5f..21dc55f 100644 --- a/source/src/main/java/io/mycat/mycat2/beans/conf/ProxyBean.java +++ b/source/src/main/java/io/mycat/mycat2/beans/conf/ProxyBean.java @@ -1,6 +1,9 @@ package io.mycat.mycat2.beans.conf; import io.mycat.mycat2.beans.GlobalBean; +import io.mycat.proxy.buffer.BufferPooLFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Desc: mycat代理配置类 @@ -13,7 +16,7 @@ public class ProxyBean { private static final short DEFAULT_BUFFER_CHUNK_SIZE = 1024*4*2; private static final int DEFAULT_BUFFER_POOL_PAGE_SIZE = 1024*1024*4; private static final short DEFAULT_BUFFER_POOL_PAGE_NUMBER = 64; - + private static final Logger logger = LoggerFactory.getLogger(ProxyBean.class); private static final int MAX_ALLOWED_PACKET = 16 * 1024 * 1024; /** @@ -83,6 +86,11 @@ public short getBufferPoolChunkSize() { } public void setBufferPoolChunkSize(short bufferPoolChunkSize) { + if (bufferPoolChunkSize < 86){ + ///cjw 2018.4.6 fix the HandshakePacket write proxybuffer which is low than 86 lead to error + logger.warn("bufferPoolChunkSize should be greater than 86,and will be updated to 128;"); + 
bufferPoolChunkSize = 128; + } this.bufferPoolChunkSize = bufferPoolChunkSize; } diff --git a/source/src/main/java/io/mycat/proxy/ProxyBuffer.java b/source/src/main/java/io/mycat/proxy/ProxyBuffer.java index 7b9a93e..a3820cf 100644 --- a/source/src/main/java/io/mycat/proxy/ProxyBuffer.java +++ b/source/src/main/java/io/mycat/proxy/ProxyBuffer.java @@ -120,6 +120,9 @@ public void resetBuffer(ByteBuffer newBuffer){ this.writeIndex = newBuffer.position(); this.buffer = newBuffer; } + public void setBuffer(ByteBuffer newBuffer){ + this.buffer = newBuffer; + } /** * 只能用在读状态下,跳过指定的N个字符 diff --git a/source/src/main/java/io/mycat/proxy/ProxyReactorThread.java b/source/src/main/java/io/mycat/proxy/ProxyReactorThread.java index 35900e0..04f5dca 100644 --- a/source/src/main/java/io/mycat/proxy/ProxyReactorThread.java +++ b/source/src/main/java/io/mycat/proxy/ProxyReactorThread.java @@ -51,6 +51,7 @@ public void acceptNewSocketChannel(Object keyAttachement, final SocketChannel so T session = sessionMan.createSession(keyAttachement, this.bufPool, selector, socketChannel, true); allSessions.add(session); } catch (Exception e) { + e.printStackTrace(); logger.warn("regist new connection err " + e); } }); diff --git a/source/src/main/java/io/mycat/proxy/buffer/BufferPool.java b/source/src/main/java/io/mycat/proxy/buffer/BufferPool.java index db5062d..d6a978a 100644 --- a/source/src/main/java/io/mycat/proxy/buffer/BufferPool.java +++ b/source/src/main/java/io/mycat/proxy/buffer/BufferPool.java @@ -13,6 +13,7 @@ public interface BufferPool { public ByteBuffer allocate(); public ByteBuffer allocate(int size); + public ByteBuffer expandBuffer(ByteBuffer buffer); public void recycle(ByteBuffer theBuf); public long capacity(); public long size(); diff --git a/source/src/main/resources/mycat.yml b/source/src/main/resources/mycat.yml index cb3cbd1..7668522 100644 --- a/source/src/main/resources/mycat.yml +++ b/source/src/main/resources/mycat.yml @@ -2,5 +2,5 @@ proxy: ip: 0.0.0.0 port: 
8066 bufferPoolPageSize: 4194304 # 一页的大小,默认 1024*1024*4 - bufferPoolChunkSize: 8192 # chunk 大小 , 默认 1024*4*2 。 chunk 为bufferpool 分配的最小单元 + bufferPoolChunkSize: 8192 # chunk 大小 , 默认 1024*4*2 。 chunk 为bufferpool 分配的最小单元,不能低于86 bufferPoolPageNumber: 64 # 页数量. 默认 64 \ No newline at end of file From 59d41409f1373441e65c59c12b9405ca8566e761 Mon Sep 17 00:00:00 2001 From: flysquirrel Date: Mon, 9 Apr 2018 00:48:47 +0800 Subject: [PATCH 17/22] =?UTF-8?q?SchemaBean=20=E5=8E=BB=E6=8E=89defaultDN?= =?UTF-8?q?=E8=BF=99=E4=B8=AA=E5=AD=97=E6=AE=B5=EF=BC=8C=E5=AF=B9=E5=BA=94?= =?UTF-8?q?schema.yml=20=E5=8E=BB=E6=8E=89defaultDN=E5=B1=9E=E6=80=A7?= =?UTF-8?q?=EF=BC=8C=E5=8F=96=E8=80=8C=E4=BB=A3=E4=B9=8B=E7=9A=84=E6=98=AF?= =?UTF-8?q?defaultDataNode?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../src/main/java/io/mycat/mycat2/MycatConfig.java | 7 ------- .../src/main/java/io/mycat/mycat2/MycatSession.java | 6 ++++-- .../java/io/mycat/mycat2/beans/conf/SchemaBean.java | 12 ++---------- .../main/java/io/mycat/mycat2/cmds/ComInitDB.java | 7 +++++-- .../io/mycat/mycat2/tasks/BackendSynchemaTask.java | 3 ++- 5 files changed, 13 insertions(+), 22 deletions(-) diff --git a/source/src/main/java/io/mycat/mycat2/MycatConfig.java b/source/src/main/java/io/mycat/mycat2/MycatConfig.java index 0bf4fe4..6ea1b74 100644 --- a/source/src/main/java/io/mycat/mycat2/MycatConfig.java +++ b/source/src/main/java/io/mycat/mycat2/MycatConfig.java @@ -3,8 +3,6 @@ import java.util.HashMap; import java.util.Map; -import org.apache.commons.lang.StringUtils; - import io.mycat.mycat2.beans.GlobalBean; import io.mycat.mycat2.beans.MySQLRepBean; import io.mycat.mycat2.beans.conf.DNBean; @@ -62,11 +60,6 @@ public void initSchemaMap() { defaultSchemaBean = schema; } mycatSchemaMap.put(schema.getName(), schema); - String defaultDnName = schema.getDefaultDataNode(); - if (StringUtils.isNotEmpty(defaultDnName) - && mycatDataNodeMap.containsKey(defaultDnName)) { - 
schema.setDefaultDN(mycatDataNodeMap.get(defaultDnName)); - } schema.getTables().forEach(table -> { String theDataNodes[] = SplitUtil.split(table.getDataNode(), ',', '$', '-'); if (theDataNodes == null || theDataNodes.length <= 0) { diff --git a/source/src/main/java/io/mycat/mycat2/MycatSession.java b/source/src/main/java/io/mycat/mycat2/MycatSession.java index 1f72e2e..d9e1cc8 100644 --- a/source/src/main/java/io/mycat/mycat2/MycatSession.java +++ b/source/src/main/java/io/mycat/mycat2/MycatSession.java @@ -371,7 +371,8 @@ private String getbackendName(){ String backendName = null; switch (schema.getSchemaType()) { case DB_IN_ONE_SERVER: - backendName = schema.getDefaultDN().getReplica(); + backendName = ProxyRuntime.INSTANCE.getConfig().getMycatDataNodeMap() + .get(schema.getDefaultDataNode()).getReplica(); break; case ANNOTATION_ROUTE: break; @@ -388,7 +389,8 @@ private String getbackendName(){ backendName = dnBean.getReplica(); } if (StringUtils.isEmpty(backendName)) { - backendName = schema.getDefaultDN().getReplica(); + backendName = ProxyRuntime.INSTANCE.getConfig().getMycatDataNodeMap() + .get(schema.getDefaultDataNode()).getReplica(); } break; // case SQL_PARSE_ROUTE: diff --git a/source/src/main/java/io/mycat/mycat2/beans/conf/SchemaBean.java b/source/src/main/java/io/mycat/mycat2/beans/conf/SchemaBean.java index 06ac5bf..6421b44 100644 --- a/source/src/main/java/io/mycat/mycat2/beans/conf/SchemaBean.java +++ b/source/src/main/java/io/mycat/mycat2/beans/conf/SchemaBean.java @@ -24,7 +24,6 @@ public enum SchemaTypeEnum { public String name; public SchemaTypeEnum schemaType; private String defaultDataNode; - private DNBean defaultDN; private List tables = new ArrayList(); public String getName() { @@ -51,14 +50,6 @@ public void setDefaultDataNode(String defaultDataNode) { this.defaultDataNode = defaultDataNode; } - public DNBean getDefaultDN() { - return defaultDN; - } - - public void setDefaultDN(DNBean defaultDN) { - this.defaultDN = defaultDN; - } - 
public List getTables() { return tables; } @@ -69,6 +60,7 @@ public void setTables(List tables) { @Override public String toString() { - return "SchemaBean{" + "name='" + name + '\'' + ", schemaType=" + schemaType + ", defaultDN=" + defaultDN + ", tables=" + tables + '}'; + return "SchemaBean{" + "name='" + name + '\'' + ", schemaType=" + schemaType + ", tables=" + + tables + '}'; } } diff --git a/source/src/main/java/io/mycat/mycat2/cmds/ComInitDB.java b/source/src/main/java/io/mycat/mycat2/cmds/ComInitDB.java index 93380af..81f5991 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/ComInitDB.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/ComInitDB.java @@ -2,12 +2,13 @@ import java.io.IOException; -import io.mycat.mycat2.beans.conf.SchemaBean; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.mycat.mycat2.MycatConfig; import io.mycat.mycat2.MycatSession; +import io.mycat.mycat2.beans.conf.DNBean; +import io.mycat.mycat2.beans.conf.SchemaBean; import io.mycat.mycat2.sqlparser.BufferSQLParser; import io.mycat.mysql.packet.ErrorPacket; import io.mycat.mysql.packet.MySQLPacket; @@ -49,7 +50,9 @@ public boolean procssSQL(MycatSession session) throws IOException { session.responseOKOrError(OKPacket.OK); return false; }else if(SchemaBean.SchemaTypeEnum.DB_IN_ONE_SERVER==session.schema.getSchemaType()){ - session.schema.getDefaultDN().setDatabase(schema); + DNBean defaultDN = ProxyRuntime.INSTANCE.getConfig().getMycatDataNodeMap() + .get(session.schema.getDefaultDataNode()); + defaultDN.setDatabase(schema); return super.procssSQL(session); }else{ logger.warn("Unknown database '" + schema + "'"); diff --git a/source/src/main/java/io/mycat/mycat2/tasks/BackendSynchemaTask.java b/source/src/main/java/io/mycat/mycat2/tasks/BackendSynchemaTask.java index 051ca04..b93194e 100644 --- a/source/src/main/java/io/mycat/mycat2/tasks/BackendSynchemaTask.java +++ b/source/src/main/java/io/mycat/mycat2/tasks/BackendSynchemaTask.java @@ -113,7 +113,8 @@ 
private String findDatabase(MySQLSession session) { } } if (StringUtils.isEmpty(databases)) { - databases = session.getMycatSession().schema.getDefaultDN().getDatabase(); + databases = dataNodeMap.get(session.getMycatSession().schema.getDefaultDataNode()) + .getDatabase(); } return databases; } From d9abb169b7cd2e5982f23353cbb6ccb38c8c7495 Mon Sep 17 00:00:00 2001 From: jwc Date: Sat, 14 Apr 2018 16:09:23 +0800 Subject: [PATCH 18/22] getRealSQLSize can not return negative --- .../mycat2/sqlparser/BufferSQLContext.java | 43 +++++++++++-------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/source/src/main/java/io/mycat/mycat2/sqlparser/BufferSQLContext.java b/source/src/main/java/io/mycat/mycat2/sqlparser/BufferSQLContext.java index 2e5b0b3..487661e 100644 --- a/source/src/main/java/io/mycat/mycat2/sqlparser/BufferSQLContext.java +++ b/source/src/main/java/io/mycat/mycat2/sqlparser/BufferSQLContext.java @@ -1,11 +1,11 @@ package io.mycat.mycat2.sqlparser; -import java.util.Arrays; - import io.mycat.mycat2.sqlparser.SQLParseUtils.HashArray; import io.mycat.mycat2.sqlparser.byteArrayInterface.ByteArrayInterface; import io.mycat.mycat2.sqlparser.byteArrayInterface.TokenizerUtil; +import java.util.Arrays; + /** *
  * Created by Kaiz on 2017/3/21.
@@ -124,7 +124,7 @@ public BufferSQLContext() {
         sqlInfoArray = new long[256];
         annotationValue = new long[16];
         annotationStringValue = new String[16];
-        annotationCondition=new int[64];
+        annotationCondition = new int[64];
         myCmdValue = new HashArray(256);
         selectItemArray = new int[128];
     }
@@ -187,11 +187,11 @@ public String getSchemaName(int idx) {
     }
 
     public long getTokenType(int sqlIdx, int sqlPos) {
-        return hashArray.getType( (int)(sqlInfoArray[sqlIdx] >>> 50) + sqlPos);
+        return hashArray.getType((int) (sqlInfoArray[sqlIdx] >>> 50) + sqlPos);
     }
 
     public long getTokenHash(int sqlIdx, int sqlPos) {
-        return hashArray.getHash( (int)(sqlInfoArray[sqlIdx] >>> 50) + sqlPos);
+        return hashArray.getHash((int) (sqlInfoArray[sqlIdx] >>> 50) + sqlPos);
     }
 
     public long getSchemaHash(int idx) {
@@ -217,7 +217,7 @@ public String getSQLTableName(int sqlIdx, int tblIdx) {
             // int sqlInfoOffset = (sqlIdx << 2) + 3;
             // int tblResultOffset = sqlInfoArray[sqlInfoOffset] >>> 8;
             // int tblResultCount = sqlInfoArray[sqlInfoOffset] & 0xFF;
-            int sqlInfo = (int)sqlInfoArray[sqlIdx];
+            int sqlInfo = (int) sqlInfoArray[sqlIdx];
             int tblResultOffset = (sqlInfo >>> 8) & 0xFFF;
             int tblResultCount = sqlInfo & 0xFF;
             if (tblIdx < tblResultCount) {
@@ -242,12 +242,12 @@ public void setSQLFinished(int curHashPos) {
 
             int idx = curSQLIdx;
             curSQLIdx++;
-            long sqlInfo = ((long)preHashArrayPos & 0x3FFF) << 50;
-            sqlInfo |= ((long)hashArrayRealSQLOffset & 0xFF) << 42;
-            sqlInfo |= ((long)sqlType & 0xFF) << 34 ;
-            sqlInfo |= ((long)sqlSize & 0x3FFF) << 20;
-            sqlInfo |= ((long)preTableResultPos & 0xFFF) << 8;
-            sqlInfo |= (long)(curSQLTblCount & 0xFF);
+            long sqlInfo = ((long) preHashArrayPos & 0x3FFF) << 50;
+            sqlInfo |= ((long) hashArrayRealSQLOffset & 0xFF) << 42;
+            sqlInfo |= ((long) sqlType & 0xFF) << 34;
+            sqlInfo |= ((long) sqlSize & 0x3FFF) << 20;
+            sqlInfo |= ((long) preTableResultPos & 0xFFF) << 8;
+            sqlInfo |= (long) (curSQLTblCount & 0xFF);
             sqlInfoArray[idx] = sqlInfo;
             curSQLTblCount = 0;
             preTableResultPos = tblResultPos;
@@ -265,7 +265,7 @@ public int getSQLCount() {
 
     public int getSQLTblCount(int sqlIdx) {
         if (sqlIdx < totalSQLCount) {
-            return (int)sqlInfoArray[sqlIdx] & 0xFF;
+            return (int) sqlInfoArray[sqlIdx] & 0xFF;
         }
         return 0;
     }
@@ -293,11 +293,11 @@ public void setSQLIdx(int sqlIdx) {
     }
 
     public byte getSQLType() {
-        return (byte) ((this.sqlInfoArray[0]>>34) & 0xFF);
+        return (byte) ((this.sqlInfoArray[0] >> 34) & 0xFF);
     }
 
     public byte getSQLType(int sqlIdx) {
-        return (byte)((this.sqlInfoArray[sqlIdx]>>34) & 0xFF);
+        return (byte) ((this.sqlInfoArray[sqlIdx] >> 34) & 0xFF);
     }
 
     public byte getCurSQLType() {
@@ -321,13 +321,14 @@ public int getRealSQLOffset(int sqlIdx) {
     public int getRealSQLSize(int sqlIdx) {
         int hashArrayEndPos = ((int) (sqlInfoArray[sqlIdx] >> 50) & 0x3FFF)
                 + ((int) (sqlInfoArray[sqlIdx] >> 20) & 0x3FFF) - 1;
+        if (hashArrayEndPos < 0) return 0;
         return hashArray.getPos(hashArrayEndPos) + hashArray.getSize(hashArrayEndPos);
     }
 
     public String getRealSQL(int sqlIdx) {
         int sqlStartPos = getRealSQLOffset(sqlIdx);
         int sqlSize = getRealSQLSize(sqlIdx) - sqlStartPos;
-        return buffer.getString( sqlStartPos, sqlSize);
+        return buffer.getString(sqlStartPos, sqlSize);
     }
 
 
@@ -370,14 +371,15 @@ public void setAnnotationType(byte type) {
     public void setAnnotationValue(byte typeKey, long value) {
         this.annotationValue[typeKey] = value;
     }
+
     public void setAnnotationStringValue(byte typeKey, String value) {
         this.annotationStringValue[typeKey] = value;
     }
-    
+
     public String getAnnotationStringValue(byte typeKey) {
         return this.annotationStringValue[typeKey];
     }
-    
+
     public void setAnnotationStart(int pos) {
     }
 
@@ -395,6 +397,7 @@ public long getAnnotationValue(byte typeKey) {
     public HashArray getMyCmdValue() {
         return this.myCmdValue;
     }
+
     public String getAnnotationContent() {
         return null;
     } //by kaiz : 返回注解等号后面的内容
@@ -408,8 +411,9 @@ public HashArray getHashArray() {
     }
 
     public boolean matchDigit(int pos1, int data) {
-        return TokenizerUtil.pickNumber(pos1,this.hashArray,buffer)==data;
+        return TokenizerUtil.pickNumber(pos1, this.hashArray, buffer) == data;
     }
+
     public int matchPlaceholders(int pos1) {
         ++pos1;
 //        if (hashArray.getType(pos1)== Tokenizer2.DOT){
@@ -418,6 +422,7 @@ public int matchPlaceholders(int pos1) {
 //        }
         return pos1;
     }
+
     public int getTableIntHash(int idx) {
         int hashArrayIdx = tblResult[(idx << 1) + 1];
         int intHash = hashArray.getIntHash(hashArrayIdx);

From 9e4b7d86ed3e0132f507026a05290c51a53f7bd1 Mon Sep 17 00:00:00 2001
From: jwc 
Date: Sat, 14 Apr 2018 16:54:39 +0800
Subject: [PATCH 19/22] multiple ways to get backseesion

---
 .../java/io/mycat/mycat2/MycatSession.java    | 30 ++++++++++++++++++-
 1 file changed, 29 insertions(+), 1 deletion(-)

diff --git a/source/src/main/java/io/mycat/mycat2/MycatSession.java b/source/src/main/java/io/mycat/mycat2/MycatSession.java
index d9e1cc8..4388a68 100644
--- a/source/src/main/java/io/mycat/mycat2/MycatSession.java
+++ b/source/src/main/java/io/mycat/mycat2/MycatSession.java
@@ -418,6 +418,24 @@ private void putbackendMap(MySQLSession mysqlSession){
 		logger.debug("add backend connection in mycatSession . {}",mysqlSession);
 		list.add(mysqlSession);
 	}
+	/**
+	 * 根据datanode名称获取后端会话连接
+	 *
+	 * @return
+	 */
+	public void getBackendByDataNodeName(String dataNodeName,AsynTaskCallBack callback) throws IOException {
+		DNBean dnBean = ProxyRuntime.INSTANCE.getConfig().getDNBean(dataNodeName);
+		String repBeanName = "";
+		if (dnBean != null) {
+			repBeanName = dnBean.getReplica();
+		}
+		if (StringUtils.isEmpty(repBeanName)) {
+			repBeanName = ProxyRuntime.INSTANCE.getConfig().getMycatDataNodeMap()
+					.get(schema.getDefaultDataNode()).getReplica();
+			logger.warn("failed to get the replication group for the specified datanode!!! and will set the default data node");
+		}
+		getBackendByRepBeanName(repBeanName,callback);
+	}
 
 	/**
 	 * 当前操作的后端会话连接
@@ -425,11 +443,21 @@ private void putbackendMap(MySQLSession mysqlSession){
 	 * @return
 	 */
 	public void getBackend(AsynTaskCallBack callback) throws IOException {
+		getBackendByRepBeanName(getbackendName(),callback);
+	}
+
+	/**
+	 *根据复制组名称获取后端会话连接
+	 * @param repBeanName 复制组名称
+	 * @param callback cjw
+	 * @throws IOException
+	 */
+	public void getBackendByRepBeanName(String repBeanName,AsynTaskCallBack callback) throws IOException {
 		MycatReactorThread reactorThread = (MycatReactorThread) Thread.currentThread();
 		
 		final boolean runOnSlave = canRunOnSlave();
 		
-		MySQLRepBean repBean = getMySQLRepBean(getbackendName());
+		MySQLRepBean repBean = getMySQLRepBean(repBeanName);
 		
 		/**
 		 * 本次根据读写分离策略要使用的metaBean

From 9285932cd53a20b1e156d943e4e5c8f1f96b872f Mon Sep 17 00:00:00 2001
From: jwc 
Date: Mon, 16 Apr 2018 09:47:08 +0800
Subject: [PATCH 20/22] dev multi stream

---
 .../main/java/io/mycat/mycat2/ColumnMeta.java |  88 ++++
 .../java/io/mycat/mycat2/MycatSession.java    |  45 +-
 .../main/java/io/mycat/mycat2/PackWraper.java |  24 +
 .../cmds/multinode/DbInMultiServerCmd.java    | 215 +++++----
 .../cmds/strategy/AbstractCmdStrategy.java    |   6 +-
 .../strategy/DBINMultiServerCmdStrategy.java  |   3 +-
 .../java/io/mycat/mycat2/hbt/TableMeta.java   |  25 +-
 .../io/mycat/mycat2/route/RouteResultset.java | 114 ++---
 .../mycat2/tasks/AbstractDataNodeMerge.java   |  85 ++++
 .../mycat2/tasks/DataNodeMergeManager.java    | 435 ++++++++++++++++++
 .../tasks/HeapDataNodeMergeManager.java       | 123 +++++
 .../mycat2/tasks/MultiNodeQueryHandler.java   |   4 +
 .../io/mycat/mycat2/tasks/MyRowStream.java    | 185 ++++++++
 .../tasks/RawSQLQueryResultTaskWrapper.java   | 115 +++++
 .../java/io/mycat/mycat2/tasks/RowStream.java |  13 +
 .../mycat2/tasks/SQLQueryResultTask.java      |  80 ++++
 .../java/io/mycat/proxy/AbstractSession.java  |   1 +
 17 files changed, 1373 insertions(+), 188 deletions(-)
 create mode 100644 source/src/main/java/io/mycat/mycat2/ColumnMeta.java
 create mode 100644 source/src/main/java/io/mycat/mycat2/PackWraper.java
 create mode 100644 source/src/main/java/io/mycat/mycat2/tasks/AbstractDataNodeMerge.java
 create mode 100644 source/src/main/java/io/mycat/mycat2/tasks/DataNodeMergeManager.java
 create mode 100644 source/src/main/java/io/mycat/mycat2/tasks/HeapDataNodeMergeManager.java
 create mode 100644 source/src/main/java/io/mycat/mycat2/tasks/MultiNodeQueryHandler.java
 create mode 100644 source/src/main/java/io/mycat/mycat2/tasks/MyRowStream.java
 create mode 100644 source/src/main/java/io/mycat/mycat2/tasks/RawSQLQueryResultTaskWrapper.java
 create mode 100644 source/src/main/java/io/mycat/mycat2/tasks/SQLQueryResultTask.java

diff --git a/source/src/main/java/io/mycat/mycat2/ColumnMeta.java b/source/src/main/java/io/mycat/mycat2/ColumnMeta.java
new file mode 100644
index 0000000..4a5638d
--- /dev/null
+++ b/source/src/main/java/io/mycat/mycat2/ColumnMeta.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software;Designed and Developed mainly by many Chinese 
+ * opensource volunteers. you can redistribute it and/or modify it under the 
+ * terms of the GNU General Public License version 2 only, as published by the
+ * Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * 
+ * Any questions about this component can be directed to it's project Web address 
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.mycat2;
+
+import java.io.Serializable;
+
+public class ColumnMeta implements Serializable{
+	public static final int COL_TYPE_DECIMAL = 0;
+	public static final int COL_TYPE_INT = 1;
+	public static final int COL_TYPE_SHORT = 2;
+	public static final int COL_TYPE_LONG = 3;
+	public static final int COL_TYPE_FLOAT = 4;
+	public static final int COL_TYPE_DOUBLE = 5;
+	public static final int COL_TYPE_NULL = 6;
+	public static final int COL_TYPE_TIMSTAMP = 7;
+	public static final int COL_TYPE_LONGLONG = 8;
+	public static final int COL_TYPE_INT24 = 9;
+	public static final int COL_TYPE_DATE = 0x0a;
+	public static final int COL_TYPE_DATETIME=0X0C;
+	public static final int COL_TYPE_TIME = 0x0b;
+	public static final int COL_TYPE_YEAR = 0x0d;
+	public static final int COL_TYPE_NEWDATE = 0x0e;
+	public static final int COL_TYPE_VACHAR = 0x0f;
+	public static final int COL_TYPE_BIT = 0x10;
+	public static final int COL_TYPE_NEWDECIMAL = 0xf6;
+	public static final int COL_TYPE_ENUM = 0xf7;
+	public static final int COL_TYPE_SET = 0xf8;
+	public static final int COL_TYPE_TINY_BLOB = 0xf9;
+	public static final int COL_TYPE_TINY_TYPE_MEDIUM_BLOB = 0xfa;
+	public static final int COL_TYPE_TINY_TYPE_LONG_BLOB = 0xfb;
+	public static final int COL_TYPE_BLOB = 0xfc;
+	public static final int COL_TYPE_VAR_STRING = 0xfd;
+	public static final int COL_TYPE_STRING = 0xfe;
+	public static final int COL_TYPE_GEOMETRY = 0xff;
+	public  int colIndex;
+	public final int colType;
+
+	public int decimals;
+
+    public  int avgSumIndex;
+    public  int avgCountIndex;
+
+    public ColumnMeta(int colIndex, int colType) {
+		super();
+		this.colIndex = colIndex;
+		this.colType = colType;
+	}
+    public ColumnMeta(int avgSumIndex, int avgCountIndex, int colType) {
+        super();
+        this.avgSumIndex = avgSumIndex;
+        this.avgCountIndex=avgCountIndex;
+        this.colType = colType;
+    }
+	public int getColIndex() {
+		return colIndex;
+	}
+
+	public int getColType() {
+		return colType;
+	}
+
+	@Override
+	public String toString() {
+		return "ColMeta [colIndex=" + colIndex + ", colType=" + colType + "]";
+	}
+
+}
\ No newline at end of file
diff --git a/source/src/main/java/io/mycat/mycat2/MycatSession.java b/source/src/main/java/io/mycat/mycat2/MycatSession.java
index 4388a68..949c8d0 100644
--- a/source/src/main/java/io/mycat/mycat2/MycatSession.java
+++ b/source/src/main/java/io/mycat/mycat2/MycatSession.java
@@ -9,6 +9,7 @@
 import java.util.List;
 import java.util.concurrent.ConcurrentHashMap;
 
+import io.mycat.mycat2.tasks.AbstractDataNodeMerge;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -53,6 +54,8 @@ public class MycatSession extends AbstractMySQLSession {
     
     public RouteResultsetNode curRouteResultsetNode;
 
+    public AbstractDataNodeMerge merge;
+
 	//所有处理cmd中,用来向前段写数据,或者后端写数据的cmd的
 	public MySQLCommand curSQLCommand;
 
@@ -377,22 +380,22 @@ private String getbackendName(){
 			case ANNOTATION_ROUTE:
 				break;
 			case DB_IN_MULTI_SERVER:
-                RouteResultsetNode[] nodes = this.curRouteResultset.getNodes();
-                String dataNodeName = "";
-                if (nodes != null && nodes.length == 1) {
-                    dataNodeName = nodes[0].getName();
-                } else if (nodes != null && nodes.length > 1 && curRouteResultsetNode != null) {
-                    dataNodeName = curRouteResultsetNode.getName();
-                }
-                DNBean dnBean = ProxyRuntime.INSTANCE.getConfig().getDNBean(dataNodeName);
-                if (dnBean != null) {
-                    backendName = dnBean.getReplica();
-                }
-                if (StringUtils.isEmpty(backendName)) {
-                    backendName = ProxyRuntime.INSTANCE.getConfig().getMycatDataNodeMap()
-                            .get(schema.getDefaultDataNode()).getReplica();
-                }
-				break;
+//                RouteResultsetNode[] nodes = this.curRouteResultset.getNodes();
+//                String dataNodeName = "";
+//                if (nodes != null && nodes.length == 1) {
+//                    dataNodeName = nodes[0].getName();
+//                } else if (nodes != null && nodes.length > 1 && curRouteResultsetNode != null) {
+//                    dataNodeName = curRouteResultsetNode.getName();
+//                }
+//                DNBean dnBean = ProxyRuntime.INSTANCE.getConfig().getDNBean(dataNodeName);
+//                if (dnBean != null) {
+//                    backendName = dnBean.getReplica();
+//                }
+//                if (StringUtils.isEmpty(backendName)) {
+//                    backendName = ProxyRuntime.INSTANCE.getConfig().getMycatDataNodeMap()
+//                            .get(schema.getDefaultDataNode()).getReplica();
+//                }
+				return "repli";
 //			case SQL_PARSE_ROUTE:
 //				break;
 			default:
@@ -425,15 +428,7 @@ private void putbackendMap(MySQLSession mysqlSession){
 	 */
 	public void getBackendByDataNodeName(String dataNodeName,AsynTaskCallBack callback) throws IOException {
 		DNBean dnBean = ProxyRuntime.INSTANCE.getConfig().getDNBean(dataNodeName);
-		String repBeanName = "";
-		if (dnBean != null) {
-			repBeanName = dnBean.getReplica();
-		}
-		if (StringUtils.isEmpty(repBeanName)) {
-			repBeanName = ProxyRuntime.INSTANCE.getConfig().getMycatDataNodeMap()
-					.get(schema.getDefaultDataNode()).getReplica();
-			logger.warn("failed to get the replication group for the specified datanode!!! and will set the default data node");
-		}
+		String repBeanName = dnBean.getReplica();
 		getBackendByRepBeanName(repBeanName,callback);
 	}
 
diff --git a/source/src/main/java/io/mycat/mycat2/PackWraper.java b/source/src/main/java/io/mycat/mycat2/PackWraper.java
new file mode 100644
index 0000000..6144d14
--- /dev/null
+++ b/source/src/main/java/io/mycat/mycat2/PackWraper.java
@@ -0,0 +1,24 @@
+package io.mycat.mycat2;
+
+
+/**
+ * Created by zagnix on 2016/7/6.
+ */
+
+import java.nio.ByteBuffer;
+
+/**
+ * 一行数据是从哪个节点来的。
+ * 通过dataNode查找对应的sorter,
+ * 将数据放到对应的datanode的sorter,
+ * 进行排序.
+ */
+public final class PackWraper {
+    public ByteBuffer rowData;
+    public String dataNode;
+
+    public PackWraper(ByteBuffer rowData, String dataNode) {
+        this.rowData = rowData;
+        this.dataNode = dataNode;
+    }
+}
diff --git a/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java b/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java
index 065d54f..ceec989 100644
--- a/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java
+++ b/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java
@@ -1,26 +1,33 @@
 package io.mycat.mycat2.cmds.multinode;
 
-import java.io.IOException;
-import java.nio.channels.SelectionKey;
-import java.util.concurrent.locks.ReentrantLock;
-
 import io.mycat.mycat2.MySQLSession;
 import io.mycat.mycat2.MycatSession;
+import io.mycat.mycat2.beans.conf.DNBean;
 import io.mycat.mycat2.cmds.AbstractMultiDNExeCmd;
+import io.mycat.mycat2.cmds.ComInitDB;
+import io.mycat.mycat2.cmds.DirectPassthrouhCmd;
 import io.mycat.mycat2.console.SessionKeyEnum;
+import io.mycat.mycat2.route.RouteResultset;
 import io.mycat.mycat2.route.RouteResultsetNode;
+import io.mycat.mycat2.tasks.HeapDataNodeMergeManager;
+import io.mycat.mycat2.tasks.MyRowStream;
+import io.mycat.mycat2.tasks.SQLQueryResultTask;
 import io.mycat.mysql.packet.ErrorPacket;
-import io.mycat.mysql.packet.MySQLPacket;
 import io.mycat.proxy.ProxyBuffer;
+import io.mycat.proxy.ProxyRuntime;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.channels.SelectionKey;
 
 /**
- * 
  * DbInMultiServerCmd
  * 

* DbInMultiServer模式下的多节点执行Command类 *

* Creation Time: 2018-01-20 - * + * * @author zhangsiwei * @since 2.0 */ @@ -28,101 +35,117 @@ public class DbInMultiServerCmd extends AbstractMultiDNExeCmd { public static final DbInMultiServerCmd INSTANCE = new DbInMultiServerCmd(); - private int backendWritedCount = 0, executeCount = 0; - - private boolean isFirst = true; - - private final ReentrantLock lock = new ReentrantLock(); - - @Override - public boolean procssSQL(MycatSession session) throws IOException { - RouteResultsetNode[] nodes = session.curRouteResultset.getNodes(); - for (int i = 0; i < nodes.length; i++) { + private static final Logger logger = LoggerFactory.getLogger(ComInitDB.class); + private void broadcast(MycatSession mycatSession, RouteResultsetNode[] nodes) throws IOException { + ProxyBuffer curBuffer = mycatSession.proxyBuffer; + int readIndex = curBuffer.writeIndex; + int readMark = curBuffer.readMark; + int size = nodes.length; + for (int i = 0; i < size; i++) { RouteResultsetNode node = nodes[i]; - session.curRouteResultsetNode = node; /* * 获取后端连接可能涉及到异步处理,这里需要先取消前端读写事件 */ - session.clearReadWriteOpts(); - - session.getBackend((mysqlsession, sender, success, result) -> { - - ProxyBuffer curBuffer = session.proxyBuffer; - // 切换 buffer 读写状态 - curBuffer.flip(); - +// curBuffer.readIndex = readIndex; +// curBuffer.readMark = readMark; + mycatSession.getBackendByDataNodeName(node.getName(), (mysqlsession, sender, success, result) -> { if (success) { - // 没有读取,直接透传时,需要指定 透传的数据 截止位置 - curBuffer.readIndex = curBuffer.writeIndex; - // 改变 owner,对端Session获取,并且感兴趣写事件 - session.giveupOwner(SelectionKey.OP_WRITE); - if (isFirst) { - isFirst = false; - } else { - // 因为第一次把报文透传到mysql后端后,readmark指针会来到readIndex的位置, - // 所以第一次之后再要透传同样的指令,就要先把readmark重置回原来的位置。 - curBuffer.readMark = - curBuffer.readIndex - session.curMSQLPackgInf.pkgLength; - } try { - mysqlsession.writeToChannel(); - } catch (IOException e) { - session.closeBackendAndResponseError(mysqlsession, success, + MyRowStream stream = new 
MyRowStream(mysqlsession); + stream.setAbstractDataNodeMerge(mycatSession.merge); + stream.fetchStream(node.getStatement()); + } catch (Exception e) { + mycatSession.closeBackendAndResponseError(mysqlsession, success, ((ErrorPacket) result)); } } else { - session.closeBackendAndResponseError(mysqlsession, success, + mycatSession.closeBackendAndResponseError(mysqlsession, success, ((ErrorPacket) result)); } }); } - return false; } @Override - public boolean onBackendResponse(MySQLSession session) throws IOException { - // 首先进行一次报文的读取操作 - if (!session.readFromChannel()) { - return false; - } - // 进行报文处理的流程化 - boolean nextReadFlag = false; - do { - // 进行报文的处理流程 - nextReadFlag = session.getMycatSession().commandHandler.procss(session); - } while (nextReadFlag); - - // 获取当前是否结束标识 - Boolean check = (Boolean) session.getSessionAttrMap() - .get(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey()); - - MycatSession mycatSession = session.getMycatSession(); - ProxyBuffer buffer = session.getProxyBuffer(); - - if (++executeCount < session.getMycatSession().curRouteResultset.getNodes().length) { - // DbInMultiServer模式下,不考虑show tables等DSL语句的话,只有对全局表的操作才会跨节点,也就是对全局表的DDL,DML语句, - // 而对每个节点的全局表操作完后返回的报文都是一样的,因此只需要拿最后一次的报文返回给客户端即可 - if (session.curMSQLPackgInf.pkgType == MySQLPacket.OK_PACKET) { - // 因为不是最后一个节点的返回报文,所以这里讲readmark设为readIndex,也就是丢弃掉这次报文(仅考虑全局表的DDL, DML返回报文) - // TODO show tables类的DSL语句就不适用,这个后续考虑时再优化 - session.getProxyBuffer().readMark = session.getProxyBuffer().readIndex; + public boolean procssSQL(MycatSession mycatSession) throws IOException { + DNBean dnBean = ProxyRuntime.INSTANCE.getConfig().getDNBean("dn1"); + logger.warn("dev 版本暂时还没有实现路由,默认路由到dn1,dn2"); + DNBean dnBean2 = ProxyRuntime.INSTANCE.getConfig().getDNBean("dn2"); + mycatSession.curRouteResultset = new RouteResultset("", (byte) 0); + mycatSession.curRouteResultset.setNodes(new RouteResultsetNode[]{ + new RouteResultsetNode(dnBean.getName(), (byte) 1, mycatSession.sqlContext.getRealSQL(0)), + new 
RouteResultsetNode(dnBean2.getName(), (byte) 1, mycatSession.sqlContext.getRealSQL(0)) + }); + RouteResultsetNode[] nodes = mycatSession.curRouteResultset.getNodes(); + if (true) { + if (null != mycatSession.curRouteResultset) { + mycatSession.merge = new HeapDataNodeMergeManager(mycatSession.curRouteResultset, mycatSession); + if (nodes != null && nodes.length > 0) { + broadcast(mycatSession, nodes); + return false; + } } - return false; - } + } else { + //lobal table optimization + - // 检查到当前已经完成,执行添加操作 - if (null != check && check) { - // 当知道操作完成后,前段的注册感兴趣事件为读取 - mycatSession.takeOwner(SelectionKey.OP_READ); - } - // 未完成执行继续读取操作 - else { - // 直接透传报文 - mycatSession.takeOwner(SelectionKey.OP_WRITE); } - buffer.flip(); - executeCount = 0; - mycatSession.writeToChannel(); + return DirectPassthrouhCmd.INSTANCE.procssSQL(mycatSession); + } + + @Override + public boolean onBackendResponse(MySQLSession session) throws IOException { +// session.proxyBuffer. +// + +// task.onSocketRead(session); + // 首先进行一次报文的读取操作 +// if (!session.readFromChannel()) { +// return false; +// } +// // 进行报文处理的流程化 +// boolean nextReadFlag = false; +// do { +// // 进行报文的处理流程 +// CommandHandler commandHandler = session.getMycatSession().commandHandler; +// if (commandHandler == C) +// nextReadFlag = commandHandler.procss(session); +// } while (nextReadFlag); +// +// // 获取当前是否结束标识 +// Boolean check = (Boolean) session.getSessionAttrMap() +// .get(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey()); +// +// MycatSession mycatSession = session.getMycatSession(); +// ProxyBuffer buffer = session.getProxyBuffer(); +// +//// if (++executeCount < session.getMycatSession().curRouteResultset.getNodes().length) { +//// // DbInMultiServer模式下,不考虑show tables等DSL语句的话,只有对全局表的操作才会跨节点,也就是对全局表的DDL,DML语句, +//// // 而对每个节点的全局表操作完后返回的报文都是一样的,因此只需要拿最后一次的报文返回给客户端即可 +//// if (session.curMSQLPackgInf.pkgType == MySQLPacket.OK_PACKET) { +//// // 因为不是最后一个节点的返回报文,所以这里讲readmark设为readIndex,也就是丢弃掉这次报文(仅考虑全局表的DDL, DML返回报文) +//// 
// TODO show tables类的DSL语句就不适用,这个后续考虑时再优化 +//// session.getProxyBuffer().readMark = session.getProxyBuffer().readIndex; +//// } +//// return false; +//// } +// +// // 检查到当前已经完成,执行添加操作 +// if (null != check && check) { +// // 当知道操作完成后,前段的注册感兴趣事件为读取 +// mycatSession.takeOwner(SelectionKey.OP_READ); +// } +// // 未完成执行继续读取操作 +// else { +// // 直接透传报文 +// mycatSession.takeOwner(SelectionKey.OP_WRITE); +// } +// buffer.flip(); +// // executeCount = 0; +// mycatSession.writeToChannel(); +// return true; + + return false; } @@ -154,32 +177,26 @@ public boolean onFrontWriteFinished(MycatSession session) throws IOException { @Override public boolean onBackendWriteFinished(MySQLSession session) throws IOException { - - ++backendWritedCount; + SQLQueryResultTask task = new SQLQueryResultTask(session.getMycatSession().merge); + session.setCurNIOHandler(task); session.proxyBuffer.flip(); session.change2ReadOpts(); - if (backendWritedCount >= session.getMycatSession().curRouteResultset.getNodes().length) { - isFirst = true; - backendWritedCount = 0; - // 绝大部分情况下,前端把数据写完后端发送出去后,就等待后端返回数据了, - // 向后端写入完成数据后,则从后端读取数据 - // session.proxyBuffer.flip(); - // 由于单工模式,在向后端写入完成后,需要从后端进行数据读取 -// session.change2ReadOpts(); - } return false; } @Override public void clearFrontResouces(MycatSession session, boolean sessionCLosed) { - // TODO Auto-generated method stub - super.clearFrontResouces(session, sessionCLosed); + if (sessionCLosed) { + session.recycleAllocedBuffer(session.getProxyBuffer()); + session.unbindAllBackend(); + } } @Override - public void clearBackendResouces(MySQLSession session, boolean sessionCLosed) { - // TODO Auto-generated method stub - super.clearBackendResouces(session, sessionCLosed); + public void clearBackendResouces(MySQLSession mysqlSession, boolean sessionCLosed) { + if (sessionCLosed) { + mysqlSession.recycleAllocedBuffer(mysqlSession.getProxyBuffer()); + } } } diff --git a/source/src/main/java/io/mycat/mycat2/cmds/strategy/AbstractCmdStrategy.java 
b/source/src/main/java/io/mycat/mycat2/cmds/strategy/AbstractCmdStrategy.java index e13d39d..1e27c5d 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/strategy/AbstractCmdStrategy.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/strategy/AbstractCmdStrategy.java @@ -117,9 +117,9 @@ public boolean matchMySqlCommand(MycatSession session) { command = DirectPassthrouhCmd.INSTANCE; } - if (!delegateRoute(session)) { - return false; - } +// if (!delegateRoute(session)) { +// return false; +// } /** * 设置原始处理命令 diff --git a/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java b/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java index fb154b3..b5f96e6 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java @@ -2,6 +2,7 @@ import java.io.IOException; +import io.mycat.mycat2.cmds.multinode.DbInMultiServerCmd; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -76,7 +77,7 @@ protected void initMySqlCmdHandler() { MYSQLCOMMANDMAP.put(BufferSQLContext.DROP_SQL, DirectPassthrouhCmd.INSTANCE); MYSQLCOMMANDMAP.put(BufferSQLContext.COMMIT_SQL, SqlComCommitCmd.INSTANCE); MYSQLCOMMANDMAP.put(BufferSQLContext.ROLLBACK_SQL, SqlComRollBackCmd.INSTANCE); - MYSQLCOMMANDMAP.put(BufferSQLContext.SELECT_SQL, DirectPassthrouhCmd.INSTANCE); + MYSQLCOMMANDMAP.put(BufferSQLContext.SELECT_SQL, DbInMultiServerCmd.INSTANCE); MYSQLCOMMANDMAP.put(BufferSQLContext.BEGIN_SQL, SqlComBeginCmd.INSTANCE); MYSQLCOMMANDMAP.put(BufferSQLContext.START_SQL, SqlComStartCmd.INSTANCE); MYSQLCOMMANDMAP.put(BufferSQLContext.USE_SQL, SqlComStartCmd.INSTANCE); diff --git a/source/src/main/java/io/mycat/mycat2/hbt/TableMeta.java b/source/src/main/java/io/mycat/mycat2/hbt/TableMeta.java index 45d0f1c..efde6a0 100644 --- a/source/src/main/java/io/mycat/mycat2/hbt/TableMeta.java +++ 
b/source/src/main/java/io/mycat/mycat2/hbt/TableMeta.java @@ -4,6 +4,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collector; +import java.util.stream.Collectors; import io.mycat.mysql.packet.EOFPacket; import io.mycat.mysql.packet.RowDataPacket; @@ -18,7 +20,7 @@ public class TableMeta { public String table; public String alias; - public List> fieldValues; + public List> fieldValues; public int fieldCount; public ResultSetMeta headerResultSetMeta; private byte packetId; @@ -44,7 +46,7 @@ public void init(ResultSetMeta resultSetMeta) { } - public void addFieldValues(List row) { + public synchronized void addFieldValues(List row) { fieldValues.add(row); } @@ -69,8 +71,8 @@ public void writeRowData(ProxyBuffer buffer) { for(byte[] value : fieldValue) { dataPacket.add(value); } - - if(dataPacket.calcPacketSize() + ParseUtil.msyql_packetHeaderSize <= buffer.getBuffer().remaining()) { + int size = dataPacket.calcPacketSize() + ParseUtil.msyql_packetHeaderSize; + if(size <= buffer.getBuffer().remaining()) { dataPacket.packetId = packetId ++; dataPacket.write(buffer); this.writeRowDataIndex ++; @@ -183,5 +185,18 @@ public List> getFieldValues() { public void setFieldValues(List> fieldValues) { this.fieldValues = fieldValues; } - + + @Override + public String toString() { + return "TableMeta{" + + "table='" + table + '\'' + + ", alias='" + alias + '\'' + + ", fieldValues=" + fieldValues.stream().flatMap(i->i.stream()).map(i->new String(i)) + .collect(Collectors.joining(","))+ + ", fieldCount=" + fieldCount + + ", headerResultSetMeta=" + headerResultSetMeta + + ", packetId=" + packetId + + ", writeRowDataIndex=" + writeRowDataIndex + + '}'; + } } diff --git a/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java b/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java index 47d1ed7..a026253 100644 --- a/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java +++ 
b/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,20 +16,22 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. 
* */ package io.mycat.mycat2.route; +import io.mycat.mycat2.MySQLSession; +import io.mycat.util.FormatUtil; + import java.io.Serializable; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; - -import io.mycat.util.FormatUtil; +import java.util.concurrent.atomic.AtomicInteger; /** * @author mycat @@ -39,7 +41,7 @@ public final class RouteResultset implements Serializable { private final byte sqlType; private RouteResultsetNode[] nodes; // 路由结果节点 private Set subTables; - + private int limitStart; private boolean cacheAble; @@ -60,17 +62,17 @@ public final class RouteResultset implements Serializable { //是否自动提交,此属性主要用于记录ServerConnection上的autocommit状态 private boolean autocommit = true; - private boolean isLoadData=false; + private boolean isLoadData = false; //是否可以在从库运行,此属性主要供RouteResultsetNode获取 private Boolean canRunInReadDB; // 强制走 master,可以通过 RouteResultset的属性canRunInReadDB=false // 传给 RouteResultsetNode 来实现,但是 强制走 slave需要增加一个属性来实现: - private Boolean runOnSlave = null; // 默认null表示不施加影响 + private Boolean runOnSlave = null; // 默认null表示不施加影响 - //key=dataNode value=slot - private Map dataNodeSlotMap=new HashMap<>(); + //key=dataNode value=slot + private Map dataNodeSlotMap = new HashMap<>(); private boolean selectForUpdate; @@ -81,10 +83,20 @@ public boolean isSelectForUpdate() { public void setSelectForUpdate(boolean selectForUpdate) { this.selectForUpdate = selectForUpdate; } - - - private List tables; + + private List tables; + + public AtomicInteger count = new AtomicInteger(0); + + public void countDown(MySQLSession session, Runnable runnable){ + int c = count.decrementAndGet(); + if (c ==0){ + System.out.println("count=>"+c); + runnable.run(); + count.set(nodes.length); + } + } public List getTables() { return tables; } @@ -102,28 +114,26 @@ public void setDataNodeSlotMap(Map dataNodeSlotMap) { } public Boolean getRunOnSlave() { - return runOnSlave; - } + return runOnSlave; + } - public void setRunOnSlave(Boolean 
runOnSlave) { - this.runOnSlave = runOnSlave; - } + public void setRunOnSlave(Boolean runOnSlave) { + this.runOnSlave = runOnSlave; + } // TODO 待支持存储过程 by zhangsiwei /* * private Procedure procedure; - * + * * public Procedure getProcedure() { return procedure; } - * + * * public void setProcedure(Procedure procedure) { this.procedure = procedure; } */ - public boolean isLoadData() - { + public boolean isLoadData() { return isLoadData; } - public void setLoadData(boolean isLoadData) - { + public void setLoadData(boolean isLoadData) { this.isLoadData = isLoadData; } @@ -143,7 +153,7 @@ public void setGlobalTable(boolean globalTableFlag) { this.globalTableFlag = globalTableFlag; } - public RouteResultset(String stmt, byte sqlType) { + public /**/RouteResultset(String stmt, byte sqlType) { this.statement = stmt; this.limitSize = -1; this.sqlType = sqlType; @@ -159,12 +169,9 @@ public void resetNodes() { public void copyLimitToNodes() { - if(nodes!=null) - { - for (RouteResultsetNode node : nodes) - { - if(node.getLimitSize()==-1&&node.getLimitStart()==0) - { + if (nodes != null) { + for (RouteResultsetNode node : nodes) { + if (node.getLimitSize() == -1 && node.getLimitStart() == 0) { node.setLimitStart(limitStart); node.setLimitSize(limitSize); } @@ -228,11 +235,10 @@ public RouteResultsetNode[] getNodes() { } public void setNodes(RouteResultsetNode[] nodes) { - if(nodes!=null) - { - int nodeSize=nodes.length; - for (RouteResultsetNode node : nodes) - { + count.set(nodes.length); + if (nodes != null) { + int nodeSize = nodes.length; + for (RouteResultsetNode node : nodes) { node.setTotalNodeSize(nodeSize); } @@ -261,10 +267,8 @@ public boolean isCallStatement() { public void setCallStatement(boolean callStatement) { this.callStatement = callStatement; - if(nodes!=null) - { - for (RouteResultsetNode node : nodes) - { + if (nodes != null) { + for (RouteResultsetNode node : nodes) { node.setCallStatement(callStatement); } @@ -287,22 +291,22 @@ public void 
setCanRunInReadDB(Boolean canRunInReadDB) { this.canRunInReadDB = canRunInReadDB; } - public void setSubTables(Set subTables) { - this.subTables = subTables; - } + public void setSubTables(Set subTables) { + this.subTables = subTables; + } + + public Set getSubTables() { + return this.subTables; + } - public Set getSubTables() { - return this.subTables; - } - - public boolean isDistTable(){ - if(this.getSubTables()!=null && !this.getSubTables().isEmpty() ){ - return true; - } - return false; - } + public boolean isDistTable() { + if (this.getSubTables() != null && !this.getSubTables().isEmpty()) { + return true; + } + return false; + } - @Override + @Override public String toString() { StringBuilder s = new StringBuilder(); s.append(statement).append(", route={"); diff --git a/source/src/main/java/io/mycat/mycat2/tasks/AbstractDataNodeMerge.java b/source/src/main/java/io/mycat/mycat2/tasks/AbstractDataNodeMerge.java new file mode 100644 index 0000000..976d842 --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/tasks/AbstractDataNodeMerge.java @@ -0,0 +1,85 @@ +package io.mycat.mycat2.tasks; + +import io.mycat.mycat2.ColumnMeta; +import io.mycat.mycat2.MycatSession; +import io.mycat.mycat2.PackWraper; +import io.mycat.mycat2.route.RouteResultset; +import org.apache.log4j.Logger; + +import java.nio.ByteBuffer; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.LinkedBlockingQueue; + +public abstract class AbstractDataNodeMerge implements Runnable { + + private static Logger LOGGER = Logger.getLogger(AbstractDataNodeMerge.class); + /** + * row 有多少col + */ + protected int fieldCount; + + /** + * 本次select的路由缓存集 + */ + protected final RouteResultset rrs; + /** + * 夸分片处理handler + */ + protected MultiNodeQueryHandler multiQueryHandler = null; + + /** + * 是否执行流式结果集输出 + */ + + protected boolean isStreamOutputResult = false; + + /** + * rowData缓存队列 + */ + 
protected LinkedBlockingQueue packs = new LinkedBlockingQueue<>(); + + MycatSession mycatSession; + /** + * 分片结束包 + */ + protected static final PackWraper END_FLAG_PACK = new PackWraper(null,null); + public AbstractDataNodeMerge(RouteResultset rrs, MycatSession mycatSession) { + this.rrs = rrs; + this.mycatSession = mycatSession;; + this.executor = Executors.newSingleThreadExecutor(); + } + public void onEOF() { + packs.add(END_FLAG_PACK); + executor.submit(this); + } + ExecutorService executor; + + public boolean onNewRecords(String repName, ByteBuffer rowData) { + /* + 读取的数据范围是 readIndex --- writeIndex 之间的数据. + */ + System.out.println("onNewRecords"+repName + rowData); + if(packs.offer(new PackWraper(rowData,repName))){ + executor.submit(this); + return true; + }else{ + return false; + } + } + + public abstract void onRowMetaData(Map columToIndx, int fieldCount); + + public RouteResultset getRrs() { + return this.rrs; + } + + /** + * 做最后的结果集输出 + * @return (最多i*(offset+size)行数据) + */ + public abstract Iterator getResults(byte[] eof); + public abstract void clear(); +} diff --git a/source/src/main/java/io/mycat/mycat2/tasks/DataNodeMergeManager.java b/source/src/main/java/io/mycat/mycat2/tasks/DataNodeMergeManager.java new file mode 100644 index 0000000..04846c9 --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/tasks/DataNodeMergeManager.java @@ -0,0 +1,435 @@ +//package io.mycat.mycat2.tasks; +// +//import io.mycat.mycat2.route.RouteResultset; +//import io.mycat.mysql.packet.RowDataPacket; +//import io.mycat.util.StringUtil; +// +//import java.io.IOException; +//import java.nio.ByteBuffer; +//import java.util.*; +//import java.util.concurrent.atomic.AtomicBoolean; +// +//public class DataNodeMergeManager { +// public DataNodeMergeManager(MultiNodeQueryHandler handler, RouteResultset rrs, AtomicBoolean isMiddleResultDone) { +// super(handler,rrs); +// this.isMiddleResultDone = isMiddleResultDone; +// this.myCatMemory = 
MycatServer.getInstance().getMyCatMemory(); +// this.memoryManager = myCatMemory.getResultMergeMemoryManager(); +// this.conf = myCatMemory.getConf(); +// this.limitStart = rrs.getLimitStart(); +// this.limitSize = rrs.getLimitSize(); +// } +// +// +// public void onRowMetaData(Map columToIndx, int fieldCount) throws IOException { +// +// if (LOGGER.isDebugEnabled()) { +// LOGGER.debug("field metadata keys:" + columToIndx != null ? columToIndx.keySet() : "null"); +// LOGGER.debug("field metadata values:" + columToIndx != null ? columToIndx.values() : "null"); +// } +// +// OrderCol[] orderCols = null; +// StructType schema = null; +// UnsafeExternalRowSorter.PrefixComputer prefixComputer = null; +// PrefixComparator prefixComparator = null; +// +// +// DataNodeMemoryManager dataNodeMemoryManager = null; +// UnsafeExternalRowSorter sorter = null; +// +// int[] groupColumnIndexs = null; +// this.fieldCount = fieldCount; +// +// if (rrs.getGroupByCols() != null) { +// groupColumnIndexs = toColumnIndex(rrs.getGroupByCols(), columToIndx); +// if (LOGGER.isDebugEnabled()) { +// for (int i = 0; i mergCols = new LinkedList(); +// Map mergeColsMap = rrs.getMergeCols(); +// +// if (mergeColsMap != null) { +// +// if (LOGGER.isDebugEnabled() && rrs.getMergeCols() != null) { +// LOGGER.debug("isHasAggrColumn:" + rrs.getMergeCols().toString()); +// } +// for (Map.Entry mergEntry : mergeColsMap +// .entrySet()) { +// String colName = mergEntry.getKey().toUpperCase(); +// int type = mergEntry.getValue(); +// if (MergeCol.MERGE_AVG == type) { +// ColMeta sumColMeta = columToIndx.get(colName + "SUM"); +// ColMeta countColMeta = columToIndx.get(colName +// + "COUNT"); +// if (sumColMeta != null && countColMeta != null) { +// ColMeta colMeta = new ColMeta(sumColMeta.colIndex, +// countColMeta.colIndex, +// sumColMeta.getColType()); +// mergCols.add(new MergeCol(colMeta, mergEntry +// .getValue())); +// } +// } else { +// ColMeta colMeta = columToIndx.get(colName); +// 
mergCols.add(new MergeCol(colMeta, mergEntry.getValue())); +// } +// } +// } +// +// // add no alias merg column +// for (Map.Entry fieldEntry : columToIndx.entrySet()) { +// String colName = fieldEntry.getKey(); +// int result = MergeCol.tryParseAggCol(colName); +// if (result != MergeCol.MERGE_UNSUPPORT +// && result != MergeCol.MERGE_NOMERGE) { +// mergCols.add(new MergeCol(fieldEntry.getValue(), result)); +// } +// } +// +// /** +// * Group操作 +// */ +// MergeCol[] mergColsArrays = mergCols.toArray(new MergeCol[mergCols.size()]); +// unsafeRowGrouper = new UnsafeRowGrouper(columToIndx,rrs.getGroupByCols(), +// mergColsArrays, +// rrs.getHavingCols()); +// +// if(mergColsArrays!=null&&mergColsArrays.length>0){ +// mergeColsIndex = new int[mergColsArrays.length]; +// for(int i = 0;i orders = rrs.getOrderByCols(); +// orderCols = new OrderCol[orders.size()]; +// int i = 0; +// for (Map.Entry entry : orders.entrySet()) { +// String key = StringUtil.removeBackquote(entry.getKey() +// .toUpperCase()); +// ColMeta colMeta = columToIndx.get(key); +// if (colMeta == null) { +// throw new IllegalArgumentException( +// "all columns in order by clause should be in the selected column list!" 
+// + entry.getKey()); +// } +// orderCols[i++] = new OrderCol(colMeta, entry.getValue()); +// } +// +// /** +// * 构造全局排序器 +// */ +// schema = new StructType(columToIndx,fieldCount); +// schema.setOrderCols(orderCols); +// +// prefixComputer = new RowPrefixComputer(schema); +// +//// if(orderCols.length>0 +//// && orderCols[0].getOrderType() +//// == OrderCol.COL_ORDER_TYPE_ASC){ +//// prefixComparator = PrefixComparators.LONG; +//// }else { +//// prefixComparator = PrefixComparators.LONG_DESC; +//// } +// +// prefixComparator = getPrefixComparator(orderCols); +// +// dataNodeMemoryManager = +// new DataNodeMemoryManager(memoryManager,Thread.currentThread().getId()); +// +// /** +// * 默认排序,只是将数据连续存储到内存中即可。 +// */ +// globalSorter = new UnsafeExternalRowSorter( +// dataNodeMemoryManager, +// myCatMemory, +// schema, +// prefixComparator, prefixComputer, +// conf.getSizeAsBytes("mycat.buffer.pageSize","32k"), +// false/**是否使用基数排序*/, +// true/**排序*/); +// } +// +// +// if(conf.getBoolean("mycat.stream.output.result",false) +// && globalSorter == null +// && unsafeRowGrouper == null){ +// setStreamOutputResult(true); +// }else { +// +// /** +// * 1.schema +// */ +// +// schema = new StructType(columToIndx,fieldCount); +// schema.setOrderCols(orderCols); +// +// /** +// * 2 .PrefixComputer +// */ +// prefixComputer = new RowPrefixComputer(schema); +// +// /** +// * 3 .PrefixComparator 默认是ASC,可以选择DESC +// */ +// +// prefixComparator = PrefixComparators.LONG; +// +// +// dataNodeMemoryManager = new DataNodeMemoryManager(memoryManager, +// Thread.currentThread().getId()); +// +// globalMergeResult = new UnsafeExternalRowSorter( +// dataNodeMemoryManager, +// myCatMemory, +// schema, +// prefixComparator, +// prefixComputer, +// conf.getSizeAsBytes("mycat.buffer.pageSize", "32k"), +// false,/**是否使用基数排序*/ +// false/**不排序*/); +// } +// } +// +// private PrefixComparator getPrefixComparator(OrderCol[] orderCols) { +// PrefixComparator prefixComparator = null; +// OrderCol 
firstOrderCol = orderCols[0]; +// int orderType = firstOrderCol.getOrderType(); +// int colType = firstOrderCol.colMeta.colType; +// +// switch (colType) { +// case ColMeta.COL_TYPE_INT: +// case ColMeta.COL_TYPE_LONG: +// case ColMeta.COL_TYPE_INT24: +// case ColMeta.COL_TYPE_SHORT: +// case ColMeta.COL_TYPE_LONGLONG: +// prefixComparator = (orderType == OrderCol.COL_ORDER_TYPE_ASC ? PrefixComparators.LONG : PrefixComparators.LONG_DESC); +// break; +// case ColMeta.COL_TYPE_FLOAT: +// case ColMeta.COL_TYPE_DOUBLE: +// case ColMeta.COL_TYPE_DECIMAL: +// case ColMeta.COL_TYPE_NEWDECIMAL: +// prefixComparator = (orderType == OrderCol.COL_ORDER_TYPE_ASC ? PrefixComparators.DOUBLE : PrefixComparators.DOUBLE_DESC); +// break; +// case ColMeta.COL_TYPE_DATE: +// case ColMeta.COL_TYPE_TIMSTAMP: +// case ColMeta.COL_TYPE_TIME: +// case ColMeta.COL_TYPE_YEAR: +// case ColMeta.COL_TYPE_DATETIME: +// case ColMeta.COL_TYPE_NEWDATE: +// case ColMeta.COL_TYPE_BIT: +// case ColMeta.COL_TYPE_VAR_STRING: +// case ColMeta.COL_TYPE_STRING: +// case ColMeta.COL_TYPE_ENUM: +// case ColMeta.COL_TYPE_SET: +// prefixComparator = (orderType == OrderCol.COL_ORDER_TYPE_ASC ? PrefixComparators.BINARY : PrefixComparators.BINARY_DESC); +// break; +// default: +// prefixComparator = (orderType == OrderCol.COL_ORDER_TYPE_ASC ? 
PrefixComparators.LONG : PrefixComparators.LONG_DESC); +// break; +// } +// +// return prefixComparator; +// } +// +// @Override +// public List getResults(byte[] eof) +// +// @Override +// public void run() { +// +// if (!running.compareAndSet(false, true)) { +// return; +// } +// +// boolean nulpack = false; +// +// try { +// for (; ; ) { +// final PackWraper pack = packs.poll(); +// +// if (pack == null) { +// nulpack = true; +// break; +// } +// if (pack == END_FLAG_PACK) { +// +// hasEndFlag = true; +// +// if(packs.peek()!=null){ +// packs.add(pack); +// continue; +// } +// +// /** +// * 最后一个节点datenode发送了row eof packet说明了整个 +// * 分片数据全部接收完成,进而将结果集全部发给你Mycat 客户端 +// */ +// final int warningCount = 0; +// final EOFPacket eofp = new EOFPacket(); +// final ByteBuffer eof = ByteBuffer.allocate(9); +// BufferUtil.writeUB3(eof, eofp.calcPacketSize()); +// eof.put(eofp.packetId); +// eof.put(eofp.fieldCount); +// BufferUtil.writeUB2(eof,warningCount); +// BufferUtil.writeUB2(eof,eofp.status); +// final ServerConnection source = multiQueryHandler.getSession().getSource(); +// final byte[] array = eof.array(); +// +// +// Iterator iters = null; +// +// +// if (unsafeRowGrouper != null){ +// /** +// * group by里面需要排序情况 +// */ +// if (globalSorter != null){ +// iters = unsafeRowGrouper.getResult(globalSorter); +// }else { +// iters = unsafeRowGrouper.getResult(globalMergeResult); +// } +// +// }else if(globalSorter != null){ +// +// iters = globalSorter.sort(); +// +// }else if (!isStreamOutputResult){ +// +// iters = globalMergeResult.sort(); +// +// } +// +// if(iters != null){ +// multiQueryHandler.outputMergeResult(source,array,iters,isMiddleResultDone); +// } +// break; +// } +// +// unsafeRow = new UnsafeRow(fieldCount); +// bufferHolder = new BufferHolder(unsafeRow,0); +// unsafeRowWriter = new UnsafeRowWriter(bufferHolder,fieldCount); +// bufferHolder.reset(); +// +// /** +// *构造一行row,将对应的col填充. 
+// */ +// MySQLMessage mm = new MySQLMessage(pack.rowData); +// mm.readUB3(); +// mm.read(); +// +// int nullnum = 0; +// for (int i = 0; i < fieldCount; i++) { +// byte[] colValue = mm.readBytesWithLength(); +// if (colValue != null) +// unsafeRowWriter.write(i,colValue); +// else +// { +// if(mergeColsIndex!=null&&mergeColsIndex.length>0){ +// +// if(Arrays.binarySearch(mergeColsIndex, i)<0){ +// nullnum++; +// } +// } +// unsafeRow.setNullAt(i); +// } +// } +// +// if(mergeColsIndex!=null&&mergeColsIndex.length>0){ +// if(nullnum == (fieldCount - mergeColsIndex.length)){ +// if(!hasEndFlag){ +// packs.add(pack); +// continue; +// } +// } +// } +// +// unsafeRow.setTotalSize(bufferHolder.totalSize()); +// +// if(unsafeRowGrouper != null){ +// unsafeRowGrouper.addRow(unsafeRow); +// }else if (globalSorter != null){ +// globalSorter.insertRow(unsafeRow); +// }else { +// globalMergeResult.insertRow(unsafeRow); +// } +// +// unsafeRow = null; +// bufferHolder = null; +// unsafeRowWriter = null; +// } +// +// } catch (final Exception e) { +// e.printStackTrace(); +// multiQueryHandler.handleDataProcessException(e); +// } finally { +// running.set(false); +// if (nulpack && !packs.isEmpty()) { +// this.run(); +// } +// } +// } +// +// /** +// * 释放DataNodeMergeManager所申请的资源 +// */ +// public void clear() { +// +// unsafeRows.clear(); +// +// synchronized (this) +// { +// if (unsafeRowGrouper != null) { +// unsafeRowGrouper.free(); +// unsafeRowGrouper = null; +// } +// } +// +// if(globalSorter != null){ +// globalSorter.cleanupResources(); +// globalSorter = null; +// } +// +// if (globalMergeResult != null){ +// globalMergeResult.cleanupResources(); +// globalMergeResult = null; +// } +// } +//} diff --git a/source/src/main/java/io/mycat/mycat2/tasks/HeapDataNodeMergeManager.java b/source/src/main/java/io/mycat/mycat2/tasks/HeapDataNodeMergeManager.java new file mode 100644 index 0000000..e9e5522 --- /dev/null +++ 
b/source/src/main/java/io/mycat/mycat2/tasks/HeapDataNodeMergeManager.java @@ -0,0 +1,123 @@ +package io.mycat.mycat2.tasks; + +import io.mycat.mycat2.ColumnMeta; +import io.mycat.mycat2.MycatSession; +import io.mycat.mycat2.PackWraper; +import io.mycat.mycat2.console.SessionKeyEnum; +import io.mycat.mycat2.hbt.TableMeta; +import io.mycat.mycat2.route.RouteResultset; +import io.mycat.proxy.ProxyBuffer; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +public class HeapDataNodeMergeManager extends AbstractDataNodeMerge { + TableMeta tableMeta; + /** + * 标志业务线程是否启动了? + */ + protected final AtomicBoolean running = new AtomicBoolean(false); + + public HeapDataNodeMergeManager(RouteResultset rrs, MycatSession mycatSession) { + super(rrs, mycatSession); + } + + @Override + public void onRowMetaData(Map columToIndx, int fieldCount) { + if (tableMeta == null) { + tableMeta = new TableMeta(); + tableMeta.init(fieldCount); + Set> entries = columToIndx.entrySet(); + for (Map.Entry entry : entries) { + tableMeta.headerResultSetMeta.addFiled(entry.getKey(), entry.getValue().colType); + } + } + + } + + @Override + public Iterator getResults(byte[] eof) { + return null; + } + + + @Override + public void clear() { + this.tableMeta = null; + } + + @Override + public void run() { + // sort-or-group: no need for us to using multi-threads, because + //both sorter and group are synchronized!! 
+ // @author Uncle-pan + // @since 2016-03-23 + if (!running.compareAndSet(false, true)) { + return; + } + + // eof handler has been placed to "if (pack == END_FLAG_PACK){}" in for-statement + // @author Uncle-pan + // @since 2016-03-23 + boolean nulpack = false; + try { + // loop-on-packs + for (; ; ) { + final PackWraper pack = packs.take(); + System.out.println(packs.size()); + // async: handling row pack queue, this business thread should exit when no pack + // @author Uncle-pan + // @since 2016-03-23 + if (pack == null) { + nulpack = true; + break; + } + if (pack == END_FLAG_PACK) { + System.out.println("END_FLAG_PACK"); + ProxyBuffer proxyBuffer = mycatSession.proxyBuffer; + proxyBuffer.reset(); + tableMeta.writeBegin(proxyBuffer); + tableMeta.writeRowData(proxyBuffer); + proxyBuffer.flip(); + proxyBuffer.readIndex = proxyBuffer.writeIndex; + mycatSession.takeBufferOwnerOnly(); + if (!tableMeta.isWriteFinish()) { + mycatSession.getSessionAttrMap().put(SessionKeyEnum.SESSION_KEY_HBT_TABLE_META.getKey(), tableMeta); + } + try { + System.out.println("开始发送"); + mycatSession.writeToChannel(); + clear(); + } catch (Exception e) { + e.printStackTrace(); + } + return; + } else { + ArrayList v = new ArrayList<>(tableMeta.fieldCount); + ProxyBuffer proxyBuffer = new ProxyBuffer(pack.rowData); + for (int i = 0; i < tableMeta.fieldCount; i++) { + byte[] value = proxyBuffer.readLenencBytes(); + v.add(value); + } + tableMeta.addFieldValues(v); + } + } + } catch (final Exception e) { + e.printStackTrace(); + // multiQueryHandler.handleDataProcessException(e); + } finally { + running.set(false); + } + // try to check packs, it's possible that adding a pack after polling a null pack + //and before this time pointer!! 
+ // @author Uncle-pan + // @since 2016-03-23 + if (nulpack && !packs.isEmpty()) { + this.run(); + } + } +} diff --git a/source/src/main/java/io/mycat/mycat2/tasks/MultiNodeQueryHandler.java b/source/src/main/java/io/mycat/mycat2/tasks/MultiNodeQueryHandler.java new file mode 100644 index 0000000..0b6fc4a --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/tasks/MultiNodeQueryHandler.java @@ -0,0 +1,4 @@ +package io.mycat.mycat2.tasks; + +public class MultiNodeQueryHandler { +} diff --git a/source/src/main/java/io/mycat/mycat2/tasks/MyRowStream.java b/source/src/main/java/io/mycat/mycat2/tasks/MyRowStream.java new file mode 100644 index 0000000..b90a169 --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/tasks/MyRowStream.java @@ -0,0 +1,185 @@ +package io.mycat.mycat2.tasks; + +import io.mycat.mycat2.ColumnMeta; +import io.mycat.mycat2.MySQLSession; +import io.mycat.mycat2.MycatSession; +import io.mycat.mycat2.beans.MySQLPackageInf; +import io.mycat.mycat2.console.SessionKeyEnum; +import io.mycat.mycat2.hbt.MyFunction; +import io.mycat.mycat2.hbt.ResultSetMeta; +import io.mycat.mycat2.hbt.SqlMeta; +import io.mycat.mycat2.net.DefaultMycatSessionHandler; +import io.mycat.mysql.packet.ErrorPacket; +import io.mycat.mysql.packet.MySQLPacket; +import io.mycat.mysql.packet.QueryPacket; +import io.mycat.proxy.ProxyBuffer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SelectionKey; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; + +public class MyRowStream extends BackendIOTaskWithResultSet { + private static Logger logger = LoggerFactory.getLogger(MyRowStream.class); + AbstractDataNodeMerge merge; + Map columToIndx = new HashMap<>(); + private ResultSetMeta resultSetMeta; + int fieldCount = 0; + int getFieldCount = 0; + 
public MyRowStream(MySQLSession optSession) { + this.useNewBuffer = true; + setSession(optSession, true, false); + this.session = optSession; + } + + public AbstractDataNodeMerge getAbstractDataNodeMerge() { + return merge; + } + + public void setAbstractDataNodeMerge(AbstractDataNodeMerge abstractDataNodeMerge) { + this.merge = abstractDataNodeMerge; + } + + // public void fetchStream() { +// /*设置为忙*/ +// session.getSessionAttrMap().put(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey(), false); +// ProxyBuffer proxyBuf = session.proxyBuffer; +// session.setCurNIOHandler(this); +// proxyBuf.flip(); +// proxyBuf.readIndex = proxyBuf.writeIndex; +// try { +// this.session.writeToChannel(); +// } catch (IOException e) { +// logger.error(" The FetchIntoRowStream task write is error . {}",e.getMessage()); +// e.printStackTrace(); +// } +// } +public void fetchStream(String sql) { + /*设置为忙*/ + session.getSessionAttrMap().put(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey(), false); + ProxyBuffer proxyBuf = session.proxyBuffer; + proxyBuf.reset(); + QueryPacket queryPacket = new QueryPacket(); + queryPacket.packetId = 0; + queryPacket.sql = sql; + queryPacket.write(proxyBuf); + session.setCurNIOHandler(this); + proxyBuf.flip(); + proxyBuf.readIndex = proxyBuf.writeIndex; + try { + this.session.writeToChannel(); + } catch (IOException e) { + logger.error(" The FetchIntoRowStream task write is error . 
{}",e.getMessage()); + e.printStackTrace(); + } +} + public void fetchStream(MycatSession mycatSession) { + // 切换 buffer 读写状态 + ProxyBuffer proxyBuf = mycatSession.proxyBuffer; + proxyBuf.flip(); + // 改变 owner,对端Session获取,并且感兴趣写事件 + mycatSession.clearReadWriteOpts(); + /*设置为忙*/ + this.session.getSessionAttrMap().put(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey(), false); + this.session.setCurNIOHandler(this); + proxyBuf.flip(); + proxyBuf.readIndex = proxyBuf.writeIndex; + try { + this.session.writeToChannel(); + } catch (IOException e) { + logger.error(" The FetchIntoRowStream task write is error . {}",e.getMessage()); + e.printStackTrace(); + } + } + @Override + void onRsColCount(MySQLSession session) { + ProxyBuffer proxyBuffer = session.proxyBuffer; + MySQLPackageInf curMSQLPackgInf = session.curMSQLPackgInf; + int fieldCount = (int) proxyBuffer.getLenencInt(curMSQLPackgInf.startPos + MySQLPacket.packetHeaderSize); + + this.resultSetMeta = new ResultSetMeta(fieldCount); + this.fieldCount = fieldCount; + } + + @Override + void onRsColDef(MySQLSession session) { + ProxyBuffer proxyBuffer = session.proxyBuffer; + MySQLPackageInf curMQLPackgInf = session.curMSQLPackgInf; + + int tmpReadIndex = proxyBuffer.readIndex; + int rowDataIndex = curMQLPackgInf.startPos+MySQLPacket.packetHeaderSize; + proxyBuffer.readIndex = rowDataIndex; + proxyBuffer.readLenencString(); //catalog + proxyBuffer.readLenencString(); //schema + proxyBuffer.readLenencString(); //table + proxyBuffer.readLenencString(); //orgTable + String name = proxyBuffer.readLenencString(); //name + proxyBuffer.readLenencString(); + proxyBuffer.readBytes(7); // 1(filler) + 2(charsetNumber) + 4 (length) + int fieldType = proxyBuffer.readByte() & 0xff; + this.resultSetMeta.addFiled(name, fieldType); + proxyBuffer.readIndex = tmpReadIndex; + if(resultSetMeta.getFiledCount() == resultSetMeta.getRealFieldNameListSize()) { + + } + + columToIndx.put(name, new ColumnMeta(getFieldCount++, fieldType)); + if 
(fieldCount == getFieldCount) { + merge.onRowMetaData(columToIndx, fieldCount); + } + } + + @Override + void onRsRow(MySQLSession session) { + ProxyBuffer proxyBuffer = session.proxyBuffer; + MySQLPackageInf curMQLPackgInf = session.curMSQLPackgInf; + int rowDataIndex = curMQLPackgInf.startPos + MySQLPacket.packetHeaderSize; + int fieldCount = resultSetMeta.getFiledCount(); + int tmpReadIndex = proxyBuffer.readIndex; + proxyBuffer.readIndex = rowDataIndex; + ByteBuffer byteBuffer = ByteBuffer.allocate(proxyBuffer.getBuffer().capacity()); + if(merge!=null){ + for (int i = proxyBuffer.readIndex; i < proxyBuffer.writeIndex; i++) { + byteBuffer.put(proxyBuffer.getByte(i)); + }} + merge.onNewRecords(session.getDatabase(), byteBuffer); + proxyBuffer.readIndex = tmpReadIndex; + + } + + @Override + void onRsFinish(MySQLSession session, boolean success, String msg) throws IOException { + if (callBack != null) { + if (success == false) { + this.errPkg = new ErrorPacket(); + MySQLPackageInf curMQLPackgInf = session.curMSQLPackgInf; + session.proxyBuffer.readIndex = curMQLPackgInf.startPos; + this.errPkg.read(session.proxyBuffer); + session.getSessionAttrMap().remove(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey()); + revertPreBuffer(); + callBack.finished(session, this, success, this.errPkg); + } else { + session.getSessionAttrMap().remove(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey()); + revertPreBuffer(); + callBack.finished(session, null, success, null); + } + } + if(merge!=null){ + System.out.println("=>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>.hahahahah"); + merge.rrs.countDown(session,()->{ + System.out.println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"); + merge.onEOF(); + }); + } + logger.debug("session[{}] load result finish", session); + //@todo check + session.unbindMycatSession(); + } +} diff --git a/source/src/main/java/io/mycat/mycat2/tasks/RawSQLQueryResultTaskWrapper.java b/source/src/main/java/io/mycat/mycat2/tasks/RawSQLQueryResultTaskWrapper.java new file mode 
100644 index 0000000..e30bc60 --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/tasks/RawSQLQueryResultTaskWrapper.java @@ -0,0 +1,115 @@ +package io.mycat.mycat2.tasks; + +import io.mycat.mycat2.MySQLSession; +import io.mycat.mycat2.beans.MySQLPackageInf; +import io.mycat.mysql.packet.MySQLPacket; +import io.mycat.proxy.ProxyBuffer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.ByteBuffer; +import java.util.ArrayList; + +public abstract class RawSQLQueryResultTaskWrapper extends BackendIOTaskWithResultSet { + private static Logger logger = LoggerFactory.getLogger(RawSQLQueryResultTaskWrapper.class); + + @Override + void onRsColCount(MySQLSession session) { + ProxyBuffer proxyBuffer = session.proxyBuffer; + MySQLPackageInf curMSQLPackgInf = session.curMSQLPackgInf; + onRsColCount(session, (int) proxyBuffer.getLenencInt(curMSQLPackgInf.startPos + MySQLPacket.packetHeaderSize)); + } + + /** + * *
+     * Bytes                      Name
+     * -----                      ----
+     * n (Length Coded String)    catalog
+     * n (Length Coded String)    db
+     * n (Length Coded String)    table
+     * n (Length Coded String)    org_table
+     * n (Length Coded String)    name
+     * n (Length Coded String)    org_name
+     * 1                          (filler)
+     * 2                          charsetNumber
+     * 4                          length
+     * 1                          type
+     * 2                          flags
+     * 1                          decimals
+     * 2                          (filler), always 0x00
+     * n (Length Coded Binary)    default
+     *
+     * @see http://forge.mysql.com/wiki/MySQL_Internals_ClientServer_Protocol#Field_Packet
+     * 
+ * + * @param session + */ + @Override + void onRsColDef(MySQLSession session) { + ProxyBuffer proxyBuffer = session.proxyBuffer; + MySQLPackageInf curMQLPackgInf = session.curMSQLPackgInf; + + int tmpReadIndex = proxyBuffer.readIndex; + int rowDataIndex = curMQLPackgInf.startPos + MySQLPacket.packetHeaderSize; + proxyBuffer.readIndex = rowDataIndex; + + String catalog = proxyBuffer.readLenencString().intern(); //catalog + String schema = proxyBuffer.readLenencString().intern(); //schema + String table = proxyBuffer.readLenencString().intern(); //table + String orgTable = proxyBuffer.readLenencString().intern(); //orgTable + String name = proxyBuffer.readLenencString().intern(); //name + String orgName = proxyBuffer.readLenencString().intern(); + + //proxyBuffer.readBytes(7); // 1(filler) + 2(charsetNumber) + 4 (length) + byte filler = proxyBuffer.readByte(); + int charsetNumber = proxyBuffer.readByte() << 1 & proxyBuffer.readByte(); + int length = (int) proxyBuffer.readFixInt(4); + + int fieldType = proxyBuffer.readByte() & 0xff; + onRsColDef(session, + catalog, + schema, + table, + orgTable, + name, + orgName, + filler, + charsetNumber, + length, + fieldType + ); + + proxyBuffer.readIndex = tmpReadIndex; + } + + @Override + void onRsRow(MySQLSession session) { + ProxyBuffer proxyBuffer = session.proxyBuffer; + MySQLPackageInf curMQLPackgInf = session.curMSQLPackgInf; + int rowDataIndex = curMQLPackgInf.startPos + MySQLPacket.packetHeaderSize; + int tmpReadIndex = proxyBuffer.readIndex; + proxyBuffer.readIndex = rowDataIndex; + + onRsRow(session,proxyBuffer); + + + proxyBuffer.readIndex = tmpReadIndex; + + } + + abstract void onRsColCount(MySQLSession session, int fieldCount); + + abstract void onRsColDef(MySQLSession session, + String catalog, + String schema, + String table, + String orgTable, + String name, + String org_name, + byte filler, + int charsetNumber, + int length, + int fieldType); + + abstract void onRsRow(MySQLSession session, ProxyBuffer 
proxyBuffer); +} diff --git a/source/src/main/java/io/mycat/mycat2/tasks/RowStream.java b/source/src/main/java/io/mycat/mycat2/tasks/RowStream.java index 2866a72..3ef0372 100644 --- a/source/src/main/java/io/mycat/mycat2/tasks/RowStream.java +++ b/source/src/main/java/io/mycat/mycat2/tasks/RowStream.java @@ -59,6 +59,19 @@ public void fetchStream() { e.printStackTrace(); } } + public void fetchStream(ProxyBuffer proxyBuf) { + /*设置为忙*/ + session.getSessionAttrMap().put(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey(), false); + session.setCurNIOHandler(this); + proxyBuf.flip(); + proxyBuf.readIndex = proxyBuf.writeIndex; + try { + this.session.writeToChannel(); + } catch (IOException e) { + logger.error(" The FetchIntoRowStream task write is error . {}",e.getMessage()); + e.printStackTrace(); + } + } @Override void onRsColCount(MySQLSession session) { ProxyBuffer proxyBuffer = session.proxyBuffer; diff --git a/source/src/main/java/io/mycat/mycat2/tasks/SQLQueryResultTask.java b/source/src/main/java/io/mycat/mycat2/tasks/SQLQueryResultTask.java new file mode 100644 index 0000000..8fbdbf1 --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/tasks/SQLQueryResultTask.java @@ -0,0 +1,80 @@ +package io.mycat.mycat2.tasks; + +import io.mycat.mycat2.ColumnMeta; +import io.mycat.mycat2.MySQLSession; +import io.mycat.mycat2.MycatSession; +import io.mycat.mycat2.console.SessionKeyEnum; +import io.mycat.mycat2.hbt.TableMeta; +import io.mycat.mycat2.net.DefaultMycatSessionHandler; +import io.mycat.proxy.ProxyBuffer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SelectionKey; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class SQLQueryResultTask extends RawSQLQueryResultTaskWrapper { + private static Logger logger = LoggerFactory.getLogger(SQLQueryResultTask.class); + AbstractDataNodeMerge merge; + int 
fieldCount = 0; + int getFieldCount = 0; + Map columToIndx = new HashMap<>(); + + public SQLQueryResultTask(AbstractDataNodeMerge merge) { + this.merge = merge; + } + + @Override + void onRsColCount(MySQLSession session, int fieldCount) { + this.fieldCount = fieldCount; + } + + @Override + void onRsColDef(MySQLSession session, String catalog, String schema, String table, String orgTable, String name, String originName, byte filler, int charsetNumber, int length, int fieldType) { + columToIndx.put(name, new ColumnMeta(getFieldCount++, fieldType)); + if (fieldCount == getFieldCount) { + merge.onRowMetaData(columToIndx, fieldCount); + } + } + + @Override + void onRsRow(MySQLSession session, ProxyBuffer proxyBuffer) { +// ArrayList row = new ArrayList(3); +// for(int i = 0; i < 3; i++) { +// byte[] x = proxyBuffer.readLenencBytes(); +// ByteBuffer byteBuffer =ByteBuffer.allocate(proxyBuffer.getBuffer().position());; +// byteBuffer.put(x); +// //byteBuffer.flip(); +// merge.onNewRecords("",byteBuffer); +// row.add(x); +// } + + ByteBuffer byteBuffer = ByteBuffer.allocate(proxyBuffer.getBuffer().capacity()); + for (int i = proxyBuffer.readIndex; i < proxyBuffer.writeIndex; i++) { + byteBuffer.put(proxyBuffer.getByte(i)); + } + merge.onNewRecords("", byteBuffer); + + + +// for(int i = 0; i < 3; i++) { +// System.out.println(row); +// byteBuffer.put(row.get(i)); +// +// } + +} + + + @Override + void onRsFinish(MySQLSession session, boolean success, String msg) throws IOException { + merge.onEOF(); + System.out.println(msg); + session.setCurNIOHandler(DefaultMycatSessionHandler.INSTANCE); + } +} diff --git a/source/src/main/java/io/mycat/proxy/AbstractSession.java b/source/src/main/java/io/mycat/proxy/AbstractSession.java index 27a2b17..c96677a 100644 --- a/source/src/main/java/io/mycat/proxy/AbstractSession.java +++ b/source/src/main/java/io/mycat/proxy/AbstractSession.java @@ -136,6 +136,7 @@ public boolean readFromChannel() throws IOException { // logger.debug(" readed 
{} total bytes curChannel is {}", readed,this); if (readed == -1) { logger.warn("Read EOF ,socket closed "); + System.out.println("==============================>"+this); throw new ClosedChannelException(); } else if (readed == 0) { logger.warn("readed zero bytes ,Maybe a bug ,please fix it !!!!"); From d43736d7dc1e2a0a9b23ae2ca4f1e41c7153c21a Mon Sep 17 00:00:00 2001 From: jwc Date: Tue, 17 Apr 2018 16:28:33 +0800 Subject: [PATCH 21/22] to get mutil node data and marge --- .../main/java/io/mycat/mycat2/ColumnMeta.java | 88 -- .../java/io/mycat/mycat2/MycatSession.java | 1082 ++++++++--------- .../io/mycat/mycat2/beans/ColumnMeta.java | 89 ++ .../cmds/interceptor/SQLAnnotationChain.java | 23 +- .../cmds/multinode/DbInMultiServerCmd.java | 214 ++-- .../strategy/DBINMultiServerCmdStrategy.java | 72 +- .../mycat/mycat2/console/SessionKeyEnum.java | 6 +- .../io/mycat/mycat2/hbt/ResultSetMeta.java | 14 +- .../java/io/mycat/mycat2/hbt/TableMeta.java | 351 +++--- .../io/mycat/mycat2/route/RouteResultset.java | 11 +- .../route/impl/AbstractRouteStrategy.java | 23 +- .../mycat2/sqlannotations/CacheResult.java | 14 +- .../mycat2/tasks/AbstractDataNodeMerge.java | 85 -- .../mycat/mycat2/tasks/DataNodeManager.java | 171 +++ .../mycat2/tasks/DataNodeMergeManager.java | 435 ------- .../tasks/HeapDataNodeMergeManager.java | 81 +- .../io/mycat/mycat2/tasks/MyRowStream.java | 185 --- .../tasks/RawSQLQueryResultTaskWrapper.java | 93 +- .../java/io/mycat/mycat2/tasks/RowStream.java | 17 +- .../mycat2/tasks/SQLQueryResultTask.java | 80 -- .../io/mycat/mycat2/tasks/SQLQueryStream.java | 87 ++ .../java/io/mycat/proxy/AbstractSession.java | 12 +- .../io/mycat/proxy/MycatReactorThread.java | 27 +- 23 files changed, 1357 insertions(+), 1903 deletions(-) delete mode 100644 source/src/main/java/io/mycat/mycat2/ColumnMeta.java create mode 100644 source/src/main/java/io/mycat/mycat2/beans/ColumnMeta.java delete mode 100644 source/src/main/java/io/mycat/mycat2/tasks/AbstractDataNodeMerge.java 
create mode 100644 source/src/main/java/io/mycat/mycat2/tasks/DataNodeManager.java delete mode 100644 source/src/main/java/io/mycat/mycat2/tasks/DataNodeMergeManager.java delete mode 100644 source/src/main/java/io/mycat/mycat2/tasks/MyRowStream.java delete mode 100644 source/src/main/java/io/mycat/mycat2/tasks/SQLQueryResultTask.java create mode 100644 source/src/main/java/io/mycat/mycat2/tasks/SQLQueryStream.java diff --git a/source/src/main/java/io/mycat/mycat2/ColumnMeta.java b/source/src/main/java/io/mycat/mycat2/ColumnMeta.java deleted file mode 100644 index 4a5638d..0000000 --- a/source/src/main/java/io/mycat/mycat2/ColumnMeta.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.mycat2; - -import java.io.Serializable; - -public class ColumnMeta implements Serializable{ - public static final int COL_TYPE_DECIMAL = 0; - public static final int COL_TYPE_INT = 1; - public static final int COL_TYPE_SHORT = 2; - public static final int COL_TYPE_LONG = 3; - public static final int COL_TYPE_FLOAT = 4; - public static final int COL_TYPE_DOUBLE = 5; - public static final int COL_TYPE_NULL = 6; - public static final int COL_TYPE_TIMSTAMP = 7; - public static final int COL_TYPE_LONGLONG = 8; - public static final int COL_TYPE_INT24 = 9; - public static final int COL_TYPE_DATE = 0x0a; - public static final int COL_TYPE_DATETIME=0X0C; - public static final int COL_TYPE_TIME = 0x0b; - public static final int COL_TYPE_YEAR = 0x0d; - public static final int COL_TYPE_NEWDATE = 0x0e; - public static final int COL_TYPE_VACHAR = 0x0f; - public static final int COL_TYPE_BIT = 0x10; - public static final int COL_TYPE_NEWDECIMAL = 0xf6; - public static final int COL_TYPE_ENUM = 0xf7; - public static final int COL_TYPE_SET = 0xf8; - public static final int COL_TYPE_TINY_BLOB = 0xf9; - public static final int COL_TYPE_TINY_TYPE_MEDIUM_BLOB = 0xfa; - public static final int COL_TYPE_TINY_TYPE_LONG_BLOB = 0xfb; - public static final int COL_TYPE_BLOB = 0xfc; - public static final int COL_TYPE_VAR_STRING = 0xfd; - public static final int COL_TYPE_STRING = 0xfe; - public static final int COL_TYPE_GEOMETRY = 0xff; - public int colIndex; - public final int colType; - - public int decimals; - - public int avgSumIndex; - public int avgCountIndex; - - public ColumnMeta(int colIndex, int colType) { - super(); - this.colIndex = colIndex; - this.colType = colType; - } - public ColumnMeta(int avgSumIndex, int avgCountIndex, int colType) { - super(); - this.avgSumIndex = avgSumIndex; - this.avgCountIndex=avgCountIndex; - this.colType = colType; - } - public int getColIndex() { - return colIndex; - } - - public int getColType() { - return colType; - } - 
- @Override - public String toString() { - return "ColMeta [colIndex=" + colIndex + ", colType=" + colType + "]"; - } - -} \ No newline at end of file diff --git a/source/src/main/java/io/mycat/mycat2/MycatSession.java b/source/src/main/java/io/mycat/mycat2/MycatSession.java index 949c8d0..45a32ac 100644 --- a/source/src/main/java/io/mycat/mycat2/MycatSession.java +++ b/source/src/main/java/io/mycat/mycat2/MycatSession.java @@ -1,19 +1,5 @@ package io.mycat.mycat2; -import java.io.IOException; -import java.nio.channels.SelectionKey; -import java.nio.channels.Selector; -import java.nio.channels.SocketChannel; -import java.security.InvalidParameterException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ConcurrentHashMap; - -import io.mycat.mycat2.tasks.AbstractDataNodeMerge; -import org.apache.commons.lang.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import io.mycat.mycat2.beans.MySQLMetaBean; import io.mycat.mycat2.beans.MySQLRepBean; import io.mycat.mycat2.beans.conf.DNBean; @@ -23,10 +9,10 @@ import io.mycat.mycat2.cmds.strategy.DBInOneServerCmdStrategy; import io.mycat.mycat2.console.SessionKeyEnum; import io.mycat.mycat2.route.RouteResultset; -import io.mycat.mycat2.route.RouteResultsetNode; import io.mycat.mycat2.sqlparser.BufferSQLContext; import io.mycat.mycat2.sqlparser.TokenHash; import io.mycat.mycat2.tasks.AsynTaskCallBack; +import io.mycat.mycat2.tasks.DataNodeManager; import io.mycat.mysql.AutoCommit; import io.mycat.mysql.Capabilities; import io.mycat.mysql.packet.ErrorPacket; @@ -37,39 +23,29 @@ import io.mycat.util.ErrorCode; import io.mycat.util.ParseUtil; import io.mycat.util.RandomUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.SocketChannel; +import java.security.InvalidParameterException; +import java.util.ArrayList; +import 
java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; /** * 前端连接会话 * * @author wuzhihui - * */ public class MycatSession extends AbstractMySQLSession { - private static Logger logger = LoggerFactory.getLogger(MycatSession.class); - - public MySQLSession curBackend; - - public RouteResultset curRouteResultset; - - public RouteResultsetNode curRouteResultsetNode; - - public AbstractDataNodeMerge merge; - - //所有处理cmd中,用来向前段写数据,或者后端写数据的cmd的 - public MySQLCommand curSQLCommand; - - public BufferSQLContext sqlContext = new BufferSQLContext(); - - /** - * Mycat Schema - */ - public SchemaBean schema; - - private ConcurrentHashMap> backendMap = new ConcurrentHashMap<>(); + private static Logger logger = LoggerFactory.getLogger(MycatSession.class); + private static List masterSqlList = new ArrayList<>(); - private static List masterSqlList = new ArrayList<>(); - static { masterSqlList.add(BufferSQLContext.INSERT_SQL); masterSqlList.add(BufferSQLContext.UPDATE_SQL); @@ -86,548 +62,564 @@ public class MycatSession extends AbstractMySQLSession { masterSqlList.add(BufferSQLContext.BEGIN_SQL); masterSqlList.add(BufferSQLContext.START_SQL); // TODO 需要完善sql 解析器。 将 start transaction - // 分离出来。 + // 分离出来。 masterSqlList.add(BufferSQLContext.SET_AUTOCOMMIT_SQL); } - - /** - * 获取sql 类型 - * @return - */ - public boolean matchMySqlCommand(){ - switch(schema.schemaType){ - case DB_IN_ONE_SERVER: - return DBInOneServerCmdStrategy.INSTANCE.matchMySqlCommand(this); - case DB_IN_MULTI_SERVER: + + public MySQLSession curBackend; + public DataNodeManager merge; + //所有处理cmd中,用来向前段写数据,或者后端写数据的cmd的 + public MySQLCommand curSQLCommand; + public BufferSQLContext sqlContext = new BufferSQLContext(); + /** + * Mycat Schema + */ + public SchemaBean schema; + private ConcurrentHashMap> backendMap = new ConcurrentHashMap<>(); + private ExecutorService executor; + private RouteResultset curRouteResultset; + + public MycatSession(BufferPool bufPool, 
Selector nioSelector, SocketChannel frontChannel) throws IOException { + super(bufPool, nioSelector, frontChannel); + + } + + /** + * 获取sql 类型 + * + * @return + */ + public boolean matchMySqlCommand() { + switch (schema.schemaType) { + case DB_IN_ONE_SERVER: + return DBInOneServerCmdStrategy.INSTANCE.matchMySqlCommand(this); + case DB_IN_MULTI_SERVER: return DBINMultiServerCmdStrategy.INSTANCE.matchMySqlCommand(this); - case ANNOTATION_ROUTE: - AnnotateRouteCmdStrategy.INSTANCE.matchMySqlCommand(this); + case ANNOTATION_ROUTE: + AnnotateRouteCmdStrategy.INSTANCE.matchMySqlCommand(this); // case SQL_PARSE_ROUTE: // AnnotateRouteCmdStrategy.INSTANCE.matchMySqlCommand(this); - default: - throw new InvalidParameterException("schema type is invalid "); - } - } - - public MycatSession(BufferPool bufPool, Selector nioSelector, SocketChannel frontChannel) throws IOException { - super(bufPool, nioSelector, frontChannel); - - } - - protected int getServerCapabilities() { - int flag = 0; - flag |= Capabilities.CLIENT_LONG_PASSWORD; - flag |= Capabilities.CLIENT_FOUND_ROWS; - flag |= Capabilities.CLIENT_LONG_FLAG; - flag |= Capabilities.CLIENT_CONNECT_WITH_DB; - // flag |= Capabilities.CLIENT_NO_SCHEMA; - // boolean usingCompress = MycatServer.getInstance().getConfig() - // .getSystem().getUseCompression() == 1; - // if (usingCompress) { - // flag |= Capabilities.CLIENT_COMPRESS; - // } - flag |= Capabilities.CLIENT_ODBC; - flag |= Capabilities.CLIENT_LOCAL_FILES; - flag |= Capabilities.CLIENT_IGNORE_SPACE; - flag |= Capabilities.CLIENT_PROTOCOL_41; - flag |= Capabilities.CLIENT_INTERACTIVE; - // flag |= Capabilities.CLIENT_SSL; - flag |= Capabilities.CLIENT_IGNORE_SIGPIPE; - flag |= Capabilities.CLIENT_TRANSACTIONS; - // flag |= ServerDefs.CLIENT_RESERVED; - flag |= Capabilities.CLIENT_SECURE_CONNECTION; - return flag; - } - - /** - * 给客户端(front)发送认证报文 - * - * @throws IOException - */ - public void sendAuthPackge() throws IOException { - // 生成认证数据 - byte[] rand1 = 
RandomUtil.randomBytes(8); - byte[] rand2 = RandomUtil.randomBytes(12); - - // 保存认证数据 - byte[] seed = new byte[rand1.length + rand2.length]; - System.arraycopy(rand1, 0, seed, 0, rand1.length); - System.arraycopy(rand2, 0, seed, rand1.length, rand2.length); - this.seed = seed; - - // 发送握手数据包 - HandshakePacket hs = new HandshakePacket(); - hs.packetId = 0; - hs.protocolVersion = Version.PROTOCOL_VERSION; - hs.serverVersion = Version.SERVER_VERSION; - hs.threadId = this.getSessionId(); - hs.seed = rand1; - hs.serverCapabilities = getServerCapabilities(); - // hs.serverCharsetIndex = (byte) (charsetIndex & 0xff); - hs.serverStatus = 2; - hs.restOfScrambleBuff = rand2; - hs.write(proxyBuffer); - // 设置frontBuffer 为读取状态 - proxyBuffer.flip(); - proxyBuffer.readIndex = proxyBuffer.writeIndex; - this.writeToChannel(); - } - - public int getBackendConCounts(MySQLMetaBean metaBean) { - return (int)backendMap.values() - .stream() - .flatMap(f->f.stream()) - .filter(f->f.getMySQLMetaBean().equals(metaBean)) - .count(); - } - - /** - * 关闭后端连接,同时向前端返回错误信息 - * @param mysqlsession - * @param normal - * @param hint - */ - public void closeBackendAndResponseError(MySQLSession mysqlsession,boolean normal, ErrorPacket error)throws IOException{ - unbindBeckend(mysqlsession); - mysqlsession.close(normal, error.message); - takeBufferOwnerOnly(); - responseOKOrError(error); - } - - /** - * 关闭后端连接,同时向前端返回错误信息 - * @param session - * @param mysqlsession - * @param normal - * @param errno - * @param error - * @throws IOException - */ - public void closeBackendAndResponseError(MySQLSession mysqlsession,boolean normal,int errno, String error)throws IOException{ - unbindBeckend(mysqlsession); - mysqlsession.close(normal, error); - takeBufferOwnerOnly(); - sendErrorMsg(errno,error); - } - - /** - * 向客户端响应 错误信息 - * @param session - * @throws IOException - */ - public void sendErrorMsg(int errno,String errMsg) throws IOException{ - ErrorPacket errPkg = new ErrorPacket(); - errPkg.packetId = (byte) 
(proxyBuffer.getByte(curMSQLPackgInf.startPos - + ParseUtil.mysql_packetHeader_length) + 1); - errPkg.errno = errno; - errPkg.message = errMsg; - proxyBuffer.reset(); - responseOKOrError(errPkg); - } - - /** - * 绑定后端MySQL会话 - * - * @param backend - */ - public void bindBackend(MySQLSession backend) { - this.curBackend = backend; - /* - * 这里,不能reset . - * 一个前端连接, 的sessionM安排 中有多个后端连接时, 多个后端连接和前端连接使用的是同一个buffer. - * 这里reset ,会把前端连接的buffer 也给reset的掉. - * 连接池 新创建的连接放入 reactor 时,会进行一次reset ,保证 session 拿到的连接 buffer 状态是正确的. - */ + default: + throw new InvalidParameterException("schema type is invalid "); + } + } + + protected int getServerCapabilities() { + int flag = 0; + flag |= Capabilities.CLIENT_LONG_PASSWORD; + flag |= Capabilities.CLIENT_FOUND_ROWS; + flag |= Capabilities.CLIENT_LONG_FLAG; + flag |= Capabilities.CLIENT_CONNECT_WITH_DB; + // flag |= Capabilities.CLIENT_NO_SCHEMA; + // boolean usingCompress = MycatServer.getInstance().getConfig() + // .getSystem().getUseCompression() == 1; + // if (usingCompress) { + // flag |= Capabilities.CLIENT_COMPRESS; + // } + flag |= Capabilities.CLIENT_ODBC; + flag |= Capabilities.CLIENT_LOCAL_FILES; + flag |= Capabilities.CLIENT_IGNORE_SPACE; + flag |= Capabilities.CLIENT_PROTOCOL_41; + flag |= Capabilities.CLIENT_INTERACTIVE; + // flag |= Capabilities.CLIENT_SSL; + flag |= Capabilities.CLIENT_IGNORE_SIGPIPE; + flag |= Capabilities.CLIENT_TRANSACTIONS; + // flag |= ServerDefs.CLIENT_RESERVED; + flag |= Capabilities.CLIENT_SECURE_CONNECTION; + return flag; + } + + /** + * 给客户端(front)发送认证报文 + * + * @throws IOException + */ + public void sendAuthPackge() throws IOException { + // 生成认证数据 + byte[] rand1 = RandomUtil.randomBytes(8); + byte[] rand2 = RandomUtil.randomBytes(12); + + // 保存认证数据 + byte[] seed = new byte[rand1.length + rand2.length]; + System.arraycopy(rand1, 0, seed, 0, rand1.length); + System.arraycopy(rand2, 0, seed, rand1.length, rand2.length); + this.seed = seed; + + // 发送握手数据包 + HandshakePacket hs = new 
HandshakePacket(); + hs.packetId = 0; + hs.protocolVersion = Version.PROTOCOL_VERSION; + hs.serverVersion = Version.SERVER_VERSION; + hs.threadId = this.getSessionId(); + hs.seed = rand1; + hs.serverCapabilities = getServerCapabilities(); + // hs.serverCharsetIndex = (byte) (charsetIndex & 0xff); + hs.serverStatus = 2; + hs.restOfScrambleBuff = rand2; + hs.write(proxyBuffer); + // 设置frontBuffer 为读取状态 + proxyBuffer.flip(); + proxyBuffer.readIndex = proxyBuffer.writeIndex; + this.writeToChannel(); + } + + public int getBackendConCounts(MySQLMetaBean metaBean) { + return (int) backendMap.values() + .stream() + .flatMap(f -> f.stream()) + .filter(f -> f.getMySQLMetaBean().equals(metaBean)) + .count(); + } + + /** + * 关闭后端连接,同时向前端返回错误信息 + * + * @param mysqlsession + * @param normal + * @param hint + */ + public void closeBackendAndResponseError(MySQLSession mysqlsession, boolean normal, ErrorPacket error) throws IOException { + unbindBeckend(mysqlsession); + mysqlsession.close(normal, error.message); + takeBufferOwnerOnly(); + responseOKOrError(error); + } + + /** + * 关闭后端连接,同时向前端返回错误信息 + * + * @param session + * @param mysqlsession + * @param normal + * @param errno + * @param error + * @throws IOException + */ + public void closeBackendAndResponseError(MySQLSession mysqlsession, boolean normal, int errno, String error) throws IOException { + unbindBeckend(mysqlsession); + mysqlsession.close(normal, error); + takeBufferOwnerOnly(); + sendErrorMsg(errno, error); + } + + /** + * 向客户端响应 错误信息 + * + * @param session + * @throws IOException + */ + public void sendErrorMsg(int errno, String errMsg) throws IOException { + ErrorPacket errPkg = new ErrorPacket(); + errPkg.packetId = (byte) (proxyBuffer.getByte(curMSQLPackgInf.startPos + + ParseUtil.mysql_packetHeader_length) + 1); + errPkg.errno = errno; + errPkg.message = errMsg; + proxyBuffer.reset(); + responseOKOrError(errPkg); + } + + /** + * 绑定后端MySQL会话 + * + * @param backend + */ + public void bindBackend(MySQLSession 
backend) { + this.curBackend = backend; + /* + * 这里,不能reset . + * 一个前端连接, 的sessionM安排 中有多个后端连接时, 多个后端连接和前端连接使用的是同一个buffer. + * 这里reset ,会把前端连接的buffer 也给reset的掉. + * 连接池 新创建的连接放入 reactor 时,会进行一次reset ,保证 session 拿到的连接 buffer 状态是正确的. + */ // backend.proxyBuffer.reset(); - putbackendMap(backend); - backend.setMycatSession(this); - backend.useSharedBuffer(this.proxyBuffer); - backend.setCurNIOHandler(this.getCurNIOHandler()); - backend.getSessionAttrMap().put(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey(), false); - logger.debug(" {} bind backConnection for {}", - this, - backend.toString()); - } - - /** - * 将所有后端连接归还到ds中 - */ - public void unbindAllBackend() { - final MycatReactorThread reactor = (MycatReactorThread) Thread.currentThread(); - backendMap.forEach((key, value) -> { - if (value != null) { - value.forEach(mySQLSession -> { - /*需要将前端的mycatSession设置为空 不然还会被使用*/ - MycatSession mycatSession = mySQLSession.getMycatSession(); - if(null != mycatSession) { - mycatSession.curBackend = null; - } - mySQLSession.unbindMycatSession(); - reactor.addMySQLSession(mySQLSession.getMySQLMetaBean(), mySQLSession); - }); - } - }); - backendMap.clear(); - } - - public void unbindBeckend(MySQLSession mysqlSession){ - List list = backendMap.get(mysqlSession.getMySQLMetaBean().getRepBean()); - if(list!=null){ - mysqlSession.unbindMycatSession(); - list.remove(mysqlSession); - } - clearBeckend(mysqlSession); - } - - public void clearBeckend(MySQLSession mysqlSession){ - if(curBackend!=null&&curBackend.equals(mysqlSession)){ - curBackend = null; - } - } - /** - * 解除绑定当前 metaBean 所有的后端连接 - * @param mySQLMetaBean - */ - public void unbindBackend(MySQLMetaBean mySQLMetaBean,String reason){ - List list = backendMap.get(mySQLMetaBean.getRepBean()); - - if(list!=null&&!list.isEmpty()){ - list.stream().forEach(f->{ - f.setMycatSession(null); - f.close(true, reason); - }); - } - if(curBackend!=null&&curBackend.getMySQLMetaBean().equals(mySQLMetaBean)){ - curBackend = null; - } - } - - 
public void takeBufferOwnerOnly(){ - this.curBufOwner = true; - if (this.curBackend != null) { - curBackend.setCurBufOwner(false); - } - } - - /** - * 获取ProxyBuffer控制权,同时设置感兴趣的事件,如SocketRead,Write,只能其一 - * - * @param intestOpts - * @return - */ - public void takeOwner(int intestOpts) { - this.curBufOwner = true; - if (intestOpts == SelectionKey.OP_READ) { - this.change2ReadOpts(); - } else { - this.change2WriteOpts(); - } - if (this.curBackend != null) { - curBackend.setCurBufOwner(false); - curBackend.clearReadWriteOpts(); - } - } - - /** - * 放弃控制权,同时设置对端MySQLSession感兴趣的事件,如SocketRead,Write,只能其一 - * - * @param intestOpts - */ - public void giveupOwner(int intestOpts) { - this.curBufOwner = false; - this.clearReadWriteOpts(); - curBackend.setCurBufOwner(true); - if (intestOpts == SelectionKey.OP_READ) { - curBackend.change2ReadOpts(); - } + putbackendMap(backend); + backend.setMycatSession(this); + backend.useSharedBuffer(this.proxyBuffer); + backend.setCurNIOHandler(this.getCurNIOHandler()); + backend.getSessionAttrMap().put(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey(), false); + logger.debug(" {} bind backConnection for {}", + this, + backend.toString()); + } + + /** + * 将所有后端连接归还到ds中 + */ + public void unbindAllBackend() { + final MycatReactorThread reactor = (MycatReactorThread) Thread.currentThread(); + backendMap.forEach((key, value) -> { + if (value != null) { + value.forEach(mySQLSession -> { + /*需要将前端的mycatSession设置为空 不然还会被使用*/ + MycatSession mycatSession = mySQLSession.getMycatSession(); + if (null != mycatSession) { + mycatSession.curBackend = null; + } + mySQLSession.unbindMycatSession(); + reactor.addMySQLSession(mySQLSession.getMySQLMetaBean(), mySQLSession); + }); + } + }); + backendMap.clear(); + } + + public void unbindBeckend(MySQLSession mysqlSession) { + List list = backendMap.get(mysqlSession.getMySQLMetaBean().getRepBean()); + if (list != null) { + mysqlSession.unbindMycatSession(); + list.remove(mysqlSession); + } + 
clearBeckend(mysqlSession); + } + + public void clearBeckend(MySQLSession mysqlSession) { + if (curBackend != null && curBackend.equals(mysqlSession)) { + curBackend = null; + } + } + + /** + * 解除绑定当前 metaBean 所有的后端连接 + * + * @param mySQLMetaBean + */ + public void unbindBackend(MySQLMetaBean mySQLMetaBean, String reason) { + List list = backendMap.get(mySQLMetaBean.getRepBean()); + + if (list != null && !list.isEmpty()) { + list.stream().forEach(f -> { + f.setMycatSession(null); + f.close(true, reason); + }); + } + if (curBackend != null && curBackend.getMySQLMetaBean().equals(mySQLMetaBean)) { + curBackend = null; + } + } + + public void takeBufferOwnerOnly() { + this.curBufOwner = true; + if (this.curBackend != null) { + curBackend.setCurBufOwner(false); + } + } + + /** + * 获取ProxyBuffer控制权,同时设置感兴趣的事件,如SocketRead,Write,只能其一 + * + * @param intestOpts + * @return + */ + public void takeOwner(int intestOpts) { + this.curBufOwner = true; + if (intestOpts == SelectionKey.OP_READ) { + this.change2ReadOpts(); + } else { + this.change2WriteOpts(); + } + if (this.curBackend != null) { + curBackend.setCurBufOwner(false); + curBackend.clearReadWriteOpts(); + } + } + + /** + * 放弃控制权,同时设置对端MySQLSession感兴趣的事件,如SocketRead,Write,只能其一 + * + * @param intestOpts + */ + public void giveupOwner(int intestOpts) { + this.curBufOwner = false; + this.clearReadWriteOpts(); + curBackend.setCurBufOwner(true); + if (intestOpts == SelectionKey.OP_READ) { + curBackend.change2ReadOpts(); + } // else { // curBackend.change2WriteOpts(); // } - } - - /** - * 向前端发送数据报文,需要先确定为Write状态并确保写入位置的正确(frontBuffer.writeState) - * - * @param rawPkg - * @throws IOException - */ - public void answerFront(byte[] rawPkg) throws IOException { - proxyBuffer.writeBytes(rawPkg); - proxyBuffer.flip(); - proxyBuffer.readIndex = proxyBuffer.writeIndex; - writeToChannel(); - } - - public void close(boolean normal, String hint) { - super.close(normal, hint); - //TODO 清理前后端资源 - this.unbindAllBackend(); - } - - @Override - 
protected void doTakeReadOwner() { - this.takeOwner(SelectionKey.OP_READ); - } - - - private String getbackendName(){ - String backendName = null; - switch (schema.getSchemaType()) { - case DB_IN_ONE_SERVER: + } + + /** + * 向前端发送数据报文,需要先确定为Write状态并确保写入位置的正确(frontBuffer.writeState) + * + * @param rawPkg + * @throws IOException + */ + public void answerFront(byte[] rawPkg) throws IOException { + proxyBuffer.writeBytes(rawPkg); + proxyBuffer.flip(); + proxyBuffer.readIndex = proxyBuffer.writeIndex; + writeToChannel(); + } + + public void close(boolean normal, String hint) { + super.close(normal, hint); + //TODO 清理前后端资源 + this.unbindAllBackend(); + } + + @Override + protected void doTakeReadOwner() { + this.takeOwner(SelectionKey.OP_READ); + } + + + private String getbackendName() { + String backendName = null; + switch (schema.getSchemaType()) { + case DB_IN_ONE_SERVER: backendName = ProxyRuntime.INSTANCE.getConfig().getMycatDataNodeMap() .get(schema.getDefaultDataNode()).getReplica(); - break; - case ANNOTATION_ROUTE: - break; - case DB_IN_MULTI_SERVER: -// RouteResultsetNode[] nodes = this.curRouteResultset.getNodes(); -// String dataNodeName = ""; -// if (nodes != null && nodes.length == 1) { -// dataNodeName = nodes[0].getName(); -// } else if (nodes != null && nodes.length > 1 && curRouteResultsetNode != null) { -// dataNodeName = curRouteResultsetNode.getName(); -// } -// DNBean dnBean = ProxyRuntime.INSTANCE.getConfig().getDNBean(dataNodeName); -// if (dnBean != null) { -// backendName = dnBean.getReplica(); -// } -// if (StringUtils.isEmpty(backendName)) { -// backendName = ProxyRuntime.INSTANCE.getConfig().getMycatDataNodeMap() -// .get(schema.getDefaultDataNode()).getReplica(); -// } - return "repli"; -// case SQL_PARSE_ROUTE: -// break; - default: - break; - } - if (backendName == null){ - throw new InvalidParameterException("the backendName must not be null"); - } - return backendName; - } - - /** - * 将后端连接放入到后端连接缓存中 - * @param mysqlSession - */ - private 
void putbackendMap(MySQLSession mysqlSession){ - - List list = backendMap.get(mysqlSession.getMySQLMetaBean().getRepBean()); - if (list == null){ - list = new ArrayList<>(); - backendMap.putIfAbsent(mysqlSession.getMySQLMetaBean().getRepBean(), list); - } - logger.debug("add backend connection in mycatSession . {}",mysqlSession); - list.add(mysqlSession); - } - /** - * 根据datanode名称获取后端会话连接 - * - * @return - */ - public void getBackendByDataNodeName(String dataNodeName,AsynTaskCallBack callback) throws IOException { - DNBean dnBean = ProxyRuntime.INSTANCE.getConfig().getDNBean(dataNodeName); - String repBeanName = dnBean.getReplica(); - getBackendByRepBeanName(repBeanName,callback); - } - - /** - * 当前操作的后端会话连接 - * - * @return - */ - public void getBackend(AsynTaskCallBack callback) throws IOException { - getBackendByRepBeanName(getbackendName(),callback); - } - - /** - *根据复制组名称获取后端会话连接 - * @param repBeanName 复制组名称 - * @param callback cjw - * @throws IOException - */ - public void getBackendByRepBeanName(String repBeanName,AsynTaskCallBack callback) throws IOException { - MycatReactorThread reactorThread = (MycatReactorThread) Thread.currentThread(); - - final boolean runOnSlave = canRunOnSlave(); - - MySQLRepBean repBean = getMySQLRepBean(repBeanName); - - /** - * 本次根据读写分离策略要使用的metaBean - */ - MySQLMetaBean targetMetaBean = repBean.getBalanceMetaBean(runOnSlave); - - if(targetMetaBean==null){ - String errmsg = " the metaBean is not found,please check datasource.yml!!! 
[balance] and [type] propertie or see debug log or check heartbeat task!!"; - if(logger.isDebugEnabled()){ - logger.error(errmsg); - } - ErrorPacket error = new ErrorPacket(); + break; + case ANNOTATION_ROUTE: + break; + case DB_IN_MULTI_SERVER: + //在 DB_IN_MULTI_SERVER 模式中,如果不指定datanode以及Replica名字取得backendName,则使用默认的 + backendName = ProxyRuntime.INSTANCE.getConfig().getMycatDataNodeMap() + .get(schema.getDefaultDataNode()).getReplica(); + default: + break; + } + if (backendName == null) { + throw new InvalidParameterException("the backendName must not be null"); + } + return backendName; + } + + /** + * 将后端连接放入到后端连接缓存中 + * + * @param mysqlSession + */ + private void putbackendMap(MySQLSession mysqlSession) { + List list = backendMap.get(mysqlSession.getMySQLMetaBean().getRepBean()); + if (list == null) { + list = new ArrayList<>(); + backendMap.putIfAbsent(mysqlSession.getMySQLMetaBean().getRepBean(), list); + } + logger.debug("add backend connection in mycatSession . {}", mysqlSession); + list.add(mysqlSession); + } + + /** + * 根据datanode名称获取后端会话连接 + * + * @return + */ + public void getBackendByDataNodeName(String dataNodeName, AsynTaskCallBack callback) throws IOException { + DNBean dnBean = ProxyRuntime.INSTANCE.getConfig().getDNBean(dataNodeName); + String repBeanName = dnBean.getReplica(); + getBackendByRepBeanName(repBeanName, callback); + } + + /** + * 当前操作的后端会话连接 + * + * @return + */ + public void getBackend(AsynTaskCallBack callback) throws IOException { + getBackendByRepBeanName(getbackendName(), callback); + } + + /** + * 根据复制组名称获取后端会话连接 + * + * @param repBeanName 复制组名称 + * @param callback cjw + * @throws IOException + */ + public void getBackendByRepBeanName(String repBeanName, AsynTaskCallBack callback) throws IOException { + MycatReactorThread reactorThread = (MycatReactorThread) Thread.currentThread(); + + final boolean runOnSlave = canRunOnSlave(); + + MySQLRepBean repBean = getMySQLRepBean(repBeanName); + + /** + * 本次根据读写分离策略要使用的metaBean + */ + 
MySQLMetaBean targetMetaBean = repBean.getBalanceMetaBean(runOnSlave); + + if (targetMetaBean == null) { + String errmsg = " the metaBean is not found,please check datasource.yml!!! [balance] and [type] propertie or see debug log or check heartbeat task!!"; + if (logger.isDebugEnabled()) { + logger.error(errmsg); + } + ErrorPacket error = new ErrorPacket(); error.errno = ErrorCode.ER_BAD_DB_ERROR; error.packetId = 1; error.message = errmsg; - responseOKOrError(error); - return; - } - - /* - * 连接复用优先级 - * 1. 当前正在使用的 backend - * 2. 当前session 缓存的 backend - */ - - //1. 当前正在使用的 backend - // 当前连接如果本次不被使用,会被自动放入 currSessionMap 中 - if (curBackend != null - && canUseforCurrent(curBackend,targetMetaBean,runOnSlave)){ - logger.debug("Using cached backend connections for {}。{}" - ,(runOnSlave ? "read" : "write"), - curBackend); - reactorThread.syncAndExecute(curBackend,callback); - return; - } - - //2. 当前session 缓存的 backend - MySQLSession mysqlSession = getCurrCachedSession(targetMetaBean, runOnSlave,true); - - if(mysqlSession!=null){ - bindBackend(mysqlSession); - reactorThread.syncAndExecute(mysqlSession,callback); - }else{ - //3. 从当前 actor 中获取连接 - reactorThread.getMySQLSession(this,runOnSlave,targetMetaBean,callback); - } - } - - /** - * 判断连接是否可以被 当前操作使用 - * @param backend - * @param targetMetaBean - * @param runOnSlave - * @return - */ - private boolean canUseforCurrent(MySQLSession backend,MySQLMetaBean targetMetaBean,boolean runOnSlave){ - - MySQLMetaBean currMetaBean = backend.getMySQLMetaBean(); - - if(targetMetaBean==null){ - return false; - } - - if(currMetaBean.equals(targetMetaBean)){ - return true; - }else{ - return false; - } - } - + responseOKOrError(error); + return; + } + + /* + * 连接复用优先级 + * 1. 当前正在使用的 backend + * 2. 当前session 缓存的 backend + */ + + //1. 
当前正在使用的 backend + // 当前连接如果本次不被使用,会被自动放入 currSessionMap 中 + if (curBackend != null + && canUseforCurrent(curBackend, targetMetaBean, runOnSlave)) { + logger.debug("Using cached backend connections for {}。{}" + , (runOnSlave ? "read" : "write"), + curBackend); + reactorThread.syncAndExecute(curBackend, callback); + return; + } + + //2. 当前session 缓存的 backend + MySQLSession mysqlSession = getCurrCachedSession(targetMetaBean, runOnSlave, true); + + if (mysqlSession != null) { + bindBackend(mysqlSession); + reactorThread.syncAndExecute(mysqlSession, callback); + } else { + //3. 从当前 actor 中获取连接 + reactorThread.getMySQLSession(this, runOnSlave, targetMetaBean, callback); + } + } + + /** + * 判断连接是否可以被 当前操作使用 + * + * @param backend + * @param targetMetaBean + * @param runOnSlave + * @return + */ + private boolean canUseforCurrent(MySQLSession backend, MySQLMetaBean targetMetaBean, boolean runOnSlave) { + + MySQLMetaBean currMetaBean = backend.getMySQLMetaBean(); + + if (targetMetaBean == null) { + return false; + } + + return currMetaBean.equals(targetMetaBean); + } + /** * 获取指定的复制组 + * * @param replicaName * @return */ - private MySQLRepBean getMySQLRepBean(String replicaName){ - MycatConfig conf = ProxyRuntime.INSTANCE.getConfig(); - MySQLRepBean repBean = conf.getMySQLRepBean(replicaName); - if (repBean == null) { - throw new RuntimeException("no such MySQLRepBean " + replicaName); - } - return repBean; - } - + private MySQLRepBean getMySQLRepBean(String replicaName) { + MycatConfig conf = ProxyRuntime.INSTANCE.getConfig(); + MySQLRepBean repBean = conf.getMySQLRepBean(replicaName); + if (repBean == null) { + throw new RuntimeException("no such MySQLRepBean " + replicaName); + } + return repBean; + } + /** * 获取 MySQLMetaBean 的一个空闲连接 + * * @param metaBean * @return */ - public MySQLSession getMySQLSession(MySQLMetaBean metaBean){ - List backendList = backendMap.get(metaBean.getRepBean()); - if (backendList == null || backendList.isEmpty()) { - return null; - } - + public 
MySQLSession getMySQLSession(MySQLMetaBean metaBean) { + List backendList = backendMap.get(metaBean.getRepBean()); + if (backendList == null || backendList.isEmpty()) { + return null; + } + return backendList.stream().filter(f -> { return metaBean == f.getMySQLMetaBean() && f.isIDLE(); }).findFirst().orElse(null); - } + } /** * 从后端连接中获取满足条件的连接 * 1. 主从节点 * 2. 空闲节点 */ - public MySQLSession getCurrCachedSession(MySQLMetaBean targetMetaBean, boolean runOnSlave,boolean isOnlyIdle) { - MySQLSession result = null; - - MySQLRepBean repBean = targetMetaBean.getRepBean(); - - List backendList = backendMap.get(repBean); - if (backendList == null || backendList.isEmpty()) { - return null; - } - //TODO 暂时不考虑分片情况下,分布式事务的问题。 - result = backendList.stream().filter(f -> { - - if(!targetMetaBean.equals(f.getMySQLMetaBean())){ - return false; - } - - if (isOnlyIdle) { - return f.isIDLE(); + public MySQLSession getCurrCachedSession(MySQLMetaBean targetMetaBean, boolean runOnSlave, boolean isOnlyIdle) { + MySQLSession result = null; + + MySQLRepBean repBean = targetMetaBean.getRepBean(); + + List backendList = backendMap.get(repBean); + if (backendList == null || backendList.isEmpty()) { + return null; + } + //TODO 暂时不考虑分片情况下,分布式事务的问题。 + result = backendList.stream().filter(f -> { + + if (!targetMetaBean.equals(f.getMySQLMetaBean())) { + return false; + } + + if (isOnlyIdle) { + return f.isIDLE(); // Boolean flag = (Boolean) f.getSessionAttrMap().get(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey()); // return (flag == null) ? false : flag; - } - return true; - }) - .findFirst() - .orElse(null); - - if (result != null) { - //取消绑定,不主动绑定.当前方法可能被 reactor 其他session 调用。在这里只取消绑定 - if(curBackend!=null&& - curBackend.equals(result)){ - curBackend = null; - } - backendList.remove(result); - logger.debug("Using SessionMap backend connections for {} {}", - (runOnSlave ? 
"read" : "write"), - result); - return result; - } - return result; - } - - /* - * 判断后端连接 是否可以走从节点 - * @return - */ - private boolean canRunOnSlave(){ - //静态注解情况下 走读写分离 + } + return true; + }) + .findFirst() + .orElse(null); + + if (result != null) { + //取消绑定,不主动绑定.当前方法可能被 reactor 其他session 调用。在这里只取消绑定 + if (curBackend != null && + curBackend.equals(result)) { + curBackend = null; + } + backendList.remove(result); + logger.debug("Using SessionMap backend connections for {} {}", + (runOnSlave ? "read" : "write"), + result); + return result; + } + return result; + } + + /* + * 判断后端连接 是否可以走从节点 + * @return + */ + private boolean canRunOnSlave() { + //静态注解情况下 走读写分离 if (BufferSQLContext.ANNOTATION_BALANCE == sqlContext.getAnnotationType()) { final long balancevalue = sqlContext.getAnnotationValue(BufferSQLContext.ANNOTATION_BALANCE); - if(TokenHash.MASTER == balancevalue){ - return false; - }else if(TokenHash.SLAVE == balancevalue){ - return true; - }else{ - logger.error("sql balance type is invalid, run on slave [{}]",sqlContext.getRealSQL(0)); - } - return true; - } - - //非事务场景下,走从节点 - if(AutoCommit.ON ==autoCommit){ - if(masterSqlList.contains(sqlContext.getSQLType())){ - return false; - }else{ - //走从节点 - return true; - } - }else{ - return false; - } - } + if (TokenHash.MASTER == balancevalue) { + return false; + } else if (TokenHash.SLAVE == balancevalue) { + return true; + } else { + logger.error("sql balance type is invalid, run on slave [{}]", sqlContext.getRealSQL(0)); + } + return true; + } + + //非事务场景下,走从节点 + if (AutoCommit.ON == autoCommit) { + return !masterSqlList.contains(sqlContext.getSQLType()); + } else { + return false; + } + } + + public ExecutorService getExecutor() { + return executor; + } + + public void setExecutor(ExecutorService executor) { + this.executor = executor; + } + + public RouteResultset getCurRouteResultset() { + return curRouteResultset; + } + + public void setCurRouteResultset(RouteResultset curRouteResultset) { + 
this.curRouteResultset = curRouteResultset; + } } diff --git a/source/src/main/java/io/mycat/mycat2/beans/ColumnMeta.java b/source/src/main/java/io/mycat/mycat2/beans/ColumnMeta.java new file mode 100644 index 0000000..a1c4415 --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/beans/ColumnMeta.java @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.mycat2.beans; + +import java.io.Serializable; + +public class ColumnMeta implements Serializable { + public static final int COL_TYPE_DECIMAL = 0; + public static final int COL_TYPE_INT = 1; + public static final int COL_TYPE_SHORT = 2; + public static final int COL_TYPE_LONG = 3; + public static final int COL_TYPE_FLOAT = 4; + public static final int COL_TYPE_DOUBLE = 5; + public static final int COL_TYPE_NULL = 6; + public static final int COL_TYPE_TIMSTAMP = 7; + public static final int COL_TYPE_LONGLONG = 8; + public static final int COL_TYPE_INT24 = 9; + public static final int COL_TYPE_DATE = 0x0a; + public static final int COL_TYPE_DATETIME = 0X0C; + public static final int COL_TYPE_TIME = 0x0b; + public static final int COL_TYPE_YEAR = 0x0d; + public static final int COL_TYPE_NEWDATE = 0x0e; + public static final int COL_TYPE_VACHAR = 0x0f; + public static final int COL_TYPE_BIT = 0x10; + public static final int COL_TYPE_NEWDECIMAL = 0xf6; + public static final int COL_TYPE_ENUM = 0xf7; + public static final int COL_TYPE_SET = 0xf8; + public static final int COL_TYPE_TINY_BLOB = 0xf9; + public static final int COL_TYPE_TINY_TYPE_MEDIUM_BLOB = 0xfa; + public static final int COL_TYPE_TINY_TYPE_LONG_BLOB = 0xfb; + public static final int COL_TYPE_BLOB = 0xfc; + public static final int COL_TYPE_VAR_STRING = 0xfd; + public static final int COL_TYPE_STRING = 0xfe; + public static final int COL_TYPE_GEOMETRY = 0xff; + public final int colType; + public int colIndex; + public int decimals; + + public int avgSumIndex; + public int avgCountIndex; + + public ColumnMeta(int colIndex, int colType) { + super(); + this.colIndex = colIndex; + this.colType = colType; + } + + public ColumnMeta(int avgSumIndex, int avgCountIndex, int colType) { + super(); + this.avgSumIndex = avgSumIndex; + this.avgCountIndex = avgCountIndex; + this.colType = colType; + } + + public int getColIndex() { + return colIndex; + } + + public int getColType() { + return 
colType; + } + + @Override + public String toString() { + return "ColMeta [colIndex=" + colIndex + ", colType=" + colType + "]"; + } + +} \ No newline at end of file diff --git a/source/src/main/java/io/mycat/mycat2/cmds/interceptor/SQLAnnotationChain.java b/source/src/main/java/io/mycat/mycat2/cmds/interceptor/SQLAnnotationChain.java index 9fbd5dd..c8e7f84 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/interceptor/SQLAnnotationChain.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/interceptor/SQLAnnotationChain.java @@ -1,5 +1,11 @@ package io.mycat.mycat2.cmds.interceptor; +import io.mycat.mycat2.MySQLCommand; +import io.mycat.mycat2.MycatSession; +import io.mycat.mycat2.sqlannotations.AnnotationProcessor; +import io.mycat.mycat2.sqlannotations.SQLAnnotation; +import io.mycat.mycat2.sqlparser.BufferSQLContext; + import java.security.InvalidParameterException; import java.util.ArrayList; import java.util.LinkedHashMap; @@ -7,13 +13,6 @@ import java.util.Map; import java.util.stream.Collectors; -import io.mycat.mycat2.MySQLCommand; -import io.mycat.mycat2.MycatSession; -import io.mycat.mycat2.cmds.multinode.DbInMultiServerCmd; -import io.mycat.mycat2.sqlannotations.AnnotationProcessor; -import io.mycat.mycat2.sqlannotations.SQLAnnotation; -import io.mycat.mycat2.sqlparser.BufferSQLContext; - public class SQLAnnotationChain { private MySQLCommand target; @@ -57,11 +56,11 @@ public SQLAnnotationChain processRoute(MycatSession session) { case DB_IN_ONE_SERVER: break; case DB_IN_MULTI_SERVER: - if (session.curRouteResultset != null - && session.curRouteResultset.getNodes().length > 1) { - // DB_IN_MULTI_SERVER 模式下 - this.target = DbInMultiServerCmd.INSTANCE; - } +// if (session.curRouteResultset != null +// && session.curRouteResultset.getNodes().length > 1) { +// // DB_IN_MULTI_SERVER 模式下 +// this.target = DbInMultiServerCmd.INSTANCE; +// } break; case ANNOTATION_ROUTE: break; diff --git 
a/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java b/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java index ceec989..4a13682 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java @@ -4,17 +4,18 @@ import io.mycat.mycat2.MycatSession; import io.mycat.mycat2.beans.conf.DNBean; import io.mycat.mycat2.cmds.AbstractMultiDNExeCmd; -import io.mycat.mycat2.cmds.ComInitDB; import io.mycat.mycat2.cmds.DirectPassthrouhCmd; import io.mycat.mycat2.console.SessionKeyEnum; +import io.mycat.mycat2.hbt.TableMeta; import io.mycat.mycat2.route.RouteResultset; import io.mycat.mycat2.route.RouteResultsetNode; +import io.mycat.mycat2.tasks.DataNodeManager; import io.mycat.mycat2.tasks.HeapDataNodeMergeManager; -import io.mycat.mycat2.tasks.MyRowStream; -import io.mycat.mycat2.tasks.SQLQueryResultTask; +import io.mycat.mycat2.tasks.SQLQueryStream; import io.mycat.mysql.packet.ErrorPacket; import io.mycat.proxy.ProxyBuffer; import io.mycat.proxy.ProxyRuntime; +import io.mycat.util.ErrorCode; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -29,174 +30,133 @@ * Creation Time: 2018-01-20 * * @author zhangsiwei + * @author jamie * @since 2.0 */ public class DbInMultiServerCmd extends AbstractMultiDNExeCmd { public static final DbInMultiServerCmd INSTANCE = new DbInMultiServerCmd(); - private static final Logger logger = LoggerFactory.getLogger(ComInitDB.class); - private void broadcast(MycatSession mycatSession, RouteResultsetNode[] nodes) throws IOException { - ProxyBuffer curBuffer = mycatSession.proxyBuffer; - int readIndex = curBuffer.writeIndex; - int readMark = curBuffer.readMark; + private static final DirectPassthrouhCmd inner = DirectPassthrouhCmd.INSTANCE; + + private static final Logger logger = LoggerFactory.getLogger(DbInMultiServerCmd.class); + + private static void broadcast(MycatSession 
mycatSession, RouteResultsetNode[] nodes) throws IOException { int size = nodes.length; for (int i = 0; i < size; i++) { RouteResultsetNode node = nodes[i]; - /* - * 获取后端连接可能涉及到异步处理,这里需要先取消前端读写事件 - */ -// curBuffer.readIndex = readIndex; -// curBuffer.readMark = readMark; + DataNodeManager manager = mycatSession.merge; mycatSession.getBackendByDataNodeName(node.getName(), (mysqlsession, sender, success, result) -> { - if (success) { - try { - MyRowStream stream = new MyRowStream(mysqlsession); - stream.setAbstractDataNodeMerge(mycatSession.merge); - stream.fetchStream(node.getStatement()); - } catch (Exception e) { - mycatSession.closeBackendAndResponseError(mysqlsession, success, + try { + if (success) { + SQLQueryStream stream = new SQLQueryStream(node.getName(), mysqlsession, manager); + manager.addSQLQueryStream(stream); + stream.fetchSQL(node.getStatement()); + } else { + //这个关闭方法会把所有backend都解除绑定并关闭 + manager.closeMutilBackendAndResponseError(success, ((ErrorPacket) result)); } - } else { - mycatSession.closeBackendAndResponseError(mysqlsession, success, - ((ErrorPacket) result)); + } catch (Exception e) { + String errmsg = "db in multi server cmd Error. 
" + e.getMessage(); + manager.closeMutilBackendAndResponseError(false, ErrorCode.ERR_MULTI_NODE_FAILED, errmsg); } }); } + } @Override public boolean procssSQL(MycatSession mycatSession) throws IOException { DNBean dnBean = ProxyRuntime.INSTANCE.getConfig().getDNBean("dn1"); - logger.warn("dev 版本暂时还没有实现路由,默认路由到dn1,dn2"); DNBean dnBean2 = ProxyRuntime.INSTANCE.getConfig().getDNBean("dn2"); - mycatSession.curRouteResultset = new RouteResultset("", (byte) 0); - mycatSession.curRouteResultset.setNodes(new RouteResultsetNode[]{ - new RouteResultsetNode(dnBean.getName(), (byte) 1, mycatSession.sqlContext.getRealSQL(0)), - new RouteResultsetNode(dnBean2.getName(), (byte) 1, mycatSession.sqlContext.getRealSQL(0)) - }); - RouteResultsetNode[] nodes = mycatSession.curRouteResultset.getNodes(); - if (true) { - if (null != mycatSession.curRouteResultset) { - mycatSession.merge = new HeapDataNodeMergeManager(mycatSession.curRouteResultset, mycatSession); - if (nodes != null && nodes.length > 0) { - broadcast(mycatSession, nodes); - return false; - } - } - } else { - //lobal table optimization - - + RouteResultset curRouteResultset = new RouteResultset("select * from travelrecord", (byte) 0); + curRouteResultset.setNodes( + new RouteResultsetNode[]{ + new RouteResultsetNode(dnBean.getName(), (byte) 1, "select * from travelrecord"), + new RouteResultsetNode(dnBean2.getName(), (byte) 1, "select * from travelrecord")}); + mycatSession.setCurRouteResultset(curRouteResultset); + mycatSession.merge = new HeapDataNodeMergeManager(mycatSession.getCurRouteResultset(), mycatSession); + RouteResultsetNode[] nodes = mycatSession.merge.getRouteResultset().getNodes(); + if (nodes != null && nodes.length > 0) { + broadcast(mycatSession, nodes); + return false; } - return DirectPassthrouhCmd.INSTANCE.procssSQL(mycatSession); + return inner.procssSQL(mycatSession); } + @Override - public boolean onBackendResponse(MySQLSession session) throws IOException { -// session.proxyBuffer. 
-// - -// task.onSocketRead(session); - // 首先进行一次报文的读取操作 -// if (!session.readFromChannel()) { -// return false; -// } -// // 进行报文处理的流程化 -// boolean nextReadFlag = false; -// do { -// // 进行报文的处理流程 -// CommandHandler commandHandler = session.getMycatSession().commandHandler; -// if (commandHandler == C) -// nextReadFlag = commandHandler.procss(session); -// } while (nextReadFlag); -// -// // 获取当前是否结束标识 -// Boolean check = (Boolean) session.getSessionAttrMap() -// .get(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey()); -// -// MycatSession mycatSession = session.getMycatSession(); -// ProxyBuffer buffer = session.getProxyBuffer(); -// -//// if (++executeCount < session.getMycatSession().curRouteResultset.getNodes().length) { -//// // DbInMultiServer模式下,不考虑show tables等DSL语句的话,只有对全局表的操作才会跨节点,也就是对全局表的DDL,DML语句, -//// // 而对每个节点的全局表操作完后返回的报文都是一样的,因此只需要拿最后一次的报文返回给客户端即可 -//// if (session.curMSQLPackgInf.pkgType == MySQLPacket.OK_PACKET) { -//// // 因为不是最后一个节点的返回报文,所以这里讲readmark设为readIndex,也就是丢弃掉这次报文(仅考虑全局表的DDL, DML返回报文) -//// // TODO show tables类的DSL语句就不适用,这个后续考虑时再优化 -//// session.getProxyBuffer().readMark = session.getProxyBuffer().readIndex; -//// } -//// return false; -//// } -// -// // 检查到当前已经完成,执行添加操作 -// if (null != check && check) { -// // 当知道操作完成后,前段的注册感兴趣事件为读取 -// mycatSession.takeOwner(SelectionKey.OP_READ); -// } -// // 未完成执行继续读取操作 -// else { -// // 直接透传报文 -// mycatSession.takeOwner(SelectionKey.OP_WRITE); -// } -// buffer.flip(); -// // executeCount = 0; -// mycatSession.writeToChannel(); -// return true; - - - return false; + public boolean onFrontWriteFinished(MycatSession session) throws IOException { + if (session.merge.isMultiBackendMoreOne()) { + //@todo 改为迭代器实现 + TableMeta tableMeta = ((HeapDataNodeMergeManager) session.merge).getTableMeta(); + if (session.getSessionAttrMap().containsKey(SessionKeyEnum.SESSION_KEY_MERGE_OVER_FLAG.getKey()) && !tableMeta.isWriteFinish()) { + ProxyBuffer buffer = session.proxyBuffer; + buffer.reset(); + 
tableMeta.writeRowData(buffer); + buffer.flip(); + buffer.readIndex = buffer.writeIndex; + session.takeOwner(SelectionKey.OP_WRITE); + session.writeToChannel(); + return false; + } else { + session.merge.onfinished(); + session.getSessionAttrMap().remove(SessionKeyEnum.SESSION_KEY_MERGE_OVER_FLAG.getKey()); + session.proxyBuffer.flip(); + session.takeOwner(SelectionKey.OP_READ); + return true; + } + } else { + return inner.onFrontWriteFinished(session); + } } @Override - public boolean onBackendClosed(MySQLSession session, boolean normal) throws IOException { - // TODO Auto-generated method stub - return super.onBackendClosed(session, normal); + public void clearFrontResouces(MycatSession session, boolean sessionCLosed) { + if (session.merge != null && session.merge.isMultiBackendMoreOne()) { + session.merge.clearResouces(); + } + inner.clearFrontResouces(session, sessionCLosed); } + /** + * @param session + * @return + * @throws IOException + */ @Override - public boolean onFrontWriteFinished(MycatSession session) throws IOException { - // 判断是否结果集传输完成,决定命令是否结束,切换到前端读取数据 - // 检查当前已经结束,进行切换 - // 检查如果存在传输的标识,说明后传数据向前传传输未完成,注册后端的读取事件 - if (session.getSessionAttrMap() - .containsKey(SessionKeyEnum.SESSION_KEY_TRANSFER_OVER_FLAG.getKey())) { - session.proxyBuffer.flip(); - session.giveupOwner(SelectionKey.OP_READ); - return false; - } - // 当传输标识不存在,则说已经结束,则切换到前端的读取 - else { - session.proxyBuffer.flip(); - // session.chnageBothReadOpts(); - session.takeOwner(SelectionKey.OP_READ); - return true; + public boolean onBackendResponse(MySQLSession session) throws IOException { + if (session.getMycatSession().merge.isMultiBackendMoreOne()) { + throw new IOException("it is a bug , should call Stream"); } + return inner.onBackendResponse(session); } + @Override public boolean onBackendWriteFinished(MySQLSession session) throws IOException { - SQLQueryResultTask task = new SQLQueryResultTask(session.getMycatSession().merge); - session.setCurNIOHandler(task); - 
session.proxyBuffer.flip(); - session.change2ReadOpts(); - return false; + if (session.getMycatSession().merge.isMultiBackendMoreOne()) { + throw new IOException("it is a bug , should call Stream"); + } + return inner.onBackendWriteFinished(session); } + @Override - public void clearFrontResouces(MycatSession session, boolean sessionCLosed) { - if (sessionCLosed) { - session.recycleAllocedBuffer(session.getProxyBuffer()); - session.unbindAllBackend(); + public boolean onBackendClosed(MySQLSession session, boolean normal) throws IOException { + if (session.getMycatSession().merge.isMultiBackendMoreOne()) { + throw new IOException("it is a bug , should call Stream"); } + return inner.onBackendClosed(session, normal); } @Override - public void clearBackendResouces(MySQLSession mysqlSession, boolean sessionCLosed) { - if (sessionCLosed) { - mysqlSession.recycleAllocedBuffer(mysqlSession.getProxyBuffer()); + public void clearBackendResouces(MySQLSession session, boolean sessionCLosed) { + if (session.getMycatSession().merge.isMultiBackendMoreOne()) { + logger.error("it is a bug , should call Stream"); } + inner.clearBackendResouces(session, sessionCLosed); } } diff --git a/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java b/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java index b5f96e6..027e6a9 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/strategy/DBINMultiServerCmdStrategy.java @@ -1,31 +1,15 @@ package io.mycat.mycat2.cmds.strategy; -import java.io.IOException; - -import io.mycat.mycat2.cmds.multinode.DbInMultiServerCmd; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import io.mycat.mycat2.MycatSession; -import io.mycat.mycat2.cmds.ComChangeUserCmd; -import io.mycat.mycat2.cmds.ComFieldListCmd; -import io.mycat.mycat2.cmds.ComInitDB; -import io.mycat.mycat2.cmds.ComPingCmd; -import 
io.mycat.mycat2.cmds.ComQuitCmd; -import io.mycat.mycat2.cmds.ComStatisticsCmd; -import io.mycat.mycat2.cmds.DirectPassthrouhCmd; -import io.mycat.mycat2.cmds.NotSupportCmd; -import io.mycat.mycat2.cmds.sqlCmds.SqlComBeginCmd; -import io.mycat.mycat2.cmds.sqlCmds.SqlComCommitCmd; -import io.mycat.mycat2.cmds.sqlCmds.SqlComRollBackCmd; -import io.mycat.mycat2.cmds.sqlCmds.SqlComShutdownCmd; -import io.mycat.mycat2.cmds.sqlCmds.SqlComStartCmd; -import io.mycat.mycat2.route.RouteResultset; +import io.mycat.mycat2.cmds.*; +import io.mycat.mycat2.cmds.multinode.DbInMultiServerCmd; +import io.mycat.mycat2.cmds.sqlCmds.*; import io.mycat.mycat2.route.RouteStrategy; import io.mycat.mycat2.route.impl.DBInMultiServerRouteStrategy; import io.mycat.mycat2.sqlparser.BufferSQLContext; import io.mycat.mysql.packet.MySQLPacket; -import io.mycat.util.ErrorCode; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class DBINMultiServerCmdStrategy extends AbstractCmdStrategy { @@ -88,29 +72,29 @@ protected void initMySqlCmdHandler() { @Override protected boolean delegateRoute(MycatSession session) { - - byte sqltype = session.sqlContext.getSQLType() != 0 ? session.sqlContext.getSQLType() - : session.sqlContext.getCurSQLType(); - RouteResultset rrs = routeStrategy.route(session.schema, sqltype, - session.sqlContext.getRealSQL(0), null, session); - - if (rrs.getNodes() != null && rrs.getNodes().length > 1 && !rrs.isGlobalTable()) { - - session.curRouteResultset = null; - try { - logger.error( - "Multi node error! 
Not allowed to execute SQL statement across data nodes in DB_IN_MULTI_SERVER schemaType.\n" - + "Original SQL:[{}]", - session.sqlContext.getRealSQL(0)); - session.sendErrorMsg(ErrorCode.ERR_MULTI_NODE_FAILED, - "Not allowed to execute SQL statement across data nodes in DB_IN_MULTI_SERVER schemaType."); - } catch (IOException e) { - session.close(false, e.getMessage()); - } - return false; - } else { - session.curRouteResultset = rrs; - } +// +// byte sqltype = session.sqlContext.getSQLType() != 0 ? session.sqlContext.getSQLType() +// : session.sqlContext.getCurSQLType(); +// RouteResultset routeResultset = routeStrategy.route(session.schema, sqltype, +// session.sqlContext.getRealSQL(0), null, session); +// +// if (routeResultset.getNodes() != null && routeResultset.getNodes().length > 1 && !routeResultset.isGlobalTable()) { +// +// session.curRouteResultset = null; +// try { +// logger.error( +// "Multi node error! Not allowed to execute SQL statement across data nodes in DB_IN_MULTI_SERVER schemaType.\n" +// + "Original SQL:[{}]", +// session.sqlContext.getRealSQL(0)); +// session.sendErrorMsg(ErrorCode.ERR_MULTI_NODE_FAILED, +// "Not allowed to execute SQL statement across data nodes in DB_IN_MULTI_SERVER schemaType."); +// } catch (IOException e) { +// session.close(false, e.getMessage()); +// } +// return false; +// } else { +// session.curRouteResultset = routeResultset; +// } return true; } } diff --git a/source/src/main/java/io/mycat/mycat2/console/SessionKeyEnum.java b/source/src/main/java/io/mycat/mycat2/console/SessionKeyEnum.java index 0d88b93..2bcab00 100644 --- a/source/src/main/java/io/mycat/mycat2/console/SessionKeyEnum.java +++ b/source/src/main/java/io/mycat/mycat2/console/SessionKeyEnum.java @@ -103,11 +103,15 @@ public enum SessionKeyEnum { */ SESSION_KEY_HBT_TABLE_META("session_key_hbt_table_meta"), + /** + * 标识当前merge数据是否结束的标识,存在此标识,标识未结束,否则即为结束 + */ + SESSION_KEY_MERGE_OVER_FLAG("session_key_merge_over_flag"), ; private String key; - private 
SessionKeyEnum(String key) { + SessionKeyEnum(String key) { this.key = key; } diff --git a/source/src/main/java/io/mycat/mycat2/hbt/ResultSetMeta.java b/source/src/main/java/io/mycat/mycat2/hbt/ResultSetMeta.java index c9d349b..67d5705 100644 --- a/source/src/main/java/io/mycat/mycat2/hbt/ResultSetMeta.java +++ b/source/src/main/java/io/mycat/mycat2/hbt/ResultSetMeta.java @@ -1,13 +1,13 @@ package io.mycat.mycat2.hbt; +import io.mycat.mysql.packet.FieldPacket; +import io.mycat.proxy.ProxyBuffer; +import io.mycat.util.PacketUtil; + import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; - -import io.mycat.mysql.packet.FieldPacket; -import io.mycat.proxy.ProxyBuffer; -import io.mycat.util.PacketUtil; /** * 字段以及字段所有對應的類型 * */ @@ -31,9 +31,9 @@ public ResultSetMeta(List fieldNameList, int[] fieldTypeList) { fieldPosMap.put(fieldNameList.get(i), i); } } - - - public void addFiled(String fieldName, int fieldType) { + + + public void addField(String fieldName, int fieldType) { int pos = fieldNameList.size(); fieldNameList.add(fieldName); fieldPosMap.put(fieldName, pos); diff --git a/source/src/main/java/io/mycat/mycat2/hbt/TableMeta.java b/source/src/main/java/io/mycat/mycat2/hbt/TableMeta.java index efde6a0..b7235b3 100644 --- a/source/src/main/java/io/mycat/mycat2/hbt/TableMeta.java +++ b/source/src/main/java/io/mycat/mycat2/hbt/TableMeta.java @@ -1,97 +1,101 @@ package io.mycat.mycat2.hbt; +import io.mycat.mysql.packet.EOFPacket; +import io.mycat.mysql.packet.RowDataPacket; +import io.mycat.proxy.ProxyBuffer; +import io.mycat.util.ParseUtil; + import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.stream.Collector; import java.util.stream.Collectors; -import io.mycat.mysql.packet.EOFPacket; -import io.mycat.mysql.packet.RowDataPacket; -import io.mycat.proxy.ProxyBuffer; -import io.mycat.util.ParseUtil; - /** - * 用于存储sql的结果集 + * 用于存储sql的结果集 + * * @author zhangwy - * 
*/ + */ public class TableMeta { - public String table; - public String alias; - public List> fieldValues; - public int fieldCount; - public ResultSetMeta headerResultSetMeta; - private byte packetId; - private int writeRowDataIndex; - public TableMeta(String table, String alias) { - this.table = table; - this.alias = alias; - } - public TableMeta() { - } - /**/ - public void init(int fieldCount) { - this.fieldCount = fieldCount; - headerResultSetMeta = new ResultSetMeta(fieldCount); - this.fieldValues = new ArrayList>(); - } - - public void init(ResultSetMeta resultSetMeta) { - this.fieldCount = resultSetMeta.getFiledCount(); - if(this.fieldValues != null) throw new IllegalArgumentException("TableMeta has init"); - this.fieldValues = new ArrayList>(); - headerResultSetMeta = resultSetMeta; - - } - - public synchronized void addFieldValues(List row) { - fieldValues.add(row); - } - - public ResultSetMeta getHeaderResultSet() { - return headerResultSetMeta; - } - - public void writeBegin(ProxyBuffer buffer) { - this.packetId = 1; - packetId = writeResultSetHeaderPacket(packetId, buffer); - packetId = headerResultSetMeta.write(packetId, buffer); - packetId = writeEofPacket(packetId, buffer); - this.writeRowDataIndex = 0; - } - - public void writeRowData(ProxyBuffer buffer) { - - for(int index = this.writeRowDataIndex; index < fieldValues.size(); index++) { - - RowDataPacket dataPacket = new RowDataPacket(fieldCount); - List fieldValue = fieldValues.get(index); - for(byte[] value : fieldValue) { - dataPacket.add(value); - } - int size = dataPacket.calcPacketSize() + ParseUtil.msyql_packetHeaderSize; - if(size <= buffer.getBuffer().remaining()) { - dataPacket.packetId = packetId ++; - dataPacket.write(buffer); - this.writeRowDataIndex ++; - } else { - break; - } - } - //是否可以写入Eof包 - if(5 <= buffer.getBuffer().remaining() && this.writeRowDataIndex == fieldValues.size()) { - packetId = writeEofPacket(packetId, buffer); - this.writeRowDataIndex ++; - } - } - public boolean 
isWriteFinish() { - return this.writeRowDataIndex > fieldValues.size(); - } - - -// public void write(ProxyBuffer buffer) { + public String table; + public String alias; + public List> fieldValues; + public int fieldCount; + public ResultSetMeta headerResultSetMeta; + private byte packetId; + private int writeRowDataIndex; + + public TableMeta(String table, String alias) { + this.table = table; + this.alias = alias; + } + + public TableMeta() { + } + + /**/ + public void init(int fieldCount) { + this.fieldCount = fieldCount; + headerResultSetMeta = new ResultSetMeta(fieldCount); + this.fieldValues = new ArrayList<>(); + } + + public void init(ResultSetMeta resultSetMeta) { + this.fieldCount = resultSetMeta.getFiledCount(); + if (this.fieldValues != null) throw new IllegalArgumentException("TableMeta has init"); + this.fieldValues = new ArrayList<>(); + headerResultSetMeta = resultSetMeta; + + } + + public void addFieldValues(List row) { + fieldValues.add(row); + } + + public ResultSetMeta getHeaderResultSet() { + return headerResultSetMeta; + } + + public void writeBegin(ProxyBuffer buffer) { + this.packetId = 1; + packetId = writeResultSetHeaderPacket(packetId, buffer); + packetId = headerResultSetMeta.write(packetId, buffer); + packetId = writeEofPacket(packetId, buffer); + this.writeRowDataIndex = 0; + } + + public void writeRowData(ProxyBuffer buffer) { + + for (int index = this.writeRowDataIndex; index < fieldValues.size(); index++) { + + RowDataPacket dataPacket = new RowDataPacket(fieldCount); + List fieldValue = fieldValues.get(index); + for (byte[] value : fieldValue) { + dataPacket.add(value); + } + int size = dataPacket.calcPacketSize() + ParseUtil.msyql_packetHeaderSize; + if (size <= buffer.getBuffer().remaining()) { + dataPacket.packetId = packetId++; + dataPacket.write(buffer); + this.writeRowDataIndex++; + } else { + break; + } + } + //是否可以写入Eof包 + if (5 <= buffer.getBuffer().remaining() && this.writeRowDataIndex == fieldValues.size()) { + packetId 
= writeEofPacket(packetId, buffer); + this.writeRowDataIndex++; + } + } + + public boolean isWriteFinish() { + return this.writeRowDataIndex > fieldValues.size(); + } + + + // public void write(ProxyBuffer buffer) { // byte packetId = 1; // packetId = writeResultSetHeaderPacket(packetId, buffer); // packetId = headerResultSetMeta.write(packetId, buffer); @@ -100,24 +104,24 @@ public boolean isWriteFinish() { // packetId = writeEofPacket(packetId, buffer); // } // - private byte writeEofPacket(byte packetId, ProxyBuffer buffer) { - EOFPacket eofPacket = new EOFPacket(); - eofPacket.packetId = packetId ++; - eofPacket.warningCount = 0; - eofPacket.write(buffer); - return packetId; - } - - private byte writeResultSetHeaderPacket(byte packetId, ProxyBuffer buffer) { - buffer.writeFixInt(3, ProxyBuffer.getLenencLength(fieldCount)); - buffer.writeByte(packetId ++); - buffer.writeLenencInt(fieldCount); - return packetId; - } - -// - private byte writeRowData(byte packetId, ProxyBuffer buffer) { - for(List fieldValue : fieldValues) { + private byte writeEofPacket(byte packetId, ProxyBuffer buffer) { + EOFPacket eofPacket = new EOFPacket(); + eofPacket.packetId = packetId++; + eofPacket.warningCount = 0; + eofPacket.write(buffer); + return packetId; + } + + private byte writeResultSetHeaderPacket(byte packetId, ProxyBuffer buffer) { + buffer.writeFixInt(3, ProxyBuffer.getLenencLength(fieldCount)); + buffer.writeByte(packetId++); + buffer.writeLenencInt(fieldCount); + return packetId; + } + + // + private byte writeRowData(byte packetId, ProxyBuffer buffer) { + for (List fieldValue : fieldValues) { // int tmpWriteIndex = buffer.writeIndex; // buffer.writeIndex +=3; // buffer.writeByte(packetId ++); @@ -127,76 +131,79 @@ private byte writeRowData(byte packetId, ProxyBuffer buffer) { // //写入长度 // buffer.putFixInt(tmpWriteIndex, 3, buffer.writeIndex - tmpWriteIndex - ParseUtil.msyql_packetHeaderSize); // - RowDataPacket dataPacket = new RowDataPacket(fieldCount); - for(byte[] 
value : fieldValue) { - dataPacket.add(value); - } - dataPacket.packetId = packetId ++; - if(dataPacket.calcPacketSize() + ParseUtil.msyql_packetHeaderSize > buffer.getBuffer().remaining()) { - dataPacket.write(buffer); - } - } - return packetId; - } - public void print() { - for(List fieldValue : fieldValues) { - - for(byte[] value : fieldValue) { - System.out.print(String.format("%s ", new String(value))); - } - System.out.println(""); - } - } - public String getFileds(String lJoinKey) { - - Integer pos = getHeaderResultSet().getFieldPos(lJoinKey); - if(pos == null) throw new IllegalArgumentException("fileds not found joinkey " + lJoinKey); - StringBuilder sb = new StringBuilder(""); - fieldValues.forEach(row -> { - String value = new String(row.get(pos)); - sb.append("'").append(value).append("',"); - }); - String ids = "''"; - if(sb.length() > 1) { - ids = sb.substring(0, sb.length() - 1); - } - return ids; - - } - - public Map>> getKeyMap(String key) { - Map>> fieldMap = new HashMap>>(); - fieldValues.forEach(row -> { - int pos = headerResultSetMeta.getFieldPos(key); - String value = new String(row.get(pos)); - List> rowDataList = fieldMap.get(value); - if (rowDataList == null) { - rowDataList = new ArrayList>(); - fieldMap.put(value, rowDataList); - } - rowDataList.add(row); - }); - return fieldMap; - } - - public List> getFieldValues() { - return fieldValues; - } - public void setFieldValues(List> fieldValues) { - this.fieldValues = fieldValues; - } - - @Override - public String toString() { - return "TableMeta{" + - "table='" + table + '\'' + - ", alias='" + alias + '\'' + - ", fieldValues=" + fieldValues.stream().flatMap(i->i.stream()).map(i->new String(i)) - .collect(Collectors.joining(","))+ - ", fieldCount=" + fieldCount + - ", headerResultSetMeta=" + headerResultSetMeta + - ", packetId=" + packetId + - ", writeRowDataIndex=" + writeRowDataIndex + - '}'; - } + RowDataPacket dataPacket = new RowDataPacket(fieldCount); + for (byte[] value : fieldValue) { 
+ dataPacket.add(value); + } + dataPacket.packetId = packetId++; + if (dataPacket.calcPacketSize() + ParseUtil.msyql_packetHeaderSize > buffer.getBuffer().remaining()) { + dataPacket.write(buffer); + } + } + return packetId; + } + + public void print() { + for (List fieldValue : fieldValues) { + + for (byte[] value : fieldValue) { + System.out.print(String.format("%s ", new String(value))); + } + System.out.println(""); + } + } + + public String getFileds(String lJoinKey) { + + Integer pos = getHeaderResultSet().getFieldPos(lJoinKey); + if (pos == null) throw new IllegalArgumentException("fileds not found joinkey " + lJoinKey); + StringBuilder sb = new StringBuilder(""); + fieldValues.forEach(row -> { + String value = new String(row.get(pos)); + sb.append("'").append(value).append("',"); + }); + String ids = "''"; + if (sb.length() > 1) { + ids = sb.substring(0, sb.length() - 1); + } + return ids; + + } + + public Map>> getKeyMap(String key) { + Map>> fieldMap = new HashMap>>(); + fieldValues.forEach(row -> { + int pos = headerResultSetMeta.getFieldPos(key); + String value = new String(row.get(pos)); + List> rowDataList = fieldMap.get(value); + if (rowDataList == null) { + rowDataList = new ArrayList>(); + fieldMap.put(value, rowDataList); + } + rowDataList.add(row); + }); + return fieldMap; + } + + public List> getFieldValues() { + return fieldValues; + } + + public void setFieldValues(List> fieldValues) { + this.fieldValues = fieldValues; + } + + @Override + public String toString() { + return "TableMeta{" + + "table='" + table + '\'' + + ", alias='" + alias + '\'' + + ", fieldValues=" + fieldValues.stream().flatMap(i -> i.stream()).map(i -> new String(i)) + .collect(Collectors.joining(",")) + + ", fieldCount=" + fieldCount + + ", headerResultSetMeta=" + headerResultSetMeta + + ", packetId=" + packetId + + ", writeRowDataIndex=" + writeRowDataIndex + + '}'; + } } diff --git a/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java 
b/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java index a026253..5596001 100644 --- a/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java +++ b/source/src/main/java/io/mycat/mycat2/route/RouteResultset.java @@ -89,14 +89,14 @@ public void setSelectForUpdate(boolean selectForUpdate) { public AtomicInteger count = new AtomicInteger(0); - public void countDown(MySQLSession session, Runnable runnable){ + public void countDown(MySQLSession session, Runnable runnable) { int c = count.decrementAndGet(); - if (c ==0){ - System.out.println("count=>"+c); + if (c == 0) { runnable.run(); count.set(nodes.length); } } + public List getTables() { return tables; } @@ -300,10 +300,7 @@ public Set getSubTables() { } public boolean isDistTable() { - if (this.getSubTables() != null && !this.getSubTables().isEmpty()) { - return true; - } - return false; + return this.getSubTables() != null && !this.getSubTables().isEmpty(); } @Override diff --git a/source/src/main/java/io/mycat/mycat2/route/impl/AbstractRouteStrategy.java b/source/src/main/java/io/mycat/mycat2/route/impl/AbstractRouteStrategy.java index 2f42c00..8697d0f 100644 --- a/source/src/main/java/io/mycat/mycat2/route/impl/AbstractRouteStrategy.java +++ b/source/src/main/java/io/mycat/mycat2/route/impl/AbstractRouteStrategy.java @@ -1,17 +1,16 @@ package io.mycat.mycat2.route.impl; -import java.sql.SQLNonTransientException; -import java.sql.SQLSyntaxErrorException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import io.mycat.mycat2.MycatSession; import io.mycat.mycat2.beans.conf.SchemaBean; import io.mycat.mycat2.beans.conf.SchemaConfig; import io.mycat.mycat2.route.RouteResultset; import io.mycat.mycat2.route.RouteStrategy; import io.mycat.mycat2.sqlparser.BufferSQLContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.SQLNonTransientException; +import java.sql.SQLSyntaxErrorException; public abstract class AbstractRouteStrategy implements RouteStrategy 
{ @@ -54,7 +53,7 @@ public RouteResultset route(SchemaBean schema, byte sqlType, String origSQL, Str */ /* * if (LOGGER.isDebugEnabled() && origSQL.startsWith(LoadData.loadDataHint)) { - * rrs.setCacheAble(false); } + * routeResultset.setCacheAble(false); } */ /** @@ -62,14 +61,14 @@ public RouteResultset route(SchemaBean schema, byte sqlType, String origSQL, Str * select ... for update的时候动态设定RouteResultsetNode的canRunInReadDB属性 */ /* - * if (sc != null ) { rrs.setAutocommit(sc.isAutocommit()); } + * if (sc != null ) { routeResultset.setAutocommit(sc.isAutocommit()); } */ /** * DDL 语句的路由 */ if (BufferSQLContext.ALTER_SQL == sqlType) { - // return RouterUtil.routeToDDLNode(rrs, sqlType, stmt, schema); + // return RouterUtil.routeToDDLNode(routeResultset, sqlType, stmt, schema); return null; } @@ -78,12 +77,12 @@ public RouteResultset route(SchemaBean schema, byte sqlType, String origSQL, Str */ if ((schema.getTables() == null || schema.getTables().isEmpty()) && BufferSQLContext.SHOW_SQL != sqlType) { - // rrs = RouterUtil.routeToSingleNode(rrs, schema.getDataNode(), stmt); + // routeResultset = RouterUtil.routeToSingleNode(routeResultset, schema.getDataNode(), stmt); rrs = null; } else { - // RouteResultset returnedSet = routeSystemInfo(schema, sqlType, stmt, rrs); + // RouteResultset returnedSet = routeSystemInfo(schema, sqlType, stmt, routeResultset); // if (returnedSet == null) { - // rrs = routeNormalSqlWithAST(schema, stmt, rrs, charset, sqlType, mycatSession); + // routeResultset = routeNormalSqlWithAST(schema, stmt, routeResultset, charset, sqlType, mycatSession); // } } diff --git a/source/src/main/java/io/mycat/mycat2/sqlannotations/CacheResult.java b/source/src/main/java/io/mycat/mycat2/sqlannotations/CacheResult.java index 3ddf8f4..5f6b140 100644 --- a/source/src/main/java/io/mycat/mycat2/sqlannotations/CacheResult.java +++ b/source/src/main/java/io/mycat/mycat2/sqlannotations/CacheResult.java @@ -1,14 +1,12 @@ package io.mycat.mycat2.sqlannotations; 
-import java.util.Map; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import io.mycat.mycat2.MycatSession; import io.mycat.mycat2.cmds.interceptor.SQLAnnotationChain; -import io.mycat.mycat2.cmds.interceptor.SQLAnnotationCmd; import io.mycat.mycat2.sqlparser.BufferSQLContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Map; /** * Created by jamie on 2017/9/15. @@ -27,8 +25,8 @@ public boolean apply(MycatSession session,SQLAnnotationChain chain) { // SQLAnnotationCmd cmd = meta.getSQLAnnotationCmd(); // cmd.setSqlAnnotationChain(chain); // chain.addCmdChain(this,cmd); - - BufferSQLContext context = session.sqlContext; + + BufferSQLContext context = session.sqlContext; context.setAnnotationType(BufferSQLContext.ANNOTATION_SQL_CACHE); context.setAnnotationValue(BufferSQLContext.ANNOTATION_CACHE_TIME,meta.getCacheTime()); context.setAnnotationValue(BufferSQLContext.ANNOTATION_ACCESS_COUNT,meta.getAccessCount()); diff --git a/source/src/main/java/io/mycat/mycat2/tasks/AbstractDataNodeMerge.java b/source/src/main/java/io/mycat/mycat2/tasks/AbstractDataNodeMerge.java deleted file mode 100644 index 976d842..0000000 --- a/source/src/main/java/io/mycat/mycat2/tasks/AbstractDataNodeMerge.java +++ /dev/null @@ -1,85 +0,0 @@ -package io.mycat.mycat2.tasks; - -import io.mycat.mycat2.ColumnMeta; -import io.mycat.mycat2.MycatSession; -import io.mycat.mycat2.PackWraper; -import io.mycat.mycat2.route.RouteResultset; -import org.apache.log4j.Logger; - -import java.nio.ByteBuffer; -import java.util.Iterator; -import java.util.Map; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.LinkedBlockingQueue; - -public abstract class AbstractDataNodeMerge implements Runnable { - - private static Logger LOGGER = Logger.getLogger(AbstractDataNodeMerge.class); - /** - * row 有多少col - */ - protected int fieldCount; - - /** - * 本次select的路由缓存集 - */ - protected final RouteResultset rrs; 
- /** - * 夸分片处理handler - */ - protected MultiNodeQueryHandler multiQueryHandler = null; - - /** - * 是否执行流式结果集输出 - */ - - protected boolean isStreamOutputResult = false; - - /** - * rowData缓存队列 - */ - protected LinkedBlockingQueue packs = new LinkedBlockingQueue<>(); - - MycatSession mycatSession; - /** - * 分片结束包 - */ - protected static final PackWraper END_FLAG_PACK = new PackWraper(null,null); - public AbstractDataNodeMerge(RouteResultset rrs, MycatSession mycatSession) { - this.rrs = rrs; - this.mycatSession = mycatSession;; - this.executor = Executors.newSingleThreadExecutor(); - } - public void onEOF() { - packs.add(END_FLAG_PACK); - executor.submit(this); - } - ExecutorService executor; - - public boolean onNewRecords(String repName, ByteBuffer rowData) { - /* - 读取的数据范围是 readIndex --- writeIndex 之间的数据. - */ - System.out.println("onNewRecords"+repName + rowData); - if(packs.offer(new PackWraper(rowData,repName))){ - executor.submit(this); - return true; - }else{ - return false; - } - } - - public abstract void onRowMetaData(Map columToIndx, int fieldCount); - - public RouteResultset getRrs() { - return this.rrs; - } - - /** - * 做最后的结果集输出 - * @return (最多i*(offset+size)行数据) - */ - public abstract Iterator getResults(byte[] eof); - public abstract void clear(); -} diff --git a/source/src/main/java/io/mycat/mycat2/tasks/DataNodeManager.java b/source/src/main/java/io/mycat/mycat2/tasks/DataNodeManager.java new file mode 100644 index 0000000..6d2705a --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/tasks/DataNodeManager.java @@ -0,0 +1,171 @@ +package io.mycat.mycat2.tasks; + +import io.mycat.mycat2.MySQLSession; +import io.mycat.mycat2.MycatSession; +import io.mycat.mycat2.PackWraper; +import io.mycat.mycat2.beans.ColumnMeta; +import io.mycat.mycat2.route.RouteResultset; +import io.mycat.mysql.packet.ErrorPacket; +import org.apache.log4j.Logger; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import 
java.util.Iterator; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.LinkedBlockingQueue; + +public abstract class DataNodeManager implements Runnable { + + /** + * 分片结束包 + */ + protected static final PackWraper END_FLAG_PACK = new PackWraper(null, null); + private static Logger LOGGER = Logger.getLogger(DataNodeManager.class); + + /** + * row 有多少col + */ + protected int fieldCount; + /** + * 本次select的路由缓存集 + */ + protected RouteResultset routeResultset; + + /** + * rowData缓存队列 + */ + protected LinkedBlockingQueue packs = new LinkedBlockingQueue<>(); + + MycatSession mycatSession; + ExecutorService executor; + ArrayList backendStreams = new ArrayList<>(); + + + public DataNodeManager(RouteResultset routeResultset, MycatSession mycatSession) { + this.routeResultset = routeResultset; + this.mycatSession = mycatSession; + this.executor = Executors.newSingleThreadExecutor(); + } + + protected void init(RouteResultset routeResultset, MycatSession mycatSession) { + this.routeResultset = routeResultset; + this.mycatSession = mycatSession; + this.executor = Executors.newSingleThreadExecutor(); + } + + + public void onEOF(String dataNode) { + packs.add(END_FLAG_PACK); + executor.submit(this); + } + + public abstract void onError(String dataNode, String msg); + + public boolean onNewRecords(String dataNode, ByteBuffer rowData) { + /* + 读取的数据范围是 readIndex --- writeIndex 之间的数据. 
+ */ + System.out.println("onNewRecords" + dataNode + rowData); + if (packs.offer(new PackWraper(rowData, dataNode))) { + executor.submit(this); + return true; + } else { + return false; + } + } + + public abstract void onRowMetaData(String dataNode, Map columToIndx, int fieldCount); + + public RouteResultset getRouteResultset() { + return this.routeResultset; + } + + /** + * 做最后的结果集输出 + * + * @return (最多i * ( offset + size)行数据) + */ + public abstract Iterator getResults(); + + public void clearResouces() { + this.packs.clear(); + this.fieldCount = 0; + this.routeResultset = null; + clearSQLQueryStreamResouces(); + } + + public void close(boolean normal, String error) { + this.executor.shutdown(); + unbindSQLQueryStreams(normal, error); + this.packs = null; + this.executor = null; + mycatSession.merge = null; + } + + public abstract void onfinished(); + + + public void closeMutilBackendAndResponseError(boolean normal, int errno, String error) { + clearResouces(); + unbindSQLQueryStreams(normal, error); + close(normal, error); + this.mycatSession.takeBufferOwnerOnly(); + try { + this.mycatSession.sendErrorMsg(errno, error); + } catch (IOException e) { + e.printStackTrace(); + } + } + + public void closeMutilBackendAndResponseError(boolean normal, ErrorPacket error) { + clearResouces(); + unbindSQLQueryStreams(normal, error.message); + close(normal, error.message); + this.mycatSession.takeBufferOwnerOnly(); + try { + this.mycatSession.responseOKOrError(error); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** + * 此方法不会进行 bind 操作 + * + * @param + */ + public void addSQLQueryStream(SQLQueryStream stream) { + this.backendStreams.add(stream); + } + + public void unbindSQLQueryStreams(boolean normal, String error) { + Iterator iterator = this.backendStreams.iterator(); + while (iterator.hasNext()) { + MySQLSession mysqlsession = iterator.next().session; + this.mycatSession.unbindBeckend(mysqlsession); + if (!normal) { + mysqlsession.close(normal, error); 
+ } + iterator.remove();// ArrayList iterator support remove to without advantage loop + } + } + + /** + * 正常情况下unbind + * 解除 backend mysql session 对 mycat session 的绑定 不解除 mycat 对 mysql session 的缓存 + * 即不清除 backendMap 对 mysql session的引用 + * cjw + */ + public void clearSQLQueryStreamResouces() { + for (SQLQueryStream backend : this.backendStreams) { + backend.clearResouces(); + } + } + + public boolean isMultiBackendMoreOne() { + return backendStreams.size() > 0; + } +} diff --git a/source/src/main/java/io/mycat/mycat2/tasks/DataNodeMergeManager.java b/source/src/main/java/io/mycat/mycat2/tasks/DataNodeMergeManager.java deleted file mode 100644 index 04846c9..0000000 --- a/source/src/main/java/io/mycat/mycat2/tasks/DataNodeMergeManager.java +++ /dev/null @@ -1,435 +0,0 @@ -//package io.mycat.mycat2.tasks; -// -//import io.mycat.mycat2.route.RouteResultset; -//import io.mycat.mysql.packet.RowDataPacket; -//import io.mycat.util.StringUtil; -// -//import java.io.IOException; -//import java.nio.ByteBuffer; -//import java.util.*; -//import java.util.concurrent.atomic.AtomicBoolean; -// -//public class DataNodeMergeManager { -// public DataNodeMergeManager(MultiNodeQueryHandler handler, RouteResultset rrs, AtomicBoolean isMiddleResultDone) { -// super(handler,rrs); -// this.isMiddleResultDone = isMiddleResultDone; -// this.myCatMemory = MycatServer.getInstance().getMyCatMemory(); -// this.memoryManager = myCatMemory.getResultMergeMemoryManager(); -// this.conf = myCatMemory.getConf(); -// this.limitStart = rrs.getLimitStart(); -// this.limitSize = rrs.getLimitSize(); -// } -// -// -// public void onRowMetaData(Map columToIndx, int fieldCount) throws IOException { -// -// if (LOGGER.isDebugEnabled()) { -// LOGGER.debug("field metadata keys:" + columToIndx != null ? columToIndx.keySet() : "null"); -// LOGGER.debug("field metadata values:" + columToIndx != null ? 
columToIndx.values() : "null"); -// } -// -// OrderCol[] orderCols = null; -// StructType schema = null; -// UnsafeExternalRowSorter.PrefixComputer prefixComputer = null; -// PrefixComparator prefixComparator = null; -// -// -// DataNodeMemoryManager dataNodeMemoryManager = null; -// UnsafeExternalRowSorter sorter = null; -// -// int[] groupColumnIndexs = null; -// this.fieldCount = fieldCount; -// -// if (rrs.getGroupByCols() != null) { -// groupColumnIndexs = toColumnIndex(rrs.getGroupByCols(), columToIndx); -// if (LOGGER.isDebugEnabled()) { -// for (int i = 0; i mergCols = new LinkedList(); -// Map mergeColsMap = rrs.getMergeCols(); -// -// if (mergeColsMap != null) { -// -// if (LOGGER.isDebugEnabled() && rrs.getMergeCols() != null) { -// LOGGER.debug("isHasAggrColumn:" + rrs.getMergeCols().toString()); -// } -// for (Map.Entry mergEntry : mergeColsMap -// .entrySet()) { -// String colName = mergEntry.getKey().toUpperCase(); -// int type = mergEntry.getValue(); -// if (MergeCol.MERGE_AVG == type) { -// ColMeta sumColMeta = columToIndx.get(colName + "SUM"); -// ColMeta countColMeta = columToIndx.get(colName -// + "COUNT"); -// if (sumColMeta != null && countColMeta != null) { -// ColMeta colMeta = new ColMeta(sumColMeta.colIndex, -// countColMeta.colIndex, -// sumColMeta.getColType()); -// mergCols.add(new MergeCol(colMeta, mergEntry -// .getValue())); -// } -// } else { -// ColMeta colMeta = columToIndx.get(colName); -// mergCols.add(new MergeCol(colMeta, mergEntry.getValue())); -// } -// } -// } -// -// // add no alias merg column -// for (Map.Entry fieldEntry : columToIndx.entrySet()) { -// String colName = fieldEntry.getKey(); -// int result = MergeCol.tryParseAggCol(colName); -// if (result != MergeCol.MERGE_UNSUPPORT -// && result != MergeCol.MERGE_NOMERGE) { -// mergCols.add(new MergeCol(fieldEntry.getValue(), result)); -// } -// } -// -// /** -// * Group操作 -// */ -// MergeCol[] mergColsArrays = mergCols.toArray(new MergeCol[mergCols.size()]); -// 
unsafeRowGrouper = new UnsafeRowGrouper(columToIndx,rrs.getGroupByCols(), -// mergColsArrays, -// rrs.getHavingCols()); -// -// if(mergColsArrays!=null&&mergColsArrays.length>0){ -// mergeColsIndex = new int[mergColsArrays.length]; -// for(int i = 0;i orders = rrs.getOrderByCols(); -// orderCols = new OrderCol[orders.size()]; -// int i = 0; -// for (Map.Entry entry : orders.entrySet()) { -// String key = StringUtil.removeBackquote(entry.getKey() -// .toUpperCase()); -// ColMeta colMeta = columToIndx.get(key); -// if (colMeta == null) { -// throw new IllegalArgumentException( -// "all columns in order by clause should be in the selected column list!" -// + entry.getKey()); -// } -// orderCols[i++] = new OrderCol(colMeta, entry.getValue()); -// } -// -// /** -// * 构造全局排序器 -// */ -// schema = new StructType(columToIndx,fieldCount); -// schema.setOrderCols(orderCols); -// -// prefixComputer = new RowPrefixComputer(schema); -// -//// if(orderCols.length>0 -//// && orderCols[0].getOrderType() -//// == OrderCol.COL_ORDER_TYPE_ASC){ -//// prefixComparator = PrefixComparators.LONG; -//// }else { -//// prefixComparator = PrefixComparators.LONG_DESC; -//// } -// -// prefixComparator = getPrefixComparator(orderCols); -// -// dataNodeMemoryManager = -// new DataNodeMemoryManager(memoryManager,Thread.currentThread().getId()); -// -// /** -// * 默认排序,只是将数据连续存储到内存中即可。 -// */ -// globalSorter = new UnsafeExternalRowSorter( -// dataNodeMemoryManager, -// myCatMemory, -// schema, -// prefixComparator, prefixComputer, -// conf.getSizeAsBytes("mycat.buffer.pageSize","32k"), -// false/**是否使用基数排序*/, -// true/**排序*/); -// } -// -// -// if(conf.getBoolean("mycat.stream.output.result",false) -// && globalSorter == null -// && unsafeRowGrouper == null){ -// setStreamOutputResult(true); -// }else { -// -// /** -// * 1.schema -// */ -// -// schema = new StructType(columToIndx,fieldCount); -// schema.setOrderCols(orderCols); -// -// /** -// * 2 .PrefixComputer -// */ -// prefixComputer = new 
RowPrefixComputer(schema); -// -// /** -// * 3 .PrefixComparator 默认是ASC,可以选择DESC -// */ -// -// prefixComparator = PrefixComparators.LONG; -// -// -// dataNodeMemoryManager = new DataNodeMemoryManager(memoryManager, -// Thread.currentThread().getId()); -// -// globalMergeResult = new UnsafeExternalRowSorter( -// dataNodeMemoryManager, -// myCatMemory, -// schema, -// prefixComparator, -// prefixComputer, -// conf.getSizeAsBytes("mycat.buffer.pageSize", "32k"), -// false,/**是否使用基数排序*/ -// false/**不排序*/); -// } -// } -// -// private PrefixComparator getPrefixComparator(OrderCol[] orderCols) { -// PrefixComparator prefixComparator = null; -// OrderCol firstOrderCol = orderCols[0]; -// int orderType = firstOrderCol.getOrderType(); -// int colType = firstOrderCol.colMeta.colType; -// -// switch (colType) { -// case ColMeta.COL_TYPE_INT: -// case ColMeta.COL_TYPE_LONG: -// case ColMeta.COL_TYPE_INT24: -// case ColMeta.COL_TYPE_SHORT: -// case ColMeta.COL_TYPE_LONGLONG: -// prefixComparator = (orderType == OrderCol.COL_ORDER_TYPE_ASC ? PrefixComparators.LONG : PrefixComparators.LONG_DESC); -// break; -// case ColMeta.COL_TYPE_FLOAT: -// case ColMeta.COL_TYPE_DOUBLE: -// case ColMeta.COL_TYPE_DECIMAL: -// case ColMeta.COL_TYPE_NEWDECIMAL: -// prefixComparator = (orderType == OrderCol.COL_ORDER_TYPE_ASC ? PrefixComparators.DOUBLE : PrefixComparators.DOUBLE_DESC); -// break; -// case ColMeta.COL_TYPE_DATE: -// case ColMeta.COL_TYPE_TIMSTAMP: -// case ColMeta.COL_TYPE_TIME: -// case ColMeta.COL_TYPE_YEAR: -// case ColMeta.COL_TYPE_DATETIME: -// case ColMeta.COL_TYPE_NEWDATE: -// case ColMeta.COL_TYPE_BIT: -// case ColMeta.COL_TYPE_VAR_STRING: -// case ColMeta.COL_TYPE_STRING: -// case ColMeta.COL_TYPE_ENUM: -// case ColMeta.COL_TYPE_SET: -// prefixComparator = (orderType == OrderCol.COL_ORDER_TYPE_ASC ? PrefixComparators.BINARY : PrefixComparators.BINARY_DESC); -// break; -// default: -// prefixComparator = (orderType == OrderCol.COL_ORDER_TYPE_ASC ? 
PrefixComparators.LONG : PrefixComparators.LONG_DESC); -// break; -// } -// -// return prefixComparator; -// } -// -// @Override -// public List getResults(byte[] eof) -// -// @Override -// public void run() { -// -// if (!running.compareAndSet(false, true)) { -// return; -// } -// -// boolean nulpack = false; -// -// try { -// for (; ; ) { -// final PackWraper pack = packs.poll(); -// -// if (pack == null) { -// nulpack = true; -// break; -// } -// if (pack == END_FLAG_PACK) { -// -// hasEndFlag = true; -// -// if(packs.peek()!=null){ -// packs.add(pack); -// continue; -// } -// -// /** -// * 最后一个节点datenode发送了row eof packet说明了整个 -// * 分片数据全部接收完成,进而将结果集全部发给你Mycat 客户端 -// */ -// final int warningCount = 0; -// final EOFPacket eofp = new EOFPacket(); -// final ByteBuffer eof = ByteBuffer.allocate(9); -// BufferUtil.writeUB3(eof, eofp.calcPacketSize()); -// eof.put(eofp.packetId); -// eof.put(eofp.fieldCount); -// BufferUtil.writeUB2(eof,warningCount); -// BufferUtil.writeUB2(eof,eofp.status); -// final ServerConnection source = multiQueryHandler.getSession().getSource(); -// final byte[] array = eof.array(); -// -// -// Iterator iters = null; -// -// -// if (unsafeRowGrouper != null){ -// /** -// * group by里面需要排序情况 -// */ -// if (globalSorter != null){ -// iters = unsafeRowGrouper.getResult(globalSorter); -// }else { -// iters = unsafeRowGrouper.getResult(globalMergeResult); -// } -// -// }else if(globalSorter != null){ -// -// iters = globalSorter.sort(); -// -// }else if (!isStreamOutputResult){ -// -// iters = globalMergeResult.sort(); -// -// } -// -// if(iters != null){ -// multiQueryHandler.outputMergeResult(source,array,iters,isMiddleResultDone); -// } -// break; -// } -// -// unsafeRow = new UnsafeRow(fieldCount); -// bufferHolder = new BufferHolder(unsafeRow,0); -// unsafeRowWriter = new UnsafeRowWriter(bufferHolder,fieldCount); -// bufferHolder.reset(); -// -// /** -// *构造一行row,将对应的col填充. 
-// */ -// MySQLMessage mm = new MySQLMessage(pack.rowData); -// mm.readUB3(); -// mm.read(); -// -// int nullnum = 0; -// for (int i = 0; i < fieldCount; i++) { -// byte[] colValue = mm.readBytesWithLength(); -// if (colValue != null) -// unsafeRowWriter.write(i,colValue); -// else -// { -// if(mergeColsIndex!=null&&mergeColsIndex.length>0){ -// -// if(Arrays.binarySearch(mergeColsIndex, i)<0){ -// nullnum++; -// } -// } -// unsafeRow.setNullAt(i); -// } -// } -// -// if(mergeColsIndex!=null&&mergeColsIndex.length>0){ -// if(nullnum == (fieldCount - mergeColsIndex.length)){ -// if(!hasEndFlag){ -// packs.add(pack); -// continue; -// } -// } -// } -// -// unsafeRow.setTotalSize(bufferHolder.totalSize()); -// -// if(unsafeRowGrouper != null){ -// unsafeRowGrouper.addRow(unsafeRow); -// }else if (globalSorter != null){ -// globalSorter.insertRow(unsafeRow); -// }else { -// globalMergeResult.insertRow(unsafeRow); -// } -// -// unsafeRow = null; -// bufferHolder = null; -// unsafeRowWriter = null; -// } -// -// } catch (final Exception e) { -// e.printStackTrace(); -// multiQueryHandler.handleDataProcessException(e); -// } finally { -// running.set(false); -// if (nulpack && !packs.isEmpty()) { -// this.run(); -// } -// } -// } -// -// /** -// * 释放DataNodeMergeManager所申请的资源 -// */ -// public void clear() { -// -// unsafeRows.clear(); -// -// synchronized (this) -// { -// if (unsafeRowGrouper != null) { -// unsafeRowGrouper.free(); -// unsafeRowGrouper = null; -// } -// } -// -// if(globalSorter != null){ -// globalSorter.cleanupResources(); -// globalSorter = null; -// } -// -// if (globalMergeResult != null){ -// globalMergeResult.cleanupResources(); -// globalMergeResult = null; -// } -// } -//} diff --git a/source/src/main/java/io/mycat/mycat2/tasks/HeapDataNodeMergeManager.java b/source/src/main/java/io/mycat/mycat2/tasks/HeapDataNodeMergeManager.java index e9e5522..22e3fb5 100644 --- a/source/src/main/java/io/mycat/mycat2/tasks/HeapDataNodeMergeManager.java +++ 
b/source/src/main/java/io/mycat/mycat2/tasks/HeapDataNodeMergeManager.java @@ -1,83 +1,67 @@ package io.mycat.mycat2.tasks; -import io.mycat.mycat2.ColumnMeta; import io.mycat.mycat2.MycatSession; import io.mycat.mycat2.PackWraper; +import io.mycat.mycat2.beans.ColumnMeta; import io.mycat.mycat2.console.SessionKeyEnum; import io.mycat.mycat2.hbt.TableMeta; import io.mycat.mycat2.route.RouteResultset; import io.mycat.proxy.ProxyBuffer; +import io.mycat.util.ErrorCode; +import org.apache.log4j.Logger; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Iterator; import java.util.Map; import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; -public class HeapDataNodeMergeManager extends AbstractDataNodeMerge { +public class HeapDataNodeMergeManager extends DataNodeManager { + private static Logger LOGGER = Logger.getLogger(HeapDataNodeMergeManager.class); TableMeta tableMeta; - /** - * 标志业务线程是否启动了? - */ - protected final AtomicBoolean running = new AtomicBoolean(false); public HeapDataNodeMergeManager(RouteResultset rrs, MycatSession mycatSession) { super(rrs, mycatSession); } @Override - public void onRowMetaData(Map columToIndx, int fieldCount) { + public void onRowMetaData(String datanode, Map columToIndx, int fieldCount) { if (tableMeta == null) { tableMeta = new TableMeta(); tableMeta.init(fieldCount); Set> entries = columToIndx.entrySet(); for (Map.Entry entry : entries) { - tableMeta.headerResultSetMeta.addFiled(entry.getKey(), entry.getValue().colType); + tableMeta.headerResultSetMeta.addField(entry.getKey(), entry.getValue().colType); } } } @Override - public Iterator getResults(byte[] eof) { + public Iterator getResults() { return null; } + public TableMeta getTableMeta() { + return tableMeta; + } + - @Override public void clear() { this.tableMeta = null; + this.mycatSession.merge = null; } @Override public void run() { - // sort-or-group: no need for us to using multi-threads, because - //both sorter and group are 
synchronized!! - // @author Uncle-pan - // @since 2016-03-23 - if (!running.compareAndSet(false, true)) { - return; - } - - // eof handler has been placed to "if (pack == END_FLAG_PACK){}" in for-statement - // @author Uncle-pan - // @since 2016-03-23 - boolean nulpack = false; try { // loop-on-packs for (; ; ) { final PackWraper pack = packs.take(); - System.out.println(packs.size()); - // async: handling row pack queue, this business thread should exit when no pack - // @author Uncle-pan - // @since 2016-03-23 - if (pack == null) { - nulpack = true; - break; - } if (pack == END_FLAG_PACK) { - System.out.println("END_FLAG_PACK"); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("END_FLAG_PACK"); + } ProxyBuffer proxyBuffer = mycatSession.proxyBuffer; proxyBuffer.reset(); tableMeta.writeBegin(proxyBuffer); @@ -86,15 +70,9 @@ public void run() { proxyBuffer.readIndex = proxyBuffer.writeIndex; mycatSession.takeBufferOwnerOnly(); if (!tableMeta.isWriteFinish()) { - mycatSession.getSessionAttrMap().put(SessionKeyEnum.SESSION_KEY_HBT_TABLE_META.getKey(), tableMeta); - } - try { - System.out.println("开始发送"); - mycatSession.writeToChannel(); - clear(); - } catch (Exception e) { - e.printStackTrace(); + mycatSession.getSessionAttrMap().put(SessionKeyEnum.SESSION_KEY_MERGE_OVER_FLAG.getKey(), null); } + mycatSession.writeToChannel(); return; } else { ArrayList v = new ArrayList<>(tableMeta.fieldCount); @@ -106,18 +84,19 @@ public void run() { tableMeta.addFieldValues(v); } } - } catch (final Exception e) { + } catch (Exception e) { e.printStackTrace(); - // multiQueryHandler.handleDataProcessException(e); - } finally { - running.set(false); - } - // try to check packs, it's possible that adding a pack after polling a null pack - //and before this time pointer!! 
- // @author Uncle-pan - // @since 2016-03-23 - if (nulpack && !packs.isEmpty()) { - this.run(); + this.closeMutilBackendAndResponseError(false, ErrorCode.ER_UNKNOWN_ERROR, e.getMessage()); } } + + @Override + public void onError(String dataNode, String msg) { + this.closeMutilBackendAndResponseError(false, ErrorCode.ER_UNKNOWN_ERROR, msg); + } + + @Override + public void onfinished() { + clearSQLQueryStreamResouces(); + } } diff --git a/source/src/main/java/io/mycat/mycat2/tasks/MyRowStream.java b/source/src/main/java/io/mycat/mycat2/tasks/MyRowStream.java deleted file mode 100644 index b90a169..0000000 --- a/source/src/main/java/io/mycat/mycat2/tasks/MyRowStream.java +++ /dev/null @@ -1,185 +0,0 @@ -package io.mycat.mycat2.tasks; - -import io.mycat.mycat2.ColumnMeta; -import io.mycat.mycat2.MySQLSession; -import io.mycat.mycat2.MycatSession; -import io.mycat.mycat2.beans.MySQLPackageInf; -import io.mycat.mycat2.console.SessionKeyEnum; -import io.mycat.mycat2.hbt.MyFunction; -import io.mycat.mycat2.hbt.ResultSetMeta; -import io.mycat.mycat2.hbt.SqlMeta; -import io.mycat.mycat2.net.DefaultMycatSessionHandler; -import io.mycat.mysql.packet.ErrorPacket; -import io.mycat.mysql.packet.MySQLPacket; -import io.mycat.mysql.packet.QueryPacket; -import io.mycat.proxy.ProxyBuffer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.SelectionKey; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Function; - -public class MyRowStream extends BackendIOTaskWithResultSet { - private static Logger logger = LoggerFactory.getLogger(MyRowStream.class); - AbstractDataNodeMerge merge; - Map columToIndx = new HashMap<>(); - private ResultSetMeta resultSetMeta; - int fieldCount = 0; - int getFieldCount = 0; - public MyRowStream(MySQLSession optSession) { - 
this.useNewBuffer = true; - setSession(optSession, true, false); - this.session = optSession; - } - - public AbstractDataNodeMerge getAbstractDataNodeMerge() { - return merge; - } - - public void setAbstractDataNodeMerge(AbstractDataNodeMerge abstractDataNodeMerge) { - this.merge = abstractDataNodeMerge; - } - - // public void fetchStream() { -// /*设置为忙*/ -// session.getSessionAttrMap().put(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey(), false); -// ProxyBuffer proxyBuf = session.proxyBuffer; -// session.setCurNIOHandler(this); -// proxyBuf.flip(); -// proxyBuf.readIndex = proxyBuf.writeIndex; -// try { -// this.session.writeToChannel(); -// } catch (IOException e) { -// logger.error(" The FetchIntoRowStream task write is error . {}",e.getMessage()); -// e.printStackTrace(); -// } -// } -public void fetchStream(String sql) { - /*设置为忙*/ - session.getSessionAttrMap().put(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey(), false); - ProxyBuffer proxyBuf = session.proxyBuffer; - proxyBuf.reset(); - QueryPacket queryPacket = new QueryPacket(); - queryPacket.packetId = 0; - queryPacket.sql = sql; - queryPacket.write(proxyBuf); - session.setCurNIOHandler(this); - proxyBuf.flip(); - proxyBuf.readIndex = proxyBuf.writeIndex; - try { - this.session.writeToChannel(); - } catch (IOException e) { - logger.error(" The FetchIntoRowStream task write is error . {}",e.getMessage()); - e.printStackTrace(); - } -} - public void fetchStream(MycatSession mycatSession) { - // 切换 buffer 读写状态 - ProxyBuffer proxyBuf = mycatSession.proxyBuffer; - proxyBuf.flip(); - // 改变 owner,对端Session获取,并且感兴趣写事件 - mycatSession.clearReadWriteOpts(); - /*设置为忙*/ - this.session.getSessionAttrMap().put(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey(), false); - this.session.setCurNIOHandler(this); - proxyBuf.flip(); - proxyBuf.readIndex = proxyBuf.writeIndex; - try { - this.session.writeToChannel(); - } catch (IOException e) { - logger.error(" The FetchIntoRowStream task write is error . 
{}",e.getMessage()); - e.printStackTrace(); - } - } - @Override - void onRsColCount(MySQLSession session) { - ProxyBuffer proxyBuffer = session.proxyBuffer; - MySQLPackageInf curMSQLPackgInf = session.curMSQLPackgInf; - int fieldCount = (int) proxyBuffer.getLenencInt(curMSQLPackgInf.startPos + MySQLPacket.packetHeaderSize); - - this.resultSetMeta = new ResultSetMeta(fieldCount); - this.fieldCount = fieldCount; - } - - @Override - void onRsColDef(MySQLSession session) { - ProxyBuffer proxyBuffer = session.proxyBuffer; - MySQLPackageInf curMQLPackgInf = session.curMSQLPackgInf; - - int tmpReadIndex = proxyBuffer.readIndex; - int rowDataIndex = curMQLPackgInf.startPos+MySQLPacket.packetHeaderSize; - proxyBuffer.readIndex = rowDataIndex; - proxyBuffer.readLenencString(); //catalog - proxyBuffer.readLenencString(); //schema - proxyBuffer.readLenencString(); //table - proxyBuffer.readLenencString(); //orgTable - String name = proxyBuffer.readLenencString(); //name - proxyBuffer.readLenencString(); - proxyBuffer.readBytes(7); // 1(filler) + 2(charsetNumber) + 4 (length) - int fieldType = proxyBuffer.readByte() & 0xff; - this.resultSetMeta.addFiled(name, fieldType); - proxyBuffer.readIndex = tmpReadIndex; - if(resultSetMeta.getFiledCount() == resultSetMeta.getRealFieldNameListSize()) { - - } - - columToIndx.put(name, new ColumnMeta(getFieldCount++, fieldType)); - if (fieldCount == getFieldCount) { - merge.onRowMetaData(columToIndx, fieldCount); - } - } - - @Override - void onRsRow(MySQLSession session) { - ProxyBuffer proxyBuffer = session.proxyBuffer; - MySQLPackageInf curMQLPackgInf = session.curMSQLPackgInf; - int rowDataIndex = curMQLPackgInf.startPos + MySQLPacket.packetHeaderSize; - int fieldCount = resultSetMeta.getFiledCount(); - int tmpReadIndex = proxyBuffer.readIndex; - proxyBuffer.readIndex = rowDataIndex; - ByteBuffer byteBuffer = ByteBuffer.allocate(proxyBuffer.getBuffer().capacity()); - if(merge!=null){ - for (int i = proxyBuffer.readIndex; i < 
proxyBuffer.writeIndex; i++) { - byteBuffer.put(proxyBuffer.getByte(i)); - }} - merge.onNewRecords(session.getDatabase(), byteBuffer); - proxyBuffer.readIndex = tmpReadIndex; - - } - - @Override - void onRsFinish(MySQLSession session, boolean success, String msg) throws IOException { - if (callBack != null) { - if (success == false) { - this.errPkg = new ErrorPacket(); - MySQLPackageInf curMQLPackgInf = session.curMSQLPackgInf; - session.proxyBuffer.readIndex = curMQLPackgInf.startPos; - this.errPkg.read(session.proxyBuffer); - session.getSessionAttrMap().remove(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey()); - revertPreBuffer(); - callBack.finished(session, this, success, this.errPkg); - } else { - session.getSessionAttrMap().remove(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey()); - revertPreBuffer(); - callBack.finished(session, null, success, null); - } - } - if(merge!=null){ - System.out.println("=>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>.hahahahah"); - merge.rrs.countDown(session,()->{ - System.out.println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"); - merge.onEOF(); - }); - } - logger.debug("session[{}] load result finish", session); - //@todo check - session.unbindMycatSession(); - } -} diff --git a/source/src/main/java/io/mycat/mycat2/tasks/RawSQLQueryResultTaskWrapper.java b/source/src/main/java/io/mycat/mycat2/tasks/RawSQLQueryResultTaskWrapper.java index e30bc60..70631de 100644 --- a/source/src/main/java/io/mycat/mycat2/tasks/RawSQLQueryResultTaskWrapper.java +++ b/source/src/main/java/io/mycat/mycat2/tasks/RawSQLQueryResultTaskWrapper.java @@ -2,17 +2,30 @@ import io.mycat.mycat2.MySQLSession; import io.mycat.mycat2.beans.MySQLPackageInf; +import io.mycat.mycat2.console.SessionKeyEnum; import io.mycat.mysql.packet.MySQLPacket; +import io.mycat.mysql.packet.QueryPacket; import io.mycat.proxy.ProxyBuffer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.nio.ByteBuffer; -import java.util.ArrayList; +import java.io.IOException; public abstract 
class RawSQLQueryResultTaskWrapper extends BackendIOTaskWithResultSet { private static Logger logger = LoggerFactory.getLogger(RawSQLQueryResultTaskWrapper.class); + public void fetchSQL(QueryPacket queryPacket) throws IOException { + /*设置为忙*/ + session.getSessionAttrMap().put(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey(), false); + ProxyBuffer proxyBuf = session.proxyBuffer; + proxyBuf.reset(); + queryPacket.write(proxyBuf); + session.setCurNIOHandler(this); + proxyBuf.flip(); + proxyBuf.readIndex = proxyBuf.writeIndex; + this.session.writeToChannel(); + } + @Override void onRsColCount(MySQLSession session) { ProxyBuffer proxyBuffer = session.proxyBuffer; @@ -21,7 +34,7 @@ void onRsColCount(MySQLSession session) { } /** - * *
+     * * 
      * Bytes                      Name
      * -----                      ----
      * n (Length Coded String)    catalog
@@ -38,8 +51,8 @@ void onRsColCount(MySQLSession session) {
      * 1                          decimals
      * 2                          (filler), always 0x00
      * n (Length Coded Binary)    default
-     *
-     * @see http://forge.mysql.com/wiki/MySQL_Internals_ClientServer_Protocol#Field_Packet
+     * 

+ * http://forge.mysql.com/wiki/MySQL_Internals_ClientServer_Protocol#Field_Packet *

* * @param session @@ -90,8 +103,7 @@ void onRsRow(MySQLSession session) { int tmpReadIndex = proxyBuffer.readIndex; proxyBuffer.readIndex = rowDataIndex; - onRsRow(session,proxyBuffer); - + onRsRow(session, proxyBuffer); proxyBuffer.readIndex = tmpReadIndex; @@ -99,17 +111,72 @@ void onRsRow(MySQLSession session) { abstract void onRsColCount(MySQLSession session, int fieldCount); + /** + * the param's type is Object beacuse it may be byte[],String,ByteBuffer even null + * + * @param session + * @param catalog + * @param schema + * @param table + * @param orgTable + * @param name + * @param org_name + * @param filler + * @param charsetNumber + * @param length + * @param fieldType + */ abstract void onRsColDef(MySQLSession session, - String catalog, - String schema, - String table, - String orgTable, - String name, - String org_name, + Object catalog, + Object schema, + Object table, + Object orgTable, + Object name, + Object org_name, byte filler, int charsetNumber, int length, int fieldType); + /** + * read the row field by readLenencString or readLenencBytes + * + * @param session + * @param proxyBuffer + */ abstract void onRsRow(MySQLSession session, ProxyBuffer proxyBuffer); + + abstract void onError(MySQLSession session, String msg); + + abstract void onRsFinished(MySQLSession session); + + public void clearResouces() { + session.getSessionAttrMap().remove(SessionKeyEnum.SESSION_KEY_CONN_IDLE_FLAG.getKey()); + revertPreBuffer(); + session.setCurNIOHandler(null);// gc stream + } + + @Override + void onRsFinish(MySQLSession session, boolean success, String msg) throws IOException { + if (!success) { + onError(session, msg); + if (callBack != null) { + callBack.finished(session, this, success, msg); + } + } else { + onRsFinished(session); + if (callBack != null) + callBack.finished(session, null, success, msg); + } + } + + /** + * @param userSession + * @param normal + * @todo check need Override this function + */ + @Override + public void 
onSocketClosed(MySQLSession userSession, boolean normal) { + clearResouces(); + } } diff --git a/source/src/main/java/io/mycat/mycat2/tasks/RowStream.java b/source/src/main/java/io/mycat/mycat2/tasks/RowStream.java index 3ef0372..4b1aabc 100644 --- a/source/src/main/java/io/mycat/mycat2/tasks/RowStream.java +++ b/source/src/main/java/io/mycat/mycat2/tasks/RowStream.java @@ -1,13 +1,5 @@ package io.mycat.mycat2.tasks; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.function.Function; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import io.mycat.mycat2.MySQLSession; import io.mycat.mycat2.beans.MySQLPackageInf; import io.mycat.mycat2.console.SessionKeyEnum; @@ -18,6 +10,13 @@ import io.mycat.mysql.packet.MySQLPacket; import io.mycat.mysql.packet.QueryPacket; import io.mycat.proxy.ProxyBuffer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Function; public class RowStream extends BackendIOTaskWithResultSet { private static Logger logger = LoggerFactory.getLogger(RowStream.class); @@ -97,7 +96,7 @@ void onRsColDef(MySQLSession session) { proxyBuffer.readLenencString(); proxyBuffer.readBytes(7); // 1(filler) + 2(charsetNumber) + 4 (length) int fieldType = proxyBuffer.readByte() & 0xff; - this.resultSetMeta.addFiled(name, fieldType); + this.resultSetMeta.addField(name, fieldType); proxyBuffer.readIndex = tmpReadIndex; if(resultSetMeta.getFiledCount() == resultSetMeta.getRealFieldNameListSize()) { this.onHeader.apply(resultSetMeta); diff --git a/source/src/main/java/io/mycat/mycat2/tasks/SQLQueryResultTask.java b/source/src/main/java/io/mycat/mycat2/tasks/SQLQueryResultTask.java deleted file mode 100644 index 8fbdbf1..0000000 --- a/source/src/main/java/io/mycat/mycat2/tasks/SQLQueryResultTask.java +++ /dev/null @@ -1,80 +0,0 @@ -package io.mycat.mycat2.tasks; - -import 
io.mycat.mycat2.ColumnMeta; -import io.mycat.mycat2.MySQLSession; -import io.mycat.mycat2.MycatSession; -import io.mycat.mycat2.console.SessionKeyEnum; -import io.mycat.mycat2.hbt.TableMeta; -import io.mycat.mycat2.net.DefaultMycatSessionHandler; -import io.mycat.proxy.ProxyBuffer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.SelectionKey; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class SQLQueryResultTask extends RawSQLQueryResultTaskWrapper { - private static Logger logger = LoggerFactory.getLogger(SQLQueryResultTask.class); - AbstractDataNodeMerge merge; - int fieldCount = 0; - int getFieldCount = 0; - Map columToIndx = new HashMap<>(); - - public SQLQueryResultTask(AbstractDataNodeMerge merge) { - this.merge = merge; - } - - @Override - void onRsColCount(MySQLSession session, int fieldCount) { - this.fieldCount = fieldCount; - } - - @Override - void onRsColDef(MySQLSession session, String catalog, String schema, String table, String orgTable, String name, String originName, byte filler, int charsetNumber, int length, int fieldType) { - columToIndx.put(name, new ColumnMeta(getFieldCount++, fieldType)); - if (fieldCount == getFieldCount) { - merge.onRowMetaData(columToIndx, fieldCount); - } - } - - @Override - void onRsRow(MySQLSession session, ProxyBuffer proxyBuffer) { -// ArrayList row = new ArrayList(3); -// for(int i = 0; i < 3; i++) { -// byte[] x = proxyBuffer.readLenencBytes(); -// ByteBuffer byteBuffer =ByteBuffer.allocate(proxyBuffer.getBuffer().position());; -// byteBuffer.put(x); -// //byteBuffer.flip(); -// merge.onNewRecords("",byteBuffer); -// row.add(x); -// } - - ByteBuffer byteBuffer = ByteBuffer.allocate(proxyBuffer.getBuffer().capacity()); - for (int i = proxyBuffer.readIndex; i < proxyBuffer.writeIndex; i++) { - byteBuffer.put(proxyBuffer.getByte(i)); - } - 
merge.onNewRecords("", byteBuffer); - - - -// for(int i = 0; i < 3; i++) { -// System.out.println(row); -// byteBuffer.put(row.get(i)); -// -// } - -} - - - @Override - void onRsFinish(MySQLSession session, boolean success, String msg) throws IOException { - merge.onEOF(); - System.out.println(msg); - session.setCurNIOHandler(DefaultMycatSessionHandler.INSTANCE); - } -} diff --git a/source/src/main/java/io/mycat/mycat2/tasks/SQLQueryStream.java b/source/src/main/java/io/mycat/mycat2/tasks/SQLQueryStream.java new file mode 100644 index 0000000..d113f7e --- /dev/null +++ b/source/src/main/java/io/mycat/mycat2/tasks/SQLQueryStream.java @@ -0,0 +1,87 @@ +package io.mycat.mycat2.tasks; + +import io.mycat.mycat2.MySQLSession; +import io.mycat.mycat2.beans.ColumnMeta; +import io.mycat.mysql.packet.QueryPacket; +import io.mycat.proxy.ProxyBuffer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +/** + * @author jamie + * @since 2.0 + */ +public class SQLQueryStream extends RawSQLQueryResultTaskWrapper { + private static Logger logger = LoggerFactory.getLogger(SQLQueryStream.class); + final String dataNode; + final private DataNodeManager merge; + final private Map columToIndx = new HashMap<>(); + int fieldCount = 0; + int getFieldCount = 0; + + public SQLQueryStream(String dataNode, MySQLSession optSession, DataNodeManager merge) { + this.useNewBuffer = true; + setSession(optSession, true, false); + this.session = optSession; + this.merge = merge; + this.dataNode = dataNode; + } + + + public void fetchSQL(String sql) throws IOException { + QueryPacket queryPacket = new QueryPacket(); + queryPacket.packetId = 0; + queryPacket.sql = sql; + fetchSQL(queryPacket); + } + + @Override + void onRsColCount(MySQLSession session, int fieldCount) { + this.fieldCount = fieldCount; + } + + @Override + void onRsColDef(MySQLSession session, Object catalog, Object 
schema, Object table, Object orgTable, Object name, Object org_name, byte filler, int charsetNumber, int length, int fieldType) { + columToIndx.put((String) name, new ColumnMeta(getFieldCount++, fieldType)); + if (fieldCount == getFieldCount) { + merge.onRowMetaData(this.dataNode, columToIndx, fieldCount); + } + } + + @Override + void onRsRow(MySQLSession session, ProxyBuffer proxyBuffer) { + ByteBuffer byteBuffer = session.bufPool.allocate(proxyBuffer.writeIndex - proxyBuffer.readIndex); + ByteBuffer data = proxyBuffer.getBuffer(); + data.position(proxyBuffer.readIndex); + data.limit(proxyBuffer.writeIndex); + byteBuffer.put(data);//copy with internal methods + merge.onNewRecords(this.dataNode, byteBuffer); + } + + @Override + void onError(MySQLSession session, String msg) { + if (logger.isDebugEnabled()) { + logger.debug("mysql session:{} is error", session.toString()); + } + merge.onError(this.dataNode, msg); + } + + @Override + void onRsFinished(MySQLSession session) { + merge.routeResultset.countDown(session, () -> { + if (logger.isDebugEnabled()) { + logger.debug("mysql session:{} is last finished", session.toString()); + } + merge.onEOF(this.dataNode); + }); + } + + public DataNodeManager getMerge() { + return merge; + } +} diff --git a/source/src/main/java/io/mycat/proxy/AbstractSession.java b/source/src/main/java/io/mycat/proxy/AbstractSession.java index c96677a..038f1cd 100644 --- a/source/src/main/java/io/mycat/proxy/AbstractSession.java +++ b/source/src/main/java/io/mycat/proxy/AbstractSession.java @@ -1,5 +1,10 @@ package io.mycat.proxy; +import io.mycat.mycat2.MycatSession; +import io.mycat.proxy.buffer.BufferPool; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; @@ -10,12 +15,6 @@ import java.util.HashMap; import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import io.mycat.mycat2.MycatSession; -import 
io.mycat.proxy.buffer.BufferPool; - /** * 会话,代表一个前端连接 * @@ -136,7 +135,6 @@ public boolean readFromChannel() throws IOException { // logger.debug(" readed {} total bytes curChannel is {}", readed,this); if (readed == -1) { logger.warn("Read EOF ,socket closed "); - System.out.println("==============================>"+this); throw new ClosedChannelException(); } else if (readed == 0) { logger.warn("readed zero bytes ,Maybe a bug ,please fix it !!!!"); diff --git a/source/src/main/java/io/mycat/proxy/MycatReactorThread.java b/source/src/main/java/io/mycat/proxy/MycatReactorThread.java index 4a84c19..32cad71 100644 --- a/source/src/main/java/io/mycat/proxy/MycatReactorThread.java +++ b/source/src/main/java/io/mycat/proxy/MycatReactorThread.java @@ -1,17 +1,5 @@ package io.mycat.proxy; -import java.io.IOException; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.stream.Stream; - -import org.apache.commons.lang.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import io.mycat.mycat2.MySQLSession; import io.mycat.mycat2.MycatSession; import io.mycat.mycat2.beans.MySQLMetaBean; @@ -24,6 +12,13 @@ import io.mycat.mysql.packet.ErrorPacket; import io.mycat.proxy.buffer.BufferPool; import io.mycat.util.ErrorCode; +import org.apache.commons.lang.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.*; +import java.util.stream.Stream; /** * mycat 多个Session会话 @@ -65,7 +60,7 @@ private int getUsingBackendConCounts(MySQLMetaBean mySQLMetaBean) { return allSessions.stream() // .filter(session -> session instanceof MycatSession) .map(session -> { - MycatSession mycatSession = (MycatSession) session; + MycatSession mycatSession = session; return mycatSession.getBackendConCounts(mySQLMetaBean); }) .mapToInt(Integer::intValue) @@ -188,8 +183,10 @@ public void getMysqlSession(MySQLMetaBean 
mySQLMetaBean,AsynTaskCallBack Date: Tue, 17 Apr 2018 16:56:06 +0800 Subject: [PATCH 22/22] not fixed sql in DbInMutilServerCmd --- .../mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java b/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java index 4a13682..a047085 100644 --- a/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java +++ b/source/src/main/java/io/mycat/mycat2/cmds/multinode/DbInMultiServerCmd.java @@ -70,11 +70,12 @@ private static void broadcast(MycatSession mycatSession, RouteResultsetNode[] no public boolean procssSQL(MycatSession mycatSession) throws IOException { DNBean dnBean = ProxyRuntime.INSTANCE.getConfig().getDNBean("dn1"); DNBean dnBean2 = ProxyRuntime.INSTANCE.getConfig().getDNBean("dn2"); - RouteResultset curRouteResultset = new RouteResultset("select * from travelrecord", (byte) 0); + String sql = mycatSession.sqlContext.getRealSQL(0); + RouteResultset curRouteResultset = new RouteResultset(sql, (byte) 0); curRouteResultset.setNodes( new RouteResultsetNode[]{ - new RouteResultsetNode(dnBean.getName(), (byte) 1, "select * from travelrecord"), - new RouteResultsetNode(dnBean2.getName(), (byte) 1, "select * from travelrecord")}); + new RouteResultsetNode(dnBean.getName(), (byte) 1, sql), + new RouteResultsetNode(dnBean2.getName(), (byte) 1, sql)}); mycatSession.setCurRouteResultset(curRouteResultset); mycatSession.merge = new HeapDataNodeMergeManager(mycatSession.getCurRouteResultset(), mycatSession); RouteResultsetNode[] nodes = mycatSession.merge.getRouteResultset().getNodes(); @@ -88,7 +89,7 @@ public boolean procssSQL(MycatSession mycatSession) throws IOException { @Override public boolean onFrontWriteFinished(MycatSession session) throws IOException { - if (session.merge.isMultiBackendMoreOne()) { + if (session.merge != null && 
session.merge.isMultiBackendMoreOne()) { //@todo 改为迭代器实现 TableMeta tableMeta = ((HeapDataNodeMergeManager) session.merge).getTableMeta(); if (session.getSessionAttrMap().containsKey(SessionKeyEnum.SESSION_KEY_MERGE_OVER_FLAG.getKey()) && !tableMeta.isWriteFinish()) {