diff --git a/conf/dnindex.properties b/conf/dnindex.properties index 11d8953b9..4aa093275 100644 --- a/conf/dnindex.properties +++ b/conf/dnindex.properties @@ -1,5 +1,6 @@ #update -#Mon Apr 24 16:41:54 CST 2017 +#Mon Oct 09 12:58:57 CST 2017 +test_db=0 dh1=0 jdbchost=0 dataHost2=0 diff --git a/src/main/java/io/mycat/config/Versions.java b/src/main/java/io/mycat/config/Versions.java index 4d7215da4..e53d3d26f 100644 --- a/src/main/java/io/mycat/config/Versions.java +++ b/src/main/java/io/mycat/config/Versions.java @@ -28,11 +28,15 @@ */ public abstract class Versions { - /**协议版本**/ + /** + * 协议版本 + **/ public static final byte PROTOCOL_VERSION = 10; - /**服务器版本**/ - public static byte[] SERVER_VERSION = "5.6.29-mycat-1.6.5-BETA-20170424174212".getBytes(); + /** + * 服务器版本 + **/ + public static byte[] SERVER_VERSION = "5.6.29-mycat-1.6.5-release-20170930155225".getBytes(); public static void setServerVersion(String version) { byte[] mysqlVersionPart = version.getBytes(); @@ -42,7 +46,7 @@ public static void setServerVersion(String version) { break; } - // 生成mycat version信息 + // 重新拼接mycat version字节数组 byte[] newMycatVersion = new byte[mysqlVersionPart.length + SERVER_VERSION.length - startIndex]; System.arraycopy(mysqlVersionPart, 0, newMycatVersion, 0, mysqlVersionPart.length); System.arraycopy(SERVER_VERSION, startIndex, newMycatVersion, mysqlVersionPart.length, diff --git a/src/main/java/io/mycat/config/loader/xml/XMLSchemaLoader.java b/src/main/java/io/mycat/config/loader/xml/XMLSchemaLoader.java index 65fda5329..c0ecacae4 100644 --- a/src/main/java/io/mycat/config/loader/xml/XMLSchemaLoader.java +++ b/src/main/java/io/mycat/config/loader/xml/XMLSchemaLoader.java @@ -23,798 +23,793 @@ */ package io.mycat.config.loader.xml; -import java.io.IOException; -import java.io.InputStream; -import java.net.URI; -import java.text.SimpleDateFormat; -import java.util.*; - +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.config.loader.SchemaLoader; +import 
io.mycat.config.model.*; import io.mycat.config.model.rule.RuleConfig; +import io.mycat.config.model.rule.TableRuleConfig; +import io.mycat.config.util.ConfigException; +import io.mycat.config.util.ConfigUtil; +import io.mycat.route.function.AbstractPartitionAlgorithm; import io.mycat.route.function.TableRuleAware; +import io.mycat.util.DecryptUtil; import io.mycat.util.ObjectUtil; +import io.mycat.util.SplitUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; -import io.mycat.backend.datasource.PhysicalDBPool; -import io.mycat.config.loader.SchemaLoader; -import io.mycat.config.model.DBHostConfig; -import io.mycat.config.model.DataHostConfig; -import io.mycat.config.model.DataNodeConfig; -import io.mycat.config.model.SchemaConfig; -import io.mycat.config.model.TableConfig; -import io.mycat.config.model.TableConfigMap; -import io.mycat.config.model.rule.TableRuleConfig; -import io.mycat.config.util.ConfigException; -import io.mycat.config.util.ConfigUtil; -import io.mycat.route.function.AbstractPartitionAlgorithm; -import io.mycat.util.DecryptUtil; -import io.mycat.util.SplitUtil; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.text.SimpleDateFormat; +import java.util.*; /** * @author mycat */ @SuppressWarnings("unchecked") public class XMLSchemaLoader implements SchemaLoader { - - private static final Logger LOGGER = LoggerFactory.getLogger(XMLSchemaLoader.class); - - private final static String DEFAULT_DTD = "/schema.dtd"; - private final static String DEFAULT_XML = "/schema.xml"; - - private final Map tableRules; - private final Map dataHosts; - private final Map dataNodes; - private final Map schemas; - - public XMLSchemaLoader(String schemaFile, String ruleFile) { - //先读取rule.xml - XMLRuleLoader ruleLoader = new XMLRuleLoader(ruleFile); - //将tableRules拿出,用于这里加载Schema做rule有效判断,以及之后的分片路由计算 - this.tableRules = 
ruleLoader.getTableRules(); - //释放ruleLoader - ruleLoader = null; - this.dataHosts = new HashMap(); - this.dataNodes = new HashMap(); - this.schemas = new HashMap(); - //读取加载schema配置 - this.load(DEFAULT_DTD, schemaFile == null ? DEFAULT_XML : schemaFile); - } - - public XMLSchemaLoader() { - this(null, null); - } - - @Override - public Map getTableRules() { - return tableRules; - } - - @Override - public Map getDataHosts() { - return (Map) (dataHosts.isEmpty() ? Collections.emptyMap() : dataHosts); - } - - @Override - public Map getDataNodes() { - return (Map) (dataNodes.isEmpty() ? Collections.emptyMap() : dataNodes); - } - - @Override - public Map getSchemas() { - return (Map) (schemas.isEmpty() ? Collections.emptyMap() : schemas); - } - - private void load(String dtdFile, String xmlFile) { - InputStream dtd = null; - InputStream xml = null; - try { - dtd = XMLSchemaLoader.class.getResourceAsStream(dtdFile); - xml = XMLSchemaLoader.class.getResourceAsStream(xmlFile); - Element root = ConfigUtil.getDocument(dtd, xml).getDocumentElement(); - //先加载所有的DataHost - loadDataHosts(root); - //再加载所有的DataNode - loadDataNodes(root); - //最后加载所有的Schema - loadSchemas(root); - } catch (ConfigException e) { - throw e; - } catch (Exception e) { - throw new ConfigException(e); - } finally { - - if (dtd != null) { - try { - dtd.close(); - } catch (IOException e) { - } - } - - if (xml != null) { - try { - xml.close(); - } catch (IOException e) { - } - } - } - } - - private void loadSchemas(Element root) { - NodeList list = root.getElementsByTagName("schema"); - for (int i = 0, n = list.getLength(); i < n; i++) { - Element schemaElement = (Element) list.item(i); - //读取各个属性 - String name = schemaElement.getAttribute("name"); - String dataNode = schemaElement.getAttribute("dataNode"); - String checkSQLSchemaStr = schemaElement.getAttribute("checkSQLschema"); - String sqlMaxLimitStr = schemaElement.getAttribute("sqlMaxLimit"); - int sqlMaxLimit = -1; - //读取sql返回结果集限制 - if (sqlMaxLimitStr 
!= null && !sqlMaxLimitStr.isEmpty()) { - sqlMaxLimit = Integer.parseInt(sqlMaxLimitStr); - } - - // check dataNode already exists or not,看schema标签中是否有datanode - String defaultDbType = null; - //校验检查并添加dataNode - if (dataNode != null && !dataNode.isEmpty()) { - List dataNodeLst = new ArrayList(1); - dataNodeLst.add(dataNode); - checkDataNodeExists(dataNodeLst); - String dataHost = dataNodes.get(dataNode).getDataHost(); - defaultDbType = dataHosts.get(dataHost).getDbType(); - } else { - dataNode = null; - } - //加载schema下所有tables - Map tables = loadTables(schemaElement); - //判断schema是否重复 - if (schemas.containsKey(name)) { - throw new ConfigException("schema " + name + " duplicated!"); - } - - // 设置了table的不需要设置dataNode属性,没有设置table的必须设置dataNode属性 - if (dataNode == null && tables.size() == 0) { - throw new ConfigException( - "schema " + name + " didn't config tables,so you must set dataNode property!"); - } - - SchemaConfig schemaConfig = new SchemaConfig(name, dataNode, - tables, sqlMaxLimit, "true".equalsIgnoreCase(checkSQLSchemaStr)); - - //设定DB类型,这对之后的sql语句路由解析有帮助 - if (defaultDbType != null) { - schemaConfig.setDefaultDataNodeDbType(defaultDbType); - if (!"mysql".equalsIgnoreCase(defaultDbType)) { - schemaConfig.setNeedSupportMultiDBType(true); - } - } - - // 判断是否有不是mysql的数据库类型,方便解析判断是否启用多数据库分页语法解析 - for (TableConfig tableConfig : tables.values()) { - if (isHasMultiDbType(tableConfig)) { - schemaConfig.setNeedSupportMultiDBType(true); - break; - } - } - //记录每种dataNode的DB类型 - Map dataNodeDbTypeMap = new HashMap<>(); - for (String dataNodeName : dataNodes.keySet()) { - DataNodeConfig dataNodeConfig = dataNodes.get(dataNodeName); - String dataHost = dataNodeConfig.getDataHost(); - DataHostConfig dataHostConfig = dataHosts.get(dataHost); - if (dataHostConfig != null) { - String dbType = dataHostConfig.getDbType(); - dataNodeDbTypeMap.put(dataNodeName, dbType); - } - } - schemaConfig.setDataNodeDbTypeMap(dataNodeDbTypeMap); - schemas.put(name, schemaConfig); - } - } - - 
- /** - * 处理动态日期表, 支持 YYYYMM、YYYYMMDD 两种格式 - * - * YYYYMM格式: yyyymm,2015,01,60 - * YYYYMMDD格式: yyyymmdd,2015,01,10,50 - * - * @param tableNameElement - * @param tableNameSuffixElement - * @return - */ - private String doTableNameSuffix(String tableNameElement, String tableNameSuffixElement) { - - String newTableName = tableNameElement; - - String[] params = tableNameSuffixElement.split(","); - String suffixFormat = params[0].toUpperCase(); - if ( suffixFormat.equals("YYYYMM") ) { - - //读取参数 - int yyyy = Integer.parseInt( params[1] ); - int mm = Integer.parseInt( params[2] ); - int mmEndIdx = Integer.parseInt( params[3] ); - - //日期处理 - SimpleDateFormat yyyyMMSDF = new SimpleDateFormat("yyyyMM"); - - Calendar cal = Calendar.getInstance(); - cal.set(Calendar.YEAR, yyyy ); - cal.set(Calendar.MONTH, mm - 1 ); - cal.set(Calendar.DATE, 0 ); - - //表名改写 - StringBuffer tableNameBuffer = new StringBuffer(); - for(int mmIdx = 0; mmIdx <= mmEndIdx; mmIdx++) { - tableNameBuffer.append( tableNameElement ); - tableNameBuffer.append( yyyyMMSDF.format(cal.getTime()) ); - cal.add(Calendar.MONTH, 1); - - if ( mmIdx != mmEndIdx) { - tableNameBuffer.append(","); - } - } - newTableName = tableNameBuffer.toString(); - - } else if ( suffixFormat.equals("YYYYMMDD") ) { - - //读取参数 - int yyyy = Integer.parseInt( params[1] ); - int mm = Integer.parseInt( params[2] ); - int dd = Integer.parseInt( params[3] ); - int ddEndIdx = Integer.parseInt( params[4] ); - - //日期处理 - SimpleDateFormat yyyyMMddSDF = new SimpleDateFormat("yyyyMMdd"); - - Calendar cal = Calendar.getInstance(); - cal.set(Calendar.YEAR, yyyy ); - cal.set(Calendar.MONTH, mm - 1 ); - cal.set(Calendar.DATE, dd ); - - //表名改写 - StringBuffer tableNameBuffer = new StringBuffer(); - for(int ddIdx = 0; ddIdx <= ddEndIdx; ddIdx++) { - tableNameBuffer.append( tableNameElement ); - tableNameBuffer.append( yyyyMMddSDF.format(cal.getTime()) ); - - cal.add(Calendar.DATE, 1); - - if ( ddIdx != ddEndIdx) { - tableNameBuffer.append(","); - } - } - 
newTableName = tableNameBuffer.toString(); - } - return newTableName; - } - - - private Map loadTables(Element node) { - - // Map tables = new HashMap(); - - // 支持表名中包含引号[`] BEN GONG - Map tables = new TableConfigMap(); - NodeList nodeList = node.getElementsByTagName("table"); - for (int i = 0; i < nodeList.getLength(); i++) { - Element tableElement = (Element) nodeList.item(i); - String tableNameElement = tableElement.getAttribute("name").toUpperCase(); - - //TODO:路由, 增加对动态日期表的支持 - String tableNameSuffixElement = tableElement.getAttribute("nameSuffix").toUpperCase(); - if ( !"".equals( tableNameSuffixElement ) ) { - - if( tableNameElement.split(",").length > 1 ) { - throw new ConfigException("nameSuffix " + tableNameSuffixElement + ", require name parameter cannot multiple breaks!"); - } - //前缀用来标明日期格式 - tableNameElement = doTableNameSuffix(tableNameElement, tableNameSuffixElement); - } - //记录主键,用于之后路由分析,以及启用自增长主键 - String[] tableNames = tableNameElement.split(","); - String primaryKey = tableElement.hasAttribute("primaryKey") ? tableElement.getAttribute("primaryKey").toUpperCase() : null; - //记录是否主键自增,默认不是,(启用全局sequence handler) - boolean autoIncrement = false; - if (tableElement.hasAttribute("autoIncrement")) { - autoIncrement = Boolean.parseBoolean(tableElement.getAttribute("autoIncrement")); - } - //记录是否需要加返回结果集限制,默认需要加 - boolean needAddLimit = true; - if (tableElement.hasAttribute("needAddLimit")) { - needAddLimit = Boolean.parseBoolean(tableElement.getAttribute("needAddLimit")); - } - //记录type,是否为global - String tableTypeStr = tableElement.hasAttribute("type") ? 
tableElement.getAttribute("type") : null; - int tableType = TableConfig.TYPE_GLOBAL_DEFAULT; - if ("global".equalsIgnoreCase(tableTypeStr)) { - tableType = TableConfig.TYPE_GLOBAL_TABLE; - } - //记录dataNode,就是分布在哪些dataNode上 - String dataNode = tableElement.getAttribute("dataNode"); - TableRuleConfig tableRule = null; - if (tableElement.hasAttribute("rule")) { - String ruleName = tableElement.getAttribute("rule"); - tableRule = tableRules.get(ruleName); - if (tableRule == null) { - throw new ConfigException("rule " + ruleName + " is not found!"); - } - } - - boolean ruleRequired = false; - //记录是否绑定有分片规则 - if (tableElement.hasAttribute("ruleRequired")) { - ruleRequired = Boolean.parseBoolean(tableElement.getAttribute("ruleRequired")); - } - - if (tableNames == null) { - throw new ConfigException("table name is not found!"); - } - //distribute函数,重新编排dataNode - String distPrex = "distribute("; - boolean distTableDns = dataNode.startsWith(distPrex); - if (distTableDns) { - dataNode = dataNode.substring(distPrex.length(), dataNode.length() - 1); - } - //分表功能 - String subTables = tableElement.getAttribute("subTables"); - - for (int j = 0; j < tableNames.length; j++) { - - String tableName = tableNames[j]; - TableRuleConfig tableRuleConfig=tableRule ; - if(tableRuleConfig!=null) { - //对于实现TableRuleAware的function进行特殊处理 根据每个表新建个实例 - RuleConfig rule= tableRuleConfig.getRule(); - if(rule.getRuleAlgorithm() instanceof TableRuleAware) { - tableRuleConfig = (TableRuleConfig) ObjectUtil.copyObject(tableRuleConfig); - tableRules.remove(tableRuleConfig.getName()) ; - String newRuleName = tableRuleConfig.getName() + "_" + tableName; - tableRuleConfig. 
setName(newRuleName); - TableRuleAware tableRuleAware= (TableRuleAware) tableRuleConfig.getRule().getRuleAlgorithm(); - tableRuleAware.setRuleName(newRuleName); - tableRuleAware.setTableName(tableName); - tableRuleConfig.getRule().getRuleAlgorithm().init(); - tableRules.put(newRuleName,tableRuleConfig); - } - } - - TableConfig table = new TableConfig(tableName, primaryKey, - autoIncrement, needAddLimit, tableType, dataNode, - getDbType(dataNode), - (tableRuleConfig != null) ? tableRuleConfig.getRule() : null, - ruleRequired, null, false, null, null,subTables); - - checkDataNodeExists(table.getDataNodes()); - // 检查分片表分片规则配置是否合法 - if(table.getRule() != null) { - checkRuleSuitTable(table); - } - - if (distTableDns) { - distributeDataNodes(table.getDataNodes()); - } - //检查去重 - if (tables.containsKey(table.getName())) { - throw new ConfigException("table " + tableName + " duplicated!"); - } - //放入map - tables.put(table.getName(), table); - } - //只有tableName配置的是单个表(没有逗号)的时候才能有子表 - if (tableNames.length == 1) { - TableConfig table = tables.get(tableNames[0]); - // process child tables - processChildTables(tables, table, dataNode, tableElement); - } - } - return tables; - } - - /** - * distribute datanodes in multi hosts,means ,dn1 (host1),dn100 - * (host2),dn300(host3),dn2(host1),dn101(host2),dn301(host3)...etc - * 将每个host上的datanode按照host重新排列。比如上面的例子host1拥有dn1,dn2,host2拥有dn100,dn101,host3拥有dn300,dn301, - * 按照host重新排列: 0->dn1 (host1),1->dn100(host2),2->dn300(host3),3->dn2(host1),4->dn101(host2),5->dn301(host3) - * - * @param theDataNodes - */ - private void distributeDataNodes(ArrayList theDataNodes) { - Map> newDataNodeMap = new HashMap>(dataHosts.size()); - for (String dn : theDataNodes) { - DataNodeConfig dnConf = dataNodes.get(dn); - String host = dnConf.getDataHost(); - ArrayList hostDns = newDataNodeMap.get(host); - hostDns = (hostDns == null) ? 
new ArrayList() : hostDns; - hostDns.add(dn); - newDataNodeMap.put(host, hostDns); - } - - ArrayList result = new ArrayList(theDataNodes.size()); - boolean hasData = true; - while (hasData) { - hasData = false; - for (ArrayList dns : newDataNodeMap.values()) { - if (!dns.isEmpty()) { - result.add(dns.remove(0)); - hasData = true; - } - } - } - theDataNodes.clear(); - theDataNodes.addAll(result); - } - - private Set getDbType(String dataNode) { - Set dbTypes = new HashSet<>(); - String[] dataNodeArr = SplitUtil.split(dataNode, ',', '$', '-'); - for (String node : dataNodeArr) { - DataNodeConfig datanode = dataNodes.get(node); - DataHostConfig datahost = dataHosts.get(datanode.getDataHost()); - dbTypes.add(datahost.getDbType()); - } - - return dbTypes; - } - - private Set getDataNodeDbTypeMap(String dataNode) { - Set dbTypes = new HashSet<>(); - String[] dataNodeArr = SplitUtil.split(dataNode, ',', '$', '-'); - for (String node : dataNodeArr) { - DataNodeConfig datanode = dataNodes.get(node); - DataHostConfig datahost = dataHosts.get(datanode.getDataHost()); - dbTypes.add(datahost.getDbType()); - } - return dbTypes; - } - - private boolean isHasMultiDbType(TableConfig table) { - Set dbTypes = table.getDbTypes(); - for (String dbType : dbTypes) { - if (!"mysql".equalsIgnoreCase(dbType)) { - return true; - } - } - return false; - } - - private void processChildTables(Map tables, - TableConfig parentTable, String dataNodes, Element tableNode) { - - // parse child tables - NodeList childNodeList = tableNode.getChildNodes(); - for (int j = 0; j < childNodeList.getLength(); j++) { - Node theNode = childNodeList.item(j); - if (!theNode.getNodeName().equals("childTable")) { - continue; - } - Element childTbElement = (Element) theNode; - //读取子表信息 - String cdTbName = childTbElement.getAttribute("name").toUpperCase(); - String primaryKey = childTbElement.hasAttribute("primaryKey") ? 
childTbElement.getAttribute("primaryKey").toUpperCase() : null; - - boolean autoIncrement = false; - if (childTbElement.hasAttribute("autoIncrement")) { - autoIncrement = Boolean.parseBoolean(childTbElement.getAttribute("autoIncrement")); - } - boolean needAddLimit = true; - if (childTbElement.hasAttribute("needAddLimit")) { - needAddLimit = Boolean.parseBoolean(childTbElement.getAttribute("needAddLimit")); - } - String subTables = childTbElement.getAttribute("subTables"); - //子表join键,和对应的parent的键,父子表通过这个关联 - String joinKey = childTbElement.getAttribute("joinKey").toUpperCase(); - String parentKey = childTbElement.getAttribute("parentKey").toUpperCase(); - TableConfig table = new TableConfig(cdTbName, primaryKey, - autoIncrement, needAddLimit, - TableConfig.TYPE_GLOBAL_DEFAULT, dataNodes, - getDbType(dataNodes), null, false, parentTable, true, - joinKey, parentKey, subTables); - - if (tables.containsKey(table.getName())) { - throw new ConfigException("table " + table.getName() + " duplicated!"); - } - tables.put(table.getName(), table); - //对于子表的子表,递归处理 - processChildTables(tables, table, dataNodes, childTbElement); - } - } - - private void checkDataNodeExists(Collection nodes) { - if (nodes == null || nodes.size() < 1) { - return; - } - for (String node : nodes) { - if (!dataNodes.containsKey(node)) { - throw new ConfigException("dataNode '" + node + "' is not found!"); - } - } - } - - /** - * 检查分片表分片规则配置, 目前主要检查分片表分片算法定义与分片dataNode是否匹配
- * 例如分片表定义如下:
- * {@code - * - * } - *
- * 分片算法如下:
- * {@code - * - - 3 - - * } - *
- * shard table datanode(2) < function count(3) 此时检测为不匹配 - */ - private void checkRuleSuitTable(TableConfig tableConf) { - AbstractPartitionAlgorithm function = tableConf.getRule().getRuleAlgorithm(); - int suitValue = function.suitableFor(tableConf); - switch(suitValue) { - case -1: - // 少节点,给提示并抛异常 - throw new ConfigException("Illegal table conf : table [ " + tableConf.getName() + " ] rule function [ " - + tableConf.getRule().getFunctionName() + " ] partition size : " + tableConf.getRule().getRuleAlgorithm().getPartitionNum() + " > table datanode size : " - + tableConf.getDataNodes().size() + ", please make sure table datanode size = function partition size"); - case 0: - // table datanode size == rule function partition size - break; - case 1: - // 有些节点是多余的,给出warn log - LOGGER.warn("table conf : table [ {} ] rule function [ {} ] partition size : {} < table datanode size : {} , this cause some datanode to be redundant", - new String[]{ - tableConf.getName(), - tableConf.getRule().getFunctionName(), - String.valueOf(tableConf.getRule().getRuleAlgorithm().getPartitionNum()), - String.valueOf(tableConf.getDataNodes().size()) - }); - break; - } - } - - private void loadDataNodes(Element root) { - //读取DataNode分支 - NodeList list = root.getElementsByTagName("dataNode"); - for (int i = 0, n = list.getLength(); i < n; i++) { - Element element = (Element) list.item(i); - String dnNamePre = element.getAttribute("name"); - - String databaseStr = element.getAttribute("database"); - String host = element.getAttribute("dataHost"); - //字符串不为空 - if (empty(dnNamePre) || empty(databaseStr) || empty(host)) { - throw new ConfigException("dataNode " + dnNamePre + " define error ,attribute can't be empty"); - } - //dnNames(name),databases(database),hostStrings(dataHost)都可以配置多个,以',', '$', '-'区分,但是需要保证database的个数*dataHost的个数=name的个数 - //多个dataHost与多个database如果写在一个标签,则每个dataHost拥有所有database - //例如: - //则为:localhost1拥有dn1$0-75,localhost2也拥有dn1$0-75(对应db$76-151) - String[] dnNames = 
io.mycat.util.SplitUtil.split(dnNamePre, ',', '$', '-'); - String[] databases = io.mycat.util.SplitUtil.split(databaseStr, ',', '$', '-'); - String[] hostStrings = io.mycat.util.SplitUtil.split(host, ',', '$', '-'); - - if (dnNames.length > 1 && dnNames.length != databases.length * hostStrings.length) { - throw new ConfigException("dataNode " + dnNamePre - + " define error ,dnNames.length must be=databases.length*hostStrings.length"); - } - if (dnNames.length > 1) { - - List mhdList = mergerHostDatabase(hostStrings, databases); - for (int k = 0; k < dnNames.length; k++) { - String[] hd = mhdList.get(k); - String dnName = dnNames[k]; - String databaseName = hd[1]; - String hostName = hd[0]; - createDataNode(dnName, databaseName, hostName); - } - - } else { - createDataNode(dnNamePre, databaseStr, host); - } - - } - } - - /** - * 匹配DataHost和Database,每个DataHost拥有每个Database名字 - * @param hostStrings - * @param databases + + private static final Logger LOGGER = LoggerFactory.getLogger(XMLSchemaLoader.class); + + private final static String DEFAULT_DTD = "/schema.dtd"; + private final static String DEFAULT_XML = "/schema.xml"; + + private final Map tableRules; + private final Map dataHosts; + private final Map dataNodes; + private final Map schemas; + + public XMLSchemaLoader(String schemaFile, String ruleFile) { + //先读取rule.xml + XMLRuleLoader ruleLoader = new XMLRuleLoader(ruleFile); + //将tableRules拿出,用于这里加载Schema做rule有效判断,以及之后的分片路由计算 + this.tableRules = ruleLoader.getTableRules(); + //释放ruleLoader + ruleLoader = null; + this.dataHosts = new HashMap(); + this.dataNodes = new HashMap(); + this.schemas = new HashMap(); + //读取加载schema配置 + this.load(DEFAULT_DTD, schemaFile == null ? DEFAULT_XML : schemaFile); + } + + public XMLSchemaLoader() { + this(null, null); + } + + @Override + public Map getTableRules() { + return tableRules; + } + + @Override + public Map getDataHosts() { + return (Map) (dataHosts.isEmpty() ? 
Collections.emptyMap() : dataHosts); + } + + @Override + public Map getDataNodes() { + return (Map) (dataNodes.isEmpty() ? Collections.emptyMap() : dataNodes); + } + + @Override + public Map getSchemas() { + return (Map) (schemas.isEmpty() ? Collections.emptyMap() : schemas); + } + + private void load(String dtdFile, String xmlFile) { + InputStream dtd = null; + InputStream xml = null; + try { + dtd = XMLSchemaLoader.class.getResourceAsStream(dtdFile); + xml = XMLSchemaLoader.class.getResourceAsStream(xmlFile); + Element root = ConfigUtil.getDocument(dtd, xml).getDocumentElement(); + //先加载所有的DataHost + loadDataHosts(root); + //再加载所有的DataNode + loadDataNodes(root); + //最后加载所有的Schema + loadSchemas(root); + } catch (ConfigException e) { + throw e; + } catch (Exception e) { + throw new ConfigException(e); + } finally { + + if (dtd != null) { + try { + dtd.close(); + } catch (IOException e) { + } + } + + if (xml != null) { + try { + xml.close(); + } catch (IOException e) { + } + } + } + } + + private void loadSchemas(Element root) { + NodeList list = root.getElementsByTagName("schema"); + for (int i = 0, n = list.getLength(); i < n; i++) { + Element schemaElement = (Element) list.item(i); + //读取各个属性 + String name = schemaElement.getAttribute("name"); + String dataNode = schemaElement.getAttribute("dataNode"); + String checkSQLSchemaStr = schemaElement.getAttribute("checkSQLschema"); + String sqlMaxLimitStr = schemaElement.getAttribute("sqlMaxLimit"); + int sqlMaxLimit = -1; + //读取sql返回结果集限制 + if (sqlMaxLimitStr != null && !sqlMaxLimitStr.isEmpty()) { + sqlMaxLimit = Integer.parseInt(sqlMaxLimitStr); + } + + // check dataNode already exists or not,看schema标签中是否有datanode + String defaultDbType = null; + //校验检查并添加dataNode + if (dataNode != null && !dataNode.isEmpty()) { + List dataNodeLst = new ArrayList(1); + dataNodeLst.add(dataNode); + checkDataNodeExists(dataNodeLst); + String dataHost = dataNodes.get(dataNode).getDataHost(); + defaultDbType = 
dataHosts.get(dataHost).getDbType(); + } else { + dataNode = null; + } + //加载schema下所有tables + Map tables = loadTables(schemaElement); + //判断schema是否重复 + if (schemas.containsKey(name)) { + throw new ConfigException("schema " + name + " duplicated!"); + } + + // 设置了table的不需要设置dataNode属性,没有设置table的必须设置dataNode属性 + if (dataNode == null && tables.size() == 0) { + throw new ConfigException( + "schema " + name + " didn't config tables,so you must set dataNode property!"); + } + + SchemaConfig schemaConfig = new SchemaConfig(name, dataNode, + tables, sqlMaxLimit, "true".equalsIgnoreCase(checkSQLSchemaStr)); + + //设定DB类型,这对之后的sql语句路由解析有帮助 + if (defaultDbType != null) { + schemaConfig.setDefaultDataNodeDbType(defaultDbType); + if (!"mysql".equalsIgnoreCase(defaultDbType)) { + schemaConfig.setNeedSupportMultiDBType(true); + } + } + + // 判断是否有不是mysql的数据库类型,方便解析判断是否启用多数据库分页语法解析 + for (TableConfig tableConfig : tables.values()) { + if (isHasMultiDbType(tableConfig)) { + schemaConfig.setNeedSupportMultiDBType(true); + break; + } + } + //记录每种dataNode的DB类型 + Map dataNodeDbTypeMap = new HashMap<>(); + for (String dataNodeName : dataNodes.keySet()) { + DataNodeConfig dataNodeConfig = dataNodes.get(dataNodeName); + String dataHost = dataNodeConfig.getDataHost(); + DataHostConfig dataHostConfig = dataHosts.get(dataHost); + if (dataHostConfig != null) { + String dbType = dataHostConfig.getDbType(); + dataNodeDbTypeMap.put(dataNodeName, dbType); + } + } + schemaConfig.setDataNodeDbTypeMap(dataNodeDbTypeMap); + schemas.put(name, schemaConfig); + } + } + + + /** + * 处理动态日期表, 支持 YYYYMM、YYYYMMDD 两种格式 + *

+ * YYYYMM格式: yyyymm,2015,01,60 + * YYYYMMDD格式: yyyymmdd,2015,01,10,50 + * + * @param tableNameElement + * @param tableNameSuffixElement + * @return + */ + private String doTableNameSuffix(String tableNameElement, String tableNameSuffixElement) { + + String newTableName = tableNameElement; + + String[] params = tableNameSuffixElement.split(","); + String suffixFormat = params[0].toUpperCase(); + if (suffixFormat.equals("YYYYMM")) { + + //读取参数 + int yyyy = Integer.parseInt(params[1]); + int mm = Integer.parseInt(params[2]); + int mmEndIdx = Integer.parseInt(params[3]); + + //日期处理 + SimpleDateFormat yyyyMMSDF = new SimpleDateFormat("yyyyMM"); + + Calendar cal = Calendar.getInstance(); + cal.set(Calendar.YEAR, yyyy); + cal.set(Calendar.MONTH, mm - 1); + cal.set(Calendar.DATE, 0); + + //表名改写 + StringBuffer tableNameBuffer = new StringBuffer(); + for (int mmIdx = 0; mmIdx <= mmEndIdx; mmIdx++) { + tableNameBuffer.append(tableNameElement); + tableNameBuffer.append(yyyyMMSDF.format(cal.getTime())); + cal.add(Calendar.MONTH, 1); + + if (mmIdx != mmEndIdx) { + tableNameBuffer.append(","); + } + } + newTableName = tableNameBuffer.toString(); + + } else if (suffixFormat.equals("YYYYMMDD")) { + + //读取参数 + int yyyy = Integer.parseInt(params[1]); + int mm = Integer.parseInt(params[2]); + int dd = Integer.parseInt(params[3]); + int ddEndIdx = Integer.parseInt(params[4]); + + //日期处理 + SimpleDateFormat yyyyMMddSDF = new SimpleDateFormat("yyyyMMdd"); + + Calendar cal = Calendar.getInstance(); + cal.set(Calendar.YEAR, yyyy); + cal.set(Calendar.MONTH, mm - 1); + cal.set(Calendar.DATE, dd); + + //表名改写 + StringBuffer tableNameBuffer = new StringBuffer(); + for (int ddIdx = 0; ddIdx <= ddEndIdx; ddIdx++) { + tableNameBuffer.append(tableNameElement); + tableNameBuffer.append(yyyyMMddSDF.format(cal.getTime())); + + cal.add(Calendar.DATE, 1); + + if (ddIdx != ddEndIdx) { + tableNameBuffer.append(","); + } + } + newTableName = tableNameBuffer.toString(); + } + return newTableName; + } + + + 
private Map loadTables(Element node) { + + // Map tables = new HashMap(); + + // 支持表名中包含引号[`] BEN GONG + Map tables = new TableConfigMap(); + NodeList nodeList = node.getElementsByTagName("table"); + for (int i = 0; i < nodeList.getLength(); i++) { + Element tableElement = (Element) nodeList.item(i); + String tableNameElement = tableElement.getAttribute("name").toUpperCase(); + + //TODO:路由, 增加对动态日期表的支持 + String tableNameSuffixElement = tableElement.getAttribute("nameSuffix").toUpperCase(); + if (!"".equals(tableNameSuffixElement)) { + + if (tableNameElement.split(",").length > 1) { + throw new ConfigException("nameSuffix " + tableNameSuffixElement + ", require name parameter cannot multiple breaks!"); + } + //前缀用来标明日期格式 + tableNameElement = doTableNameSuffix(tableNameElement, tableNameSuffixElement); + } + //记录主键,用于之后路由分析,以及启用自增长主键 + String[] tableNames = tableNameElement.split(","); + String primaryKey = tableElement.hasAttribute("primaryKey") ? tableElement.getAttribute("primaryKey").toUpperCase() : null; + //记录是否主键自增,默认不是,(启用全局sequence handler) + boolean autoIncrement = false; + if (tableElement.hasAttribute("autoIncrement")) { + autoIncrement = Boolean.parseBoolean(tableElement.getAttribute("autoIncrement")); + } + //记录是否需要加返回结果集限制,默认需要加 + boolean needAddLimit = true; + if (tableElement.hasAttribute("needAddLimit")) { + needAddLimit = Boolean.parseBoolean(tableElement.getAttribute("needAddLimit")); + } + //记录type,是否为global + String tableTypeStr = tableElement.hasAttribute("type") ? 
tableElement.getAttribute("type") : null; + int tableType = TableConfig.TYPE_GLOBAL_DEFAULT; + if ("global".equalsIgnoreCase(tableTypeStr)) { + tableType = TableConfig.TYPE_GLOBAL_TABLE; + } + //记录dataNode,就是分布在哪些dataNode上 + String dataNode = tableElement.getAttribute("dataNode"); + TableRuleConfig tableRule = null; + if (tableElement.hasAttribute("rule")) { + String ruleName = tableElement.getAttribute("rule"); + tableRule = tableRules.get(ruleName); + if (tableRule == null) { + throw new ConfigException("rule " + ruleName + " is not found!"); + } + } + + boolean ruleRequired = false; + //记录是否绑定有分片规则 + if (tableElement.hasAttribute("ruleRequired")) { + ruleRequired = Boolean.parseBoolean(tableElement.getAttribute("ruleRequired")); + } + + if (tableNames == null) { + throw new ConfigException("table name is not found!"); + } + //distribute函数,重新编排dataNode + String distPrex = "distribute("; + boolean distTableDns = dataNode.startsWith(distPrex); + if (distTableDns) { + dataNode = dataNode.substring(distPrex.length(), dataNode.length() - 1); + } + //分表功能 + String subTables = tableElement.getAttribute("subTables"); + String sumTable = tableElement.getAttribute("sumTable"); + for (int j = 0; j < tableNames.length; j++) { + String tableName = tableNames[j]; + TableRuleConfig tableRuleConfig = tableRule; + if (tableRuleConfig != null) { + //对于实现TableRuleAware的function进行特殊处理 根据每个表新建个实例 + RuleConfig rule = tableRuleConfig.getRule(); + if (rule.getRuleAlgorithm() instanceof TableRuleAware) { + tableRuleConfig = (TableRuleConfig) ObjectUtil.copyObject(tableRuleConfig); + tableRules.remove(tableRuleConfig.getName()); + String newRuleName = tableRuleConfig.getName() + "_" + tableName; + tableRuleConfig.setName(newRuleName); + TableRuleAware tableRuleAware = (TableRuleAware) tableRuleConfig.getRule().getRuleAlgorithm(); + tableRuleAware.setRuleName(newRuleName); + tableRuleAware.setTableName(tableName); + tableRuleConfig.getRule().getRuleAlgorithm().init(); + 
tableRules.put(newRuleName, tableRuleConfig); + } + } + + TableConfig table = new TableConfig(tableName, primaryKey, + autoIncrement, needAddLimit, tableType, dataNode, + getDbType(dataNode), + (tableRuleConfig != null) ? tableRuleConfig.getRule() : null, + ruleRequired, null, false, null, null, subTables, sumTable); + + checkDataNodeExists(table.getDataNodes()); + // 检查分片表分片规则配置是否合法 + if (table.getRule() != null) { + checkRuleSuitTable(table); + } + + if (distTableDns) { + distributeDataNodes(table.getDataNodes()); + } + //检查去重 + if (tables.containsKey(table.getName())) { + throw new ConfigException("table " + tableName + " duplicated!"); + } + //放入map + tables.put(table.getName(), table); + } + //只有tableName配置的是单个表(没有逗号)的时候才能有子表 + if (tableNames.length == 1) { + TableConfig table = tables.get(tableNames[0]); + // process child tables + processChildTables(tables, table, dataNode, tableElement); + } + } + return tables; + } + + /** + * distribute datanodes in multi hosts,means ,dn1 (host1),dn100 + * (host2),dn300(host3),dn2(host1),dn101(host2),dn301(host3)...etc + * 将每个host上的datanode按照host重新排列。比如上面的例子host1拥有dn1,dn2,host2拥有dn100,dn101,host3拥有dn300,dn301, + * 按照host重新排列: 0->dn1 (host1),1->dn100(host2),2->dn300(host3),3->dn2(host1),4->dn101(host2),5->dn301(host3) + * + * @param theDataNodes + */ + private void distributeDataNodes(ArrayList theDataNodes) { + Map> newDataNodeMap = new HashMap>(dataHosts.size()); + for (String dn : theDataNodes) { + DataNodeConfig dnConf = dataNodes.get(dn); + String host = dnConf.getDataHost(); + ArrayList hostDns = newDataNodeMap.get(host); + hostDns = (hostDns == null) ? 
new ArrayList() : hostDns; + hostDns.add(dn); + newDataNodeMap.put(host, hostDns); + } + + ArrayList result = new ArrayList(theDataNodes.size()); + boolean hasData = true; + while (hasData) { + hasData = false; + for (ArrayList dns : newDataNodeMap.values()) { + if (!dns.isEmpty()) { + result.add(dns.remove(0)); + hasData = true; + } + } + } + theDataNodes.clear(); + theDataNodes.addAll(result); + } + + private Set getDbType(String dataNode) { + Set dbTypes = new HashSet<>(); + String[] dataNodeArr = SplitUtil.split(dataNode, ',', '$', '-'); + for (String node : dataNodeArr) { + DataNodeConfig datanode = dataNodes.get(node); + DataHostConfig datahost = dataHosts.get(datanode.getDataHost()); + dbTypes.add(datahost.getDbType()); + } + + return dbTypes; + } + + private Set getDataNodeDbTypeMap(String dataNode) { + Set dbTypes = new HashSet<>(); + String[] dataNodeArr = SplitUtil.split(dataNode, ',', '$', '-'); + for (String node : dataNodeArr) { + DataNodeConfig datanode = dataNodes.get(node); + DataHostConfig datahost = dataHosts.get(datanode.getDataHost()); + dbTypes.add(datahost.getDbType()); + } + return dbTypes; + } + + private boolean isHasMultiDbType(TableConfig table) { + Set dbTypes = table.getDbTypes(); + for (String dbType : dbTypes) { + if (!"mysql".equalsIgnoreCase(dbType)) { + return true; + } + } + return false; + } + + private void processChildTables(Map tables, + TableConfig parentTable, String dataNodes, Element tableNode) { + + // parse child tables + NodeList childNodeList = tableNode.getChildNodes(); + for (int j = 0; j < childNodeList.getLength(); j++) { + Node theNode = childNodeList.item(j); + if (!theNode.getNodeName().equals("childTable")) { + continue; + } + Element childTbElement = (Element) theNode; + //读取子表信息 + String cdTbName = childTbElement.getAttribute("name").toUpperCase(); + String primaryKey = childTbElement.hasAttribute("primaryKey") ? 
childTbElement.getAttribute("primaryKey").toUpperCase() : null; + + boolean autoIncrement = false; + if (childTbElement.hasAttribute("autoIncrement")) { + autoIncrement = Boolean.parseBoolean(childTbElement.getAttribute("autoIncrement")); + } + boolean needAddLimit = true; + if (childTbElement.hasAttribute("needAddLimit")) { + needAddLimit = Boolean.parseBoolean(childTbElement.getAttribute("needAddLimit")); + } + String subTables = childTbElement.getAttribute("subTables"); + String sumTable = childTbElement.getAttribute("sumTable"); + //子表join键,和对应的parent的键,父子表通过这个关联 + String joinKey = childTbElement.getAttribute("joinKey").toUpperCase(); + String parentKey = childTbElement.getAttribute("parentKey").toUpperCase(); + TableConfig table = new TableConfig(cdTbName, primaryKey, + autoIncrement, needAddLimit, + TableConfig.TYPE_GLOBAL_DEFAULT, dataNodes, + getDbType(dataNodes), null, false, parentTable, true, + joinKey, parentKey, subTables, sumTable); + + if (tables.containsKey(table.getName())) { + throw new ConfigException("table " + table.getName() + " duplicated!"); + } + tables.put(table.getName(), table); + //对于子表的子表,递归处理 + processChildTables(tables, table, dataNodes, childTbElement); + } + } + + private void checkDataNodeExists(Collection nodes) { + if (nodes == null || nodes.size() < 1) { + return; + } + for (String node : nodes) { + if (!dataNodes.containsKey(node)) { + throw new ConfigException("dataNode '" + node + "' is not found!"); + } + } + } + + /** + * 检查分片表分片规则配置, 目前主要检查分片表分片算法定义与分片dataNode是否匹配
+ * 例如分片表定义如下:<br>
+ * {@code
+ * <table name="hotnews" dataNode="dn1,dn2" rule="mod-long" />
+ * }
+ * <p>
+ * 分片算法如下:<br>
+ * {@code
+ * <function name="mod-long" class="io.mycat.route.function.PartitionByMod">
+ *     <property name="count">3</property>
+ * </function>
+ * }
+ * <p>
+ * shard table datanode(2) < function count(3) 此时检测为不匹配 + */ + private void checkRuleSuitTable(TableConfig tableConf) { + AbstractPartitionAlgorithm function = tableConf.getRule().getRuleAlgorithm(); + int suitValue = function.suitableFor(tableConf); + switch (suitValue) { + case -1: + // 少节点,给提示并抛异常 + throw new ConfigException("Illegal table conf : table [ " + tableConf.getName() + " ] rule function [ " + + tableConf.getRule().getFunctionName() + " ] partition size : " + tableConf.getRule().getRuleAlgorithm().getPartitionNum() + " > table datanode size : " + + tableConf.getDataNodes().size() + ", please make sure table datanode size = function partition size"); + case 0: + // table datanode size == rule function partition size + break; + case 1: + // 有些节点是多余的,给出warn log + LOGGER.warn("table conf : table [ {} ] rule function [ {} ] partition size : {} < table datanode size : {} , this cause some datanode to be redundant", + new String[]{ + tableConf.getName(), + tableConf.getRule().getFunctionName(), + String.valueOf(tableConf.getRule().getRuleAlgorithm().getPartitionNum()), + String.valueOf(tableConf.getDataNodes().size()) + }); + break; + } + } + + private void loadDataNodes(Element root) { + //读取DataNode分支 + NodeList list = root.getElementsByTagName("dataNode"); + for (int i = 0, n = list.getLength(); i < n; i++) { + Element element = (Element) list.item(i); + String dnNamePre = element.getAttribute("name"); + + String databaseStr = element.getAttribute("database"); + String host = element.getAttribute("dataHost"); + //字符串不为空 + if (empty(dnNamePre) || empty(databaseStr) || empty(host)) { + throw new ConfigException("dataNode " + dnNamePre + " define error ,attribute can't be empty"); + } + //dnNames(name),databases(database),hostStrings(dataHost)都可以配置多个,以',', '$', '-'区分,但是需要保证database的个数*dataHost的个数=name的个数 + //多个dataHost与多个database如果写在一个标签,则每个dataHost拥有所有database + //例如: + //则为:localhost1拥有dn1$0-75,localhost2也拥有dn1$0-75(对应db$76-151) + String[] dnNames = 
io.mycat.util.SplitUtil.split(dnNamePre, ',', '$', '-'); + String[] databases = io.mycat.util.SplitUtil.split(databaseStr, ',', '$', '-'); + String[] hostStrings = io.mycat.util.SplitUtil.split(host, ',', '$', '-'); + + if (dnNames.length > 1 && dnNames.length != databases.length * hostStrings.length) { + throw new ConfigException("dataNode " + dnNamePre + + " define error ,dnNames.length must be=databases.length*hostStrings.length"); + } + if (dnNames.length > 1) { + + List mhdList = mergerHostDatabase(hostStrings, databases); + for (int k = 0; k < dnNames.length; k++) { + String[] hd = mhdList.get(k); + String dnName = dnNames[k]; + String databaseName = hd[1]; + String hostName = hd[0]; + createDataNode(dnName, databaseName, hostName); + } + + } else { + createDataNode(dnNamePre, databaseStr, host); + } + + } + } + + /** + * 匹配DataHost和Database,每个DataHost拥有每个Database名字 + * + * @param hostStrings + * @param databases * @return */ - private List mergerHostDatabase(String[] hostStrings, String[] databases) { - List mhdList = new ArrayList<>(); - for (int i = 0; i < hostStrings.length; i++) { - String hostString = hostStrings[i]; - for (int i1 = 0; i1 < databases.length; i1++) { - String database = databases[i1]; - String[] hd = new String[2]; - hd[0] = hostString; - hd[1] = database; - mhdList.add(hd); - } - } - return mhdList; - } - - private void createDataNode(String dnName, String database, String host) { - - DataNodeConfig conf = new DataNodeConfig(dnName, database, host); - if (dataNodes.containsKey(conf.getName())) { - throw new ConfigException("dataNode " + conf.getName() + " duplicated!"); - } - - if (!dataHosts.containsKey(host)) { - throw new ConfigException("dataNode " + dnName + " reference dataHost:" + host + " not exists!"); - } - - dataHosts.get(host).addDataNode(conf.getName()); - dataNodes.put(conf.getName(), conf); - } - - private boolean empty(String dnName) { - return dnName == null || dnName.length() == 0; - } - - private DBHostConfig 
createDBHostConf(String dataHost, Element node, - String dbType, String dbDriver, int maxCon, int minCon, String filters, long logTime) { - - String nodeHost = node.getAttribute("host"); - String nodeUrl = node.getAttribute("url"); - String user = node.getAttribute("user"); - String password = node.getAttribute("password"); - String usingDecrypt = node.getAttribute("usingDecrypt"); - String passwordEncryty= DecryptUtil.DBHostDecrypt(usingDecrypt, nodeHost, user, password); - - String weightStr = node.getAttribute("weight"); - int weight = "".equals(weightStr) ? PhysicalDBPool.WEIGHT : Integer.parseInt(weightStr) ; - - String ip = null; - int port = 0; - if (empty(nodeHost) || empty(nodeUrl) || empty(user)) { - throw new ConfigException( - "dataHost " - + dataHost - + " define error,some attributes of this element is empty: " - + nodeHost); - } - if ("native".equalsIgnoreCase(dbDriver)) { - int colonIndex = nodeUrl.indexOf(':'); - ip = nodeUrl.substring(0, colonIndex).trim(); - port = Integer.parseInt(nodeUrl.substring(colonIndex + 1).trim()); - } else { - URI url; - try { - url = new URI(nodeUrl.substring(5)); - } catch (Exception e) { - throw new ConfigException("invalid jdbc url " + nodeUrl + " of " + dataHost); - } - ip = url.getHost(); - port = url.getPort(); - } - - DBHostConfig conf = new DBHostConfig(nodeHost, ip, port, nodeUrl, user, passwordEncryty,password); - conf.setDbType(dbType); - conf.setMaxCon(maxCon); - conf.setMinCon(minCon); - conf.setFilters(filters); - conf.setLogTime(logTime); - conf.setWeight(weight); //新增权重 - return conf; - } - - private void loadDataHosts(Element root) { - NodeList list = root.getElementsByTagName("dataHost"); - for (int i = 0, n = list.getLength(); i < n; ++i) { - - Element element = (Element) list.item(i); - String name = element.getAttribute("name"); - //判断是否重复 - if (dataHosts.containsKey(name)) { - throw new ConfigException("dataHost name " + name + "duplicated!"); - } - //读取最大连接数 - int maxCon = 
Integer.parseInt(element.getAttribute("maxCon")); - //读取最小连接数 - int minCon = Integer.parseInt(element.getAttribute("minCon")); - /** - * 读取负载均衡配置 - * 1. balance="0", 不开启分离机制,所有读操作都发送到当前可用的 writeHost 上。 - * 2. balance="1",全部的 readHost 和 stand by writeHost 参不 select 的负载均衡 - * 3. balance="2",所有读操作都随机的在 writeHost、readhost 上分发。 - * 4. balance="3",所有读请求随机的分发到 wiriterHost 对应的 readhost 执行,writerHost 不负担读压力 - */ - int balance = Integer.parseInt(element.getAttribute("balance")); - /** - * 读取切换类型 - * -1 表示不自动切换 - * 1 默认值,自动切换 - * 2 基于MySQL主从同步的状态决定是否切换 - * 心跳询句为 show slave status - * 3 基于 MySQL galary cluster 的切换机制 - */ - String switchTypeStr = element.getAttribute("switchType"); - int switchType = switchTypeStr.equals("") ? -1 : Integer.parseInt(switchTypeStr); - //读取从延迟界限 - String slaveThresholdStr = element.getAttribute("slaveThreshold"); - int slaveThreshold = slaveThresholdStr.equals("") ? -1 : Integer.parseInt(slaveThresholdStr); - - //如果 tempReadHostAvailable 设置大于 0 则表示写主机如果挂掉, 临时的读服务依然可用 - String tempReadHostAvailableStr = element.getAttribute("tempReadHostAvailable"); - boolean tempReadHostAvailable = !tempReadHostAvailableStr.equals("") && Integer.parseInt(tempReadHostAvailableStr) > 0; - /** - * 读取 写类型 - * 这里只支持 0 - 所有写操作仅配置的第一个 writeHost - */ - String writeTypStr = element.getAttribute("writeType"); - int writeType = "".equals(writeTypStr) ? PhysicalDBPool.WRITE_ONLYONE_NODE : Integer.parseInt(writeTypStr); - - - String dbDriver = element.getAttribute("dbDriver"); - String dbType = element.getAttribute("dbType"); - String filters = element.getAttribute("filters"); - String logTimeStr = element.getAttribute("logTime"); - String slaveIDs = element.getAttribute("slaveIDs"); - long logTime = "".equals(logTimeStr) ? 
PhysicalDBPool.LONG_TIME : Long.parseLong(logTimeStr) ; - //读取心跳语句 - String heartbeatSQL = element.getElementsByTagName("heartbeat").item(0).getTextContent(); - //读取 初始化sql配置,用于oracle - NodeList connectionInitSqlList = element.getElementsByTagName("connectionInitSql"); - String initConSQL = null; - if (connectionInitSqlList.getLength() > 0) { - initConSQL = connectionInitSqlList.item(0).getTextContent(); - } - //读取writeHost - NodeList writeNodes = element.getElementsByTagName("writeHost"); - DBHostConfig[] writeDbConfs = new DBHostConfig[writeNodes.getLength()]; - Map readHostsMap = new HashMap(2); - Set writeHostNameSet = new HashSet(writeNodes.getLength()); - for (int w = 0; w < writeDbConfs.length; w++) { - Element writeNode = (Element) writeNodes.item(w); - writeDbConfs[w] = createDBHostConf(name, writeNode, dbType, dbDriver, maxCon, minCon,filters,logTime); - if(writeHostNameSet.contains(writeDbConfs[w].getHostName())) { - throw new ConfigException("writeHost " + writeDbConfs[w].getHostName() + " duplicated!"); - } else { - writeHostNameSet.add(writeDbConfs[w].getHostName()); - } - NodeList readNodes = writeNode.getElementsByTagName("readHost"); - //读取对应的每一个readHost - if (readNodes.getLength() != 0) { - DBHostConfig[] readDbConfs = new DBHostConfig[readNodes.getLength()]; - Set readHostNameSet = new HashSet(readNodes.getLength()); - for (int r = 0; r < readDbConfs.length; r++) { - Element readNode = (Element) readNodes.item(r); - readDbConfs[r] = createDBHostConf(name, readNode, dbType, dbDriver, maxCon, minCon,filters, logTime); - if(readHostNameSet.contains(readDbConfs[r].getHostName())) { - throw new ConfigException("readHost " + readDbConfs[r].getHostName() + " duplicated!"); - } else { - readHostNameSet.add(readDbConfs[r].getHostName()); - } - } - readHostsMap.put(w, readDbConfs); - } - } - - DataHostConfig hostConf = new DataHostConfig(name, dbType, dbDriver, - writeDbConfs, readHostsMap, switchType, slaveThreshold, tempReadHostAvailable); - - 
hostConf.setMaxCon(maxCon); - hostConf.setMinCon(minCon); - hostConf.setBalance(balance); - hostConf.setWriteType(writeType); - hostConf.setHearbeatSQL(heartbeatSQL); - hostConf.setConnectionInitSql(initConSQL); - hostConf.setFilters(filters); - hostConf.setLogTime(logTime); - hostConf.setSlaveIDs(slaveIDs); - dataHosts.put(hostConf.getName(), hostConf); - } - } + private List mergerHostDatabase(String[] hostStrings, String[] databases) { + List mhdList = new ArrayList<>(); + for (int i = 0; i < hostStrings.length; i++) { + String hostString = hostStrings[i]; + for (int i1 = 0; i1 < databases.length; i1++) { + String database = databases[i1]; + String[] hd = new String[2]; + hd[0] = hostString; + hd[1] = database; + mhdList.add(hd); + } + } + return mhdList; + } + + private void createDataNode(String dnName, String database, String host) { + + DataNodeConfig conf = new DataNodeConfig(dnName, database, host); + if (dataNodes.containsKey(conf.getName())) { + throw new ConfigException("dataNode " + conf.getName() + " duplicated!"); + } + + if (!dataHosts.containsKey(host)) { + throw new ConfigException("dataNode " + dnName + " reference dataHost:" + host + " not exists!"); + } + + dataHosts.get(host).addDataNode(conf.getName()); + dataNodes.put(conf.getName(), conf); + } + + private boolean empty(String dnName) { + return dnName == null || dnName.length() == 0; + } + + private DBHostConfig createDBHostConf(String dataHost, Element node, + String dbType, String dbDriver, int maxCon, int minCon, String filters, long logTime) { + + String nodeHost = node.getAttribute("host"); + String nodeUrl = node.getAttribute("url"); + String user = node.getAttribute("user"); + String password = node.getAttribute("password"); + String usingDecrypt = node.getAttribute("usingDecrypt"); + String passwordEncryty = DecryptUtil.DBHostDecrypt(usingDecrypt, nodeHost, user, password); + + String weightStr = node.getAttribute("weight"); + int weight = "".equals(weightStr) ? 
PhysicalDBPool.WEIGHT : Integer.parseInt(weightStr); + + String ip = null; + int port = 0; + if (empty(nodeHost) || empty(nodeUrl) || empty(user)) { + throw new ConfigException( + "dataHost " + + dataHost + + " define error,some attributes of this element is empty: " + + nodeHost); + } + if ("native".equalsIgnoreCase(dbDriver)) { + int colonIndex = nodeUrl.indexOf(':'); + ip = nodeUrl.substring(0, colonIndex).trim(); + port = Integer.parseInt(nodeUrl.substring(colonIndex + 1).trim()); + } else { + URI url; + try { + url = new URI(nodeUrl.substring(5)); + } catch (Exception e) { + throw new ConfigException("invalid jdbc url " + nodeUrl + " of " + dataHost); + } + ip = url.getHost(); + port = url.getPort(); + } + + DBHostConfig conf = new DBHostConfig(nodeHost, ip, port, nodeUrl, user, passwordEncryty, password); + conf.setDbType(dbType); + conf.setMaxCon(maxCon); + conf.setMinCon(minCon); + conf.setFilters(filters); + conf.setLogTime(logTime); + conf.setWeight(weight); //新增权重 + return conf; + } + + private void loadDataHosts(Element root) { + NodeList list = root.getElementsByTagName("dataHost"); + for (int i = 0, n = list.getLength(); i < n; ++i) { + + Element element = (Element) list.item(i); + String name = element.getAttribute("name"); + //判断是否重复 + if (dataHosts.containsKey(name)) { + throw new ConfigException("dataHost name " + name + "duplicated!"); + } + //读取最大连接数 + int maxCon = Integer.parseInt(element.getAttribute("maxCon")); + //读取最小连接数 + int minCon = Integer.parseInt(element.getAttribute("minCon")); + /** + * 读取负载均衡配置 + * 1. balance="0", 不开启分离机制,所有读操作都发送到当前可用的 writeHost 上。 + * 2. balance="1",全部的 readHost 和 stand by writeHost 参不 select 的负载均衡 + * 3. balance="2",所有读操作都随机的在 writeHost、readhost 上分发。 + * 4. 
balance="3",所有读请求随机的分发到 wiriterHost 对应的 readhost 执行,writerHost 不负担读压力 + */ + int balance = Integer.parseInt(element.getAttribute("balance")); + /** + * 读取切换类型 + * -1 表示不自动切换 + * 1 默认值,自动切换 + * 2 基于MySQL主从同步的状态决定是否切换 + * 心跳询句为 show slave status + * 3 基于 MySQL galary cluster 的切换机制 + */ + String switchTypeStr = element.getAttribute("switchType"); + int switchType = switchTypeStr.equals("") ? -1 : Integer.parseInt(switchTypeStr); + //读取从延迟界限 + String slaveThresholdStr = element.getAttribute("slaveThreshold"); + int slaveThreshold = slaveThresholdStr.equals("") ? -1 : Integer.parseInt(slaveThresholdStr); + + //如果 tempReadHostAvailable 设置大于 0 则表示写主机如果挂掉, 临时的读服务依然可用 + String tempReadHostAvailableStr = element.getAttribute("tempReadHostAvailable"); + boolean tempReadHostAvailable = !tempReadHostAvailableStr.equals("") && Integer.parseInt(tempReadHostAvailableStr) > 0; + /** + * 读取 写类型 + * 这里只支持 0 - 所有写操作仅配置的第一个 writeHost + */ + String writeTypStr = element.getAttribute("writeType"); + int writeType = "".equals(writeTypStr) ? PhysicalDBPool.WRITE_ONLYONE_NODE : Integer.parseInt(writeTypStr); + + + String dbDriver = element.getAttribute("dbDriver"); + String dbType = element.getAttribute("dbType"); + String filters = element.getAttribute("filters"); + String logTimeStr = element.getAttribute("logTime"); + String slaveIDs = element.getAttribute("slaveIDs"); + long logTime = "".equals(logTimeStr) ? 
PhysicalDBPool.LONG_TIME : Long.parseLong(logTimeStr); + //读取心跳语句 + String heartbeatSQL = element.getElementsByTagName("heartbeat").item(0).getTextContent(); + //读取 初始化sql配置,用于oracle + NodeList connectionInitSqlList = element.getElementsByTagName("connectionInitSql"); + String initConSQL = null; + if (connectionInitSqlList.getLength() > 0) { + initConSQL = connectionInitSqlList.item(0).getTextContent(); + } + //读取writeHost + NodeList writeNodes = element.getElementsByTagName("writeHost"); + DBHostConfig[] writeDbConfs = new DBHostConfig[writeNodes.getLength()]; + Map readHostsMap = new HashMap(2); + Set writeHostNameSet = new HashSet(writeNodes.getLength()); + for (int w = 0; w < writeDbConfs.length; w++) { + Element writeNode = (Element) writeNodes.item(w); + writeDbConfs[w] = createDBHostConf(name, writeNode, dbType, dbDriver, maxCon, minCon, filters, logTime); + if (writeHostNameSet.contains(writeDbConfs[w].getHostName())) { + throw new ConfigException("writeHost " + writeDbConfs[w].getHostName() + " duplicated!"); + } else { + writeHostNameSet.add(writeDbConfs[w].getHostName()); + } + NodeList readNodes = writeNode.getElementsByTagName("readHost"); + //读取对应的每一个readHost + if (readNodes.getLength() != 0) { + DBHostConfig[] readDbConfs = new DBHostConfig[readNodes.getLength()]; + Set readHostNameSet = new HashSet(readNodes.getLength()); + for (int r = 0; r < readDbConfs.length; r++) { + Element readNode = (Element) readNodes.item(r); + readDbConfs[r] = createDBHostConf(name, readNode, dbType, dbDriver, maxCon, minCon, filters, logTime); + if (readHostNameSet.contains(readDbConfs[r].getHostName())) { + throw new ConfigException("readHost " + readDbConfs[r].getHostName() + " duplicated!"); + } else { + readHostNameSet.add(readDbConfs[r].getHostName()); + } + } + readHostsMap.put(w, readDbConfs); + } + } + + DataHostConfig hostConf = new DataHostConfig(name, dbType, dbDriver, + writeDbConfs, readHostsMap, switchType, slaveThreshold, tempReadHostAvailable); + + 
hostConf.setMaxCon(maxCon); + hostConf.setMinCon(minCon); + hostConf.setBalance(balance); + hostConf.setWriteType(writeType); + hostConf.setHearbeatSQL(heartbeatSQL); + hostConf.setConnectionInitSql(initConSQL); + hostConf.setFilters(filters); + hostConf.setLogTime(logTime); + hostConf.setSlaveIDs(slaveIDs); + dataHosts.put(hostConf.getName(), hostConf); + } + } } diff --git a/src/main/java/io/mycat/config/model/TableConfig.java b/src/main/java/io/mycat/config/model/TableConfig.java index dd2ede1a3..16009e672 100644 --- a/src/main/java/io/mycat/config/model/TableConfig.java +++ b/src/main/java/io/mycat/config/model/TableConfig.java @@ -23,283 +23,285 @@ */ package io.mycat.config.model; -import java.util.*; -import java.util.concurrent.locks.ReentrantLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import com.alibaba.druid.sql.ast.SQLDataType; import com.alibaba.druid.sql.ast.statement.SQLTableElement; import io.mycat.config.model.rule.RuleConfig; import io.mycat.util.SplitUtil; +import java.util.*; +import java.util.concurrent.locks.ReentrantReadWriteLock; + /** * @author mycat */ public class TableConfig { - public static final int TYPE_GLOBAL_TABLE = 1; - public static final int TYPE_GLOBAL_DEFAULT = 0; - private final String name; - private final String primaryKey; - private final boolean autoIncrement; - private final boolean needAddLimit; - private final Set dbTypes; - private final int tableType; - private final ArrayList dataNodes; - private final ArrayList distTables; - private final RuleConfig rule; - private final String partitionColumn; - private final boolean ruleRequired; - private final TableConfig parentTC; - private final boolean childTable; - private final String joinKey; - private final String parentKey; - private final String locateRTableKeySql; - // only has one level of parent - private final boolean secondLevel; - private final boolean partionKeyIsPrimaryKey; - private final Random rand = new Random(); - - private volatile 
List tableElementList; - private volatile String tableStructureSQL; - private volatile Map> dataNodeTableStructureSQLMap; - private ReentrantReadWriteLock reentrantReadWriteLock = new ReentrantReadWriteLock(false); - - - public TableConfig(String name, String primaryKey, boolean autoIncrement,boolean needAddLimit, int tableType, - String dataNode,Set dbType, RuleConfig rule, boolean ruleRequired, - TableConfig parentTC, boolean isChildTable, String joinKey, - String parentKey,String subTables) { - if (name == null) { - throw new IllegalArgumentException("table name is null"); - } else if (dataNode == null) { - throw new IllegalArgumentException("dataNode name is null"); - } - this.primaryKey = primaryKey; - this.autoIncrement = autoIncrement; - this.needAddLimit=needAddLimit; - this.tableType = tableType; - this.dbTypes=dbType; - if (ruleRequired && rule == null) { - throw new IllegalArgumentException("ruleRequired but rule is null"); - } - - this.name = name.toUpperCase(); - - String theDataNodes[] = SplitUtil.split(dataNode, ',', '$', '-'); - if (theDataNodes == null || theDataNodes.length <= 0) { - throw new IllegalArgumentException("invalid table dataNodes: " + dataNode); - } - dataNodes = new ArrayList(theDataNodes.length); - for (String dn : theDataNodes) { - dataNodes.add(dn); - } - - if(subTables!=null && !subTables.equals("")){ - String sTables[] = SplitUtil.split(subTables, ',', '$', '-'); - if (sTables == null || sTables.length <= 0) { - throw new IllegalArgumentException("invalid table subTables"); - } - this.distTables = new ArrayList(sTables.length); - for (String table : sTables) { - distTables.add(table); - } - }else{ - this.distTables = new ArrayList(); - } - - this.rule = rule; - this.partitionColumn = (rule == null) ? 
null : rule.getColumn(); - partionKeyIsPrimaryKey=(partitionColumn==null)?primaryKey==null:partitionColumn.equals(primaryKey); - this.ruleRequired = ruleRequired; - this.childTable = isChildTable; - this.parentTC = parentTC; - this.joinKey = joinKey; - this.parentKey = parentKey; - if (parentTC != null) { - locateRTableKeySql = genLocateRootParentSQL(); - secondLevel = (parentTC.parentTC == null); - } else { - locateRTableKeySql = null; - secondLevel = false; - } - } - - public String getPrimaryKey() { - return primaryKey; - } - - public Set getDbTypes() - { + public static final int TYPE_GLOBAL_TABLE = 1; + public static final int TYPE_GLOBAL_DEFAULT = 0; + private final String name; + private final String primaryKey; + private final boolean autoIncrement; + private final boolean needAddLimit; + private final Set dbTypes; + private final int tableType; + private final ArrayList dataNodes; + private final ArrayList distTables; + private final RuleConfig rule; + private final String partitionColumn; + private final boolean ruleRequired; + private final TableConfig parentTC; + private final boolean childTable; + private final String joinKey; + private final String parentKey; + private final String locateRTableKeySql; + // only has one level of parent + private final boolean secondLevel; + private final boolean partionKeyIsPrimaryKey; + private final Random rand = new Random(); + private final String sumTable; + + private volatile List tableElementList; + private volatile String tableStructureSQL; + private volatile Map> dataNodeTableStructureSQLMap; + private ReentrantReadWriteLock reentrantReadWriteLock = new ReentrantReadWriteLock(false); + + + public TableConfig(String name, String primaryKey, boolean autoIncrement, boolean needAddLimit, int tableType, + String dataNode, Set dbType, RuleConfig rule, boolean ruleRequired, + TableConfig parentTC, boolean isChildTable, String joinKey, + String parentKey, String subTables, String sumTable) { + if (name == null) { + 
throw new IllegalArgumentException("table name is null"); + } else if (dataNode == null) { + throw new IllegalArgumentException("dataNode name is null"); + } + this.primaryKey = primaryKey; + this.autoIncrement = autoIncrement; + this.needAddLimit = needAddLimit; + this.tableType = tableType; + this.dbTypes = dbType; + if (ruleRequired && rule == null) { + throw new IllegalArgumentException("ruleRequired but rule is null"); + } + + this.name = name.toUpperCase(); + + String theDataNodes[] = SplitUtil.split(dataNode, ',', '$', '-'); + if (theDataNodes == null || theDataNodes.length <= 0) { + throw new IllegalArgumentException("invalid table dataNodes: " + dataNode); + } + dataNodes = new ArrayList(theDataNodes.length); + for (String dn : theDataNodes) { + dataNodes.add(dn); + } + + if (subTables != null && !subTables.equals("")) { + String sTables[] = SplitUtil.split(subTables, ',', '$', '-'); + if (sTables == null || sTables.length <= 0) { + throw new IllegalArgumentException("invalid table subTables"); + } + this.distTables = new ArrayList(sTables.length); + for (String table : sTables) { + distTables.add(table); + } + } else { + this.distTables = new ArrayList(); + } + + if (null != sumTable && !"".equals(sumTable)) this.sumTable = sumTable; + else this.sumTable = null; + + this.rule = rule; + this.partitionColumn = (rule == null) ? null : rule.getColumn(); + partionKeyIsPrimaryKey = (partitionColumn == null) ? 
primaryKey == null : partitionColumn.equals(primaryKey); + this.ruleRequired = ruleRequired; + this.childTable = isChildTable; + this.parentTC = parentTC; + this.joinKey = joinKey; + this.parentKey = parentKey; + if (parentTC != null) { + locateRTableKeySql = genLocateRootParentSQL(); + secondLevel = (parentTC.parentTC == null); + } else { + locateRTableKeySql = null; + secondLevel = false; + } + } + + public String getPrimaryKey() { + return primaryKey; + } + + public Set getDbTypes() { return dbTypes; } public boolean isAutoIncrement() { - return autoIncrement; - } - - public boolean isNeedAddLimit() { - return needAddLimit; - } - - public boolean isSecondLevel() { - return secondLevel; - } - - public String getLocateRTableKeySql() { - return locateRTableKeySql; - } - - public boolean isGlobalTable() { - return this.tableType == TableConfig.TYPE_GLOBAL_TABLE; - } - - public String genLocateRootParentSQL() { - TableConfig tb = this; - StringBuilder tableSb = new StringBuilder(); - StringBuilder condition = new StringBuilder(); - TableConfig prevTC = null; - int level = 0; - String latestCond = null; - while (tb.parentTC != null) { - tableSb.append(tb.parentTC.name).append(','); - String relation = null; - if (level == 0) { - latestCond = " " + tb.parentTC.getName() + '.' + tb.parentKey - + "="; - } else { - relation = tb.parentTC.getName() + '.' + tb.parentKey + '=' - + tb.name + '.' + tb.joinKey; - condition.append(relation).append(" AND "); - } - level++; - prevTC = tb; - tb = tb.parentTC; - } - String sql = "SELECT " - + prevTC.parentTC.name - + '.' - + prevTC.parentKey - + " FROM " - + tableSb.substring(0, tableSb.length() - 1) - + " WHERE " - + ((level < 2) ? 
latestCond : condition.toString() + latestCond); - // System.out.println(this.name+" sql " + sql); - return sql; - - } - - public String getPartitionColumn() { - return partitionColumn; - } - - public int getTableType() { - return tableType; - } - - /** - * get root parent - * - * @return - */ - public TableConfig getRootParent() { - if (parentTC == null) { - return null; - } - TableConfig preParent = parentTC; - TableConfig parent = preParent.getParentTC(); - - while (parent != null) { - preParent = parent; - parent = parent.getParentTC(); - } - return preParent; - } - - public TableConfig getParentTC() { - return parentTC; - } - - public boolean isChildTable() { - return childTable; - } - - public String getJoinKey() { - return joinKey; - } - - public String getParentKey() { - return parentKey; - } - - /** - * @return upper-case - */ - public String getName() { - return name; - } - - public ArrayList getDataNodes() { - return dataNodes; - } - - public String getRandomDataNode() { - int index = Math.abs(rand.nextInt(Integer.MAX_VALUE)) % dataNodes.size(); - return dataNodes.get(index); - } - - public boolean isRuleRequired() { - return ruleRequired; - } - - public RuleConfig getRule() { - return rule; - } - - public boolean primaryKeyIsPartionKey() { - return partionKeyIsPrimaryKey; - } - - public ArrayList getDistTables() { - return this.distTables; - } - - public boolean isDistTable(){ - if(this.distTables!=null && !this.distTables.isEmpty() ){ - return true; - } - return false; - } - - public List getTableElementList() { - return tableElementList; - } - - public void setTableElementList(List tableElementList) { - this.tableElementList = tableElementList; - } - - public ReentrantReadWriteLock getReentrantReadWriteLock() { - return reentrantReadWriteLock; - } - - public void setReentrantReadWriteLock(ReentrantReadWriteLock reentrantReadWriteLock) { - this.reentrantReadWriteLock = reentrantReadWriteLock; - } - - public String getTableStructureSQL() { - return 
tableStructureSQL; - } - - public void setTableStructureSQL(String tableStructureSQL) { - this.tableStructureSQL = tableStructureSQL; - } - - - public Map> getDataNodeTableStructureSQLMap() { - return dataNodeTableStructureSQLMap; - } - - public void setDataNodeTableStructureSQLMap(Map> dataNodeTableStructureSQLMap) { - this.dataNodeTableStructureSQLMap = dataNodeTableStructureSQLMap; - } + return autoIncrement; + } + + public boolean isNeedAddLimit() { + return needAddLimit; + } + + public boolean isSecondLevel() { + return secondLevel; + } + + public String getLocateRTableKeySql() { + return locateRTableKeySql; + } + + public boolean isGlobalTable() { + return this.tableType == TableConfig.TYPE_GLOBAL_TABLE; + } + + public String genLocateRootParentSQL() { + TableConfig tb = this; + StringBuilder tableSb = new StringBuilder(); + StringBuilder condition = new StringBuilder(); + TableConfig prevTC = null; + int level = 0; + String latestCond = null; + while (tb.parentTC != null) { + tableSb.append(tb.parentTC.name).append(','); + String relation = null; + if (level == 0) { + latestCond = " " + tb.parentTC.getName() + '.' + tb.parentKey + + "="; + } else { + relation = tb.parentTC.getName() + '.' + tb.parentKey + '=' + + tb.name + '.' + tb.joinKey; + condition.append(relation).append(" AND "); + } + level++; + prevTC = tb; + tb = tb.parentTC; + } + String sql = "SELECT " + + prevTC.parentTC.name + + '.' + + prevTC.parentKey + + " FROM " + + tableSb.substring(0, tableSb.length() - 1) + + " WHERE " + + ((level < 2) ? 
latestCond : condition.toString() + latestCond); + // System.out.println(this.name+" sql " + sql); + return sql; + + } + + public String getPartitionColumn() { + return partitionColumn; + } + + public int getTableType() { + return tableType; + } + + /** + * get root parent + * + * @return + */ + public TableConfig getRootParent() { + if (parentTC == null) { + return null; + } + TableConfig preParent = parentTC; + TableConfig parent = preParent.getParentTC(); + + while (parent != null) { + preParent = parent; + parent = parent.getParentTC(); + } + return preParent; + } + + public TableConfig getParentTC() { + return parentTC; + } + + public boolean isChildTable() { + return childTable; + } + + public String getJoinKey() { + return joinKey; + } + + public String getParentKey() { + return parentKey; + } + + /** + * @return upper-case + */ + public String getName() { + return name; + } + + public ArrayList getDataNodes() { + return dataNodes; + } + + public String getRandomDataNode() { + int index = Math.abs(rand.nextInt(Integer.MAX_VALUE)) % dataNodes.size(); + return dataNodes.get(index); + } + + public boolean isRuleRequired() { + return ruleRequired; + } + + public RuleConfig getRule() { + return rule; + } + + public boolean primaryKeyIsPartionKey() { + return partionKeyIsPrimaryKey; + } + + public ArrayList getDistTables() { + return this.distTables; + } + + public boolean isDistTable() { + return this.distTables != null && !this.distTables.isEmpty(); + } + + public List getTableElementList() { + return tableElementList; + } + + public void setTableElementList(List tableElementList) { + this.tableElementList = tableElementList; + } + + public ReentrantReadWriteLock getReentrantReadWriteLock() { + return reentrantReadWriteLock; + } + + public void setReentrantReadWriteLock(ReentrantReadWriteLock reentrantReadWriteLock) { + this.reentrantReadWriteLock = reentrantReadWriteLock; + } + + public String getTableStructureSQL() { + return tableStructureSQL; + } + + public 
void setTableStructureSQL(String tableStructureSQL) { + this.tableStructureSQL = tableStructureSQL; + } + + + public Map> getDataNodeTableStructureSQLMap() { + return dataNodeTableStructureSQLMap; + } + + public void setDataNodeTableStructureSQLMap(Map> dataNodeTableStructureSQLMap) { + this.dataNodeTableStructureSQLMap = dataNodeTableStructureSQLMap; + } + + public String getSumTable() { + return sumTable; + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/net/handler/FrontendCommandHandler.java b/src/main/java/io/mycat/net/handler/FrontendCommandHandler.java index 41d21422e..08563b7f7 100644 --- a/src/main/java/io/mycat/net/handler/FrontendCommandHandler.java +++ b/src/main/java/io/mycat/net/handler/FrontendCommandHandler.java @@ -35,33 +35,27 @@ * * @author mycat */ -public class FrontendCommandHandler implements NIOHandler -{ +public class FrontendCommandHandler implements NIOHandler { protected final FrontendConnection source; protected final CommandCount commands; - public FrontendCommandHandler(FrontendConnection source) - { + public FrontendCommandHandler(FrontendConnection source) { this.source = source; this.commands = source.getProcessor().getCommands(); } @Override - public void handle(byte[] data) - { - if(source.getLoadDataInfileHandler()!=null&&source.getLoadDataInfileHandler().isStartLoadData()) - { + public void handle(byte[] data) { + if (source.getLoadDataInfileHandler() != null && source.getLoadDataInfileHandler().isStartLoadData()) { MySQLMessage mm = new MySQLMessage(data); - int packetLength = mm.readUB3(); - if(packetLength+4==data.length) - { + int packetLength = mm.readUB3(); + if (packetLength + 4 == data.length) { source.loadDataInfileData(data); } return; } - switch (data[4]) - { + switch (data[4]) { case MySQLPacket.COM_INIT_DB: commands.doInitDB(); source.initDB(data); @@ -87,13 +81,13 @@ public void handle(byte[] data) source.stmtPrepare(data); break; case MySQLPacket.COM_STMT_SEND_LONG_DATA: - 
commands.doStmtSendLongData(); - source.stmtSendLongData(data); - break; + commands.doStmtSendLongData(); + source.stmtSendLongData(data); + break; case MySQLPacket.COM_STMT_RESET: - commands.doStmtReset(); - source.stmtReset(data); - break; + commands.doStmtReset(); + source.stmtReset(data); + break; case MySQLPacket.COM_STMT_EXECUTE: commands.doStmtExecute(); source.stmtExecute(data); @@ -107,9 +101,9 @@ public void handle(byte[] data) source.heartbeat(data); break; default: - commands.doOther(); - source.writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, - "Unknown command"); + commands.doOther(); + source.writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, + "Unknown command"); } } diff --git a/src/main/java/io/mycat/route/RouteResultset.java b/src/main/java/io/mycat/route/RouteResultset.java index 9b3b0a182..4e991fc90 100644 --- a/src/main/java/io/mycat/route/RouteResultset.java +++ b/src/main/java/io/mycat/route/RouteResultset.java @@ -24,9 +24,6 @@ package io.mycat.route; import com.alibaba.druid.sql.ast.SQLStatement; - -import io.mycat.MycatServer; -import io.mycat.config.MycatConfig; import io.mycat.config.model.SchemaConfig; import io.mycat.route.parser.util.PageSQLUtil; import io.mycat.sqlengine.mpp.HavingCols; @@ -43,8 +40,8 @@ public final class RouteResultset implements Serializable { private final int sqlType; private RouteResultsetNode[] nodes; // 路由结果节点 private Set subTables; - private SQLStatement sqlStatement; - + private SQLStatement sqlStatement; + private int limitStart; private boolean cacheAble; @@ -66,17 +63,17 @@ public final class RouteResultset implements Serializable { //是否自动提交,此属性主要用于记录ServerConnection上的autocommit状态 private boolean autocommit = true; - private boolean isLoadData=false; + private boolean isLoadData = false; //是否可以在从库运行,此属性主要供RouteResultsetNode获取 private Boolean canRunInReadDB; // 强制走 master,可以通过 RouteResultset的属性canRunInReadDB=false // 传给 RouteResultsetNode 来实现,但是 强制走 slave需要增加一个属性来实现: - private Boolean runOnSlave = null; // 
默认null表示不施加影响 + private Boolean runOnSlave = null; // 默认null表示不施加影响 - //key=dataNode value=slot - private Map dataNodeSlotMap=new HashMap<>(); + //key=dataNode value=slot + private Map dataNodeSlotMap = new HashMap<>(); private boolean selectForUpdate; @@ -87,9 +84,9 @@ public boolean isSelectForUpdate() { public void setSelectForUpdate(boolean selectForUpdate) { this.selectForUpdate = selectForUpdate; } - - - private List tables; + + + private List tables; public List getTables() { return tables; @@ -108,31 +105,28 @@ public void setDataNodeSlotMap(Map dataNodeSlotMap) { } public Boolean getRunOnSlave() { - return runOnSlave; - } + return runOnSlave; + } + + public void setRunOnSlave(Boolean runOnSlave) { + this.runOnSlave = runOnSlave; + } - public void setRunOnSlave(Boolean runOnSlave) { - this.runOnSlave = runOnSlave; - } - private Procedure procedure; + private Procedure procedure; - public Procedure getProcedure() - { + public Procedure getProcedure() { return procedure; } - public void setProcedure(Procedure procedure) - { + public void setProcedure(Procedure procedure) { this.procedure = procedure; } - public boolean isLoadData() - { + public boolean isLoadData() { return isLoadData; } - public void setLoadData(boolean isLoadData) - { + public void setLoadData(boolean isLoadData) { this.isLoadData = isLoadData; } @@ -168,12 +162,9 @@ public void resetNodes() { public void copyLimitToNodes() { - if(nodes!=null) - { - for (RouteResultsetNode node : nodes) - { - if(node.getLimitSize()==-1&&node.getLimitStart()==0) - { + if (nodes != null) { + for (RouteResultsetNode node : nodes) { + if (node.getLimitSize() == -1 && node.getLimitStart() == 0) { node.setLimitStart(limitStart); node.setLimitSize(limitSize); } @@ -294,11 +285,9 @@ public RouteResultsetNode[] getNodes() { } public void setNodes(RouteResultsetNode[] nodes) { - if(nodes!=null) - { - int nodeSize=nodes.length; - for (RouteResultsetNode node : nodes) - { + if (nodes != null) { + int nodeSize = 
nodes.length; + for (RouteResultsetNode node : nodes) { node.setTotalNodeSize(nodeSize); } @@ -327,10 +316,8 @@ public boolean isCallStatement() { public void setCallStatement(boolean callStatement) { this.callStatement = callStatement; - if(nodes!=null) - { - for (RouteResultsetNode node : nodes) - { + if (nodes != null) { + for (RouteResultsetNode node : nodes) { node.setCallStatement(callStatement); } @@ -338,26 +325,21 @@ public void setCallStatement(boolean callStatement) { } public void changeNodeSqlAfterAddLimit(SchemaConfig schemaConfig, String sourceDbType, String sql, int offset, int count, boolean isNeedConvert) { - if (nodes != null) - { + if (nodes != null) { Map dataNodeDbTypeMap = schemaConfig.getDataNodeDbTypeMap(); Map sqlMapCache = new HashMap<>(); - for (RouteResultsetNode node : nodes) - { + for (RouteResultsetNode node : nodes) { String dbType = dataNodeDbTypeMap.get(node.getName()); - if (dbType.equalsIgnoreCase("mysql")) - { + if (dbType.equalsIgnoreCase("mysql")) { node.setStatement(sql); //mysql之前已经加好limit - } else if (sqlMapCache.containsKey(dbType)) - { + } else if (sqlMapCache.containsKey(dbType)) { node.setStatement(sqlMapCache.get(dbType)); - } else if(isNeedConvert) - { + } else if (isNeedConvert) { String nativeSql = PageSQLUtil.convertLimitToNativePageSql(dbType, sql, offset, count); sqlMapCache.put(dbType, nativeSql); node.setStatement(nativeSql); - } else { + } else { node.setStatement(sql); } @@ -385,48 +367,48 @@ public void setCanRunInReadDB(Boolean canRunInReadDB) { this.canRunInReadDB = canRunInReadDB; } - public HavingCols getHavingCols() { - return (sqlMerge != null) ? sqlMerge.getHavingCols() : null; - } + public HavingCols getHavingCols() { + return (sqlMerge != null) ? 
sqlMerge.getHavingCols() : null; + } - public void setSubTables(Set subTables) { - this.subTables = subTables; - } + public void setSubTables(Set subTables) { + this.subTables = subTables; + } - public void setHavings(HavingCols havings) { - if (havings != null) { - createSQLMergeIfNull().setHavingCols(havings); - } - } + public void setHavings(HavingCols havings) { + if (havings != null) { + createSQLMergeIfNull().setHavingCols(havings); + } + } - // Added by winbill, 20160314, for having clause, Begin ==> - public void setHavingColsName(Object[] names) { - if (names != null && names.length > 0) { - createSQLMergeIfNull().setHavingColsName(names); - } - } - // Added by winbill, 20160314, for having clause, End <== + // Added by winbill, 20160314, for having clause, Begin ==> + public void setHavingColsName(Object[] names) { + if (names != null && names.length > 0) { + createSQLMergeIfNull().setHavingColsName(names); + } + } + // Added by winbill, 20160314, for having clause, End <== public SQLStatement getSqlStatement() { - return this.sqlStatement; - } - - public void setSqlStatement(SQLStatement sqlStatement) { - this.sqlStatement = sqlStatement; - } - - public Set getSubTables() { - return this.subTables; - } - - public boolean isDistTable(){ - if(this.getSubTables()!=null && !this.getSubTables().isEmpty() ){ - return true; - } - return false; - } - - @Override + return this.sqlStatement; + } + + public void setSqlStatement(SQLStatement sqlStatement) { + this.sqlStatement = sqlStatement; + } + + public Set getSubTables() { + return this.subTables; + } + + public boolean isDistTable() { + if (this.getSubTables() != null && !this.getSubTables().isEmpty()) { + return true; + } + return false; + } + + @Override public String toString() { StringBuilder s = new StringBuilder(); s.append(statement).append(", route={"); diff --git a/src/main/java/io/mycat/route/function/AbstractPartitionAlgorithm.java 
b/src/main/java/io/mycat/route/function/AbstractPartitionAlgorithm.java index 9abfc45dd..f2ffa6155 100644 --- a/src/main/java/io/mycat/route/function/AbstractPartitionAlgorithm.java +++ b/src/main/java/io/mycat/route/function/AbstractPartitionAlgorithm.java @@ -10,93 +10,94 @@ * 路由分片函数抽象类 * 为了实现一个默认的支持范围分片的函数 calcualteRange * 重写它以实现自己的范围路由规则 - * @author lxy * + * @author lxy */ -public abstract class AbstractPartitionAlgorithm implements RuleAlgorithm ,Serializable { +public abstract class AbstractPartitionAlgorithm implements RuleAlgorithm, Serializable { + + @Override + public void init() { + } + + /** + * 返回所有被路由到的节点的编号 + * 返回长度为0的数组表示所有节点都被路由(默认) + * 返回null表示没有节点被路由到 + */ + @Override + public Integer[] calculateRange(String beginValue, String endValue) { + return new Integer[0]; + } + + /** + * 对于存储数据按顺序存放的字段做范围路由,可以使用这个函数 + * + * @param algorithm + * @param beginValue + * @param endValue + * @return + */ + public static Integer[] calculateSequenceRange(AbstractPartitionAlgorithm algorithm, String beginValue, String endValue) { + Integer begin = 0, end = 0; + begin = algorithm.calculate(beginValue); + end = algorithm.calculate(endValue); + + if (begin == null || end == null) { + return new Integer[0]; + } + + if (end >= begin) { + int len = end - begin + 1; + Integer[] re = new Integer[len]; + + for (int i = 0; i < len; i++) { + re[i] = begin + i; + } + + return re; + } else { + return new Integer[0]; + } + } - @Override - public void init() { - } + /** + * 分片表所跨的节点数与分片算法分区数一致性校验 + * + * @param tableConf + * @return -1 if table datanode size < rule function partition size + * 0 if table datanode size == rule function partition size + * 1 if table datanode size > rule function partition size + */ + public final int suitableFor(TableConfig tableConf) { + int nPartition = getPartitionNum(); + if (nPartition > 0) { // 对于有限制分区数的规则,进行检查 + int dnSize = tableConf.getDataNodes().size(); + boolean distTable = tableConf.isDistTable(); + List tables = 
tableConf.getDistTables(); + if (distTable) { + if (tables.size() < nPartition) { + return -1; + } else if (dnSize > nPartition) { + return 1; + } + } else { + if (dnSize < nPartition) { + return -1; + } else if (dnSize > nPartition) { + return 1; + } + } + } + return 0; + } - /** - * 返回所有被路由到的节点的编号 - * 返回长度为0的数组表示所有节点都被路由(默认) - * 返回null表示没有节点被路由到 - */ - @Override - public Integer[] calculateRange(String beginValue, String endValue) { - return new Integer[0]; - } - - /** - * 对于存储数据按顺序存放的字段做范围路由,可以使用这个函数 - * @param algorithm - * @param beginValue - * @param endValue - * @return - */ - public static Integer[] calculateSequenceRange(AbstractPartitionAlgorithm algorithm, String beginValue, String endValue) { - Integer begin = 0, end = 0; - begin = algorithm.calculate(beginValue); - end = algorithm.calculate(endValue); + /** + * 返回分区数, 返回-1表示分区数没有限制 + * + * @return + */ + public int getPartitionNum() { + return -1; // 表示没有限制 + } - if(begin == null || end == null){ - return new Integer[0]; - } - - if (end >= begin) { - int len = end-begin+1; - Integer [] re = new Integer[len]; - - for(int i =0;i rule function partition size - */ - public final int suitableFor(TableConfig tableConf) { - int nPartition = getPartitionNum(); - if(nPartition > 0) { // 对于有限制分区数的规则,进行检查 - int dnSize = tableConf.getDataNodes().size(); - boolean distTable = tableConf.isDistTable(); - List tables = tableConf.getDistTables(); - if(distTable){ - if(tables.size() < nPartition){ - return -1; - } else if(dnSize > nPartition) { - return 1; - } - }else{ - if(dnSize < nPartition) { - return -1; - } else if(dnSize > nPartition) { - return 1; - } - } - } - return 0; - } - - /** - * 返回分区数, 返回-1表示分区数没有限制 - * @return - */ - public int getPartitionNum() { - return -1; // 表示没有限制 - } - } diff --git a/src/main/java/io/mycat/route/function/PartitionByPostfix.java b/src/main/java/io/mycat/route/function/PartitionByPostfix.java new file mode 100644 index 000000000..e51f8f95b --- /dev/null +++ 
b/src/main/java/io/mycat/route/function/PartitionByPostfix.java @@ -0,0 +1,68 @@ +package io.mycat.route.function; + +import io.mycat.config.model.rule.RuleAlgorithm; +import org.apache.commons.lang.StringUtils; + +/** + * 根据后缀分表 + *

+ * COPYRIGHT © 2001 - 2016 VOYAGE ONE GROUP INC. ALL RIGHTS RESERVED. + * + * @author vantis 2017/10/9 + * @version 1.0.0 + */ +public class PartitionByPostfix extends AbstractPartitionAlgorithm implements RuleAlgorithm { + private Integer firstValue; + private String prefix; + private String postfix; + + @Override + public void init() { + if (null == firstValue) firstValue = 0; + if (null == prefix) prefix = ""; + if (null == postfix) postfix = ""; + } + + @Override + public Integer calculate(String columnValue) { + // 1. 按照 prefix 和 postfix 解析真正的序号 + String finalValue = columnValue; + if (!StringUtils.isBlank(prefix) && columnValue.startsWith(prefix)) + finalValue = finalValue.substring(finalValue.indexOf(prefix) + 1, finalValue.length()); + if (!StringUtils.isBlank(prefix) && columnValue.endsWith(postfix)) + finalValue = finalValue.substring(0, finalValue.lastIndexOf(postfix)); + return Integer.parseInt(finalValue) - firstValue; + } + + public Integer getFirst() { + return firstValue; + } + + public void setFirst(Integer first) { + this.firstValue = first; + } + + public String getPrefix() { + return prefix; + } + + public void setPrefix(String prefix) { + this.prefix = prefix; + } + + public Integer getFirstValue() { + return firstValue; + } + + public void setFirstValue(Integer firstValue) { + this.firstValue = firstValue; + } + + public String getPostfix() { + return postfix; + } + + public void setPostfix(String postfix) { + this.postfix = postfix; + } +} diff --git a/src/main/java/io/mycat/route/impl/DruidMycatRouteStrategy.java b/src/main/java/io/mycat/route/impl/DruidMycatRouteStrategy.java index e3a602664..369e3bf4d 100644 --- a/src/main/java/io/mycat/route/impl/DruidMycatRouteStrategy.java +++ b/src/main/java/io/mycat/route/impl/DruidMycatRouteStrategy.java @@ -1,36 +1,10 @@ package io.mycat.route.impl; -import java.sql.SQLNonTransientException; -import java.sql.SQLSyntaxErrorException; -import java.util.HashMap; -import java.util.HashSet; -import 
java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.SortedSet; -import java.util.TreeSet; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import com.alibaba.druid.sql.SQLUtils; import com.alibaba.druid.sql.ast.SQLObject; import com.alibaba.druid.sql.ast.SQLStatement; -import com.alibaba.druid.sql.ast.expr.SQLAllExpr; -import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr; -import com.alibaba.druid.sql.ast.expr.SQLExistsExpr; -import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; -import com.alibaba.druid.sql.ast.expr.SQLInSubQueryExpr; -import com.alibaba.druid.sql.ast.expr.SQLQueryExpr; -import com.alibaba.druid.sql.ast.statement.SQLDeleteStatement; -import com.alibaba.druid.sql.ast.statement.SQLExprTableSource; -import com.alibaba.druid.sql.ast.statement.SQLInsertStatement; -import com.alibaba.druid.sql.ast.statement.SQLSelect; -import com.alibaba.druid.sql.ast.statement.SQLSelectQuery; -import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; -import com.alibaba.druid.sql.ast.statement.SQLTableSource; -import com.alibaba.druid.sql.ast.statement.SQLUpdateStatement; +import com.alibaba.druid.sql.ast.expr.*; +import com.alibaba.druid.sql.ast.statement.*; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlReplaceStatement; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; @@ -38,7 +12,6 @@ import com.alibaba.druid.sql.parser.SQLStatementParser; import com.alibaba.druid.stat.TableStat.Relationship; import com.google.common.base.Strings; - import io.mycat.MycatServer; import io.mycat.backend.mysql.nio.handler.MiddlerQueryResultHandler; import io.mycat.backend.mysql.nio.handler.MiddlerResultHandler; @@ -51,540 +24,546 @@ import io.mycat.route.RouteResultset; import io.mycat.route.RouteResultsetNode; import io.mycat.route.function.SlotFunction; -import 
io.mycat.route.impl.middlerResultStrategy.BinaryOpResultHandler; -import io.mycat.route.impl.middlerResultStrategy.InSubQueryResultHandler; -import io.mycat.route.impl.middlerResultStrategy.RouteMiddlerReaultHandler; -import io.mycat.route.impl.middlerResultStrategy.SQLAllResultHandler; -import io.mycat.route.impl.middlerResultStrategy.SQLExistsResultHandler; -import io.mycat.route.impl.middlerResultStrategy.SQLQueryResultHandler; -import io.mycat.route.parser.druid.DruidParser; -import io.mycat.route.parser.druid.DruidParserFactory; -import io.mycat.route.parser.druid.DruidShardingParseInfo; -import io.mycat.route.parser.druid.MycatSchemaStatVisitor; -import io.mycat.route.parser.druid.MycatStatementParser; -import io.mycat.route.parser.druid.RouteCalculateUnit; +import io.mycat.route.impl.middlerResultStrategy.*; +import io.mycat.route.parser.druid.*; import io.mycat.route.parser.util.ParseUtil; import io.mycat.route.util.RouterUtil; import io.mycat.server.NonBlockingSession; import io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParse; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.SQLNonTransientException; +import java.sql.SQLSyntaxErrorException; +import java.util.*; public class DruidMycatRouteStrategy extends AbstractRouteStrategy { - - public static final Logger LOGGER = LoggerFactory.getLogger(DruidMycatRouteStrategy.class); - - private static Map,RouteMiddlerReaultHandler> middlerResultHandler = new HashMap<>(); - - static{ - middlerResultHandler.put(SQLQueryExpr.class, new SQLQueryResultHandler()); - middlerResultHandler.put(SQLBinaryOpExpr.class, new BinaryOpResultHandler()); - middlerResultHandler.put(SQLInSubQueryExpr.class, new InSubQueryResultHandler()); - middlerResultHandler.put(SQLExistsExpr.class, new SQLExistsResultHandler()); - middlerResultHandler.put(SQLAllExpr.class, new SQLAllResultHandler()); - } - - - @Override - public RouteResultset routeNormalSqlWithAST(SchemaConfig schema, - String 
stmt, RouteResultset rrs,String charset, - LayerCachePool cachePool,int sqlType,ServerConnection sc) throws SQLNonTransientException { - - /** - * 只有mysql时只支持mysql语法 - */ - SQLStatementParser parser = null; - if (schema.isNeedSupportMultiDBType()) { - parser = new MycatStatementParser(stmt); - } else { - parser = new MySqlStatementParser(stmt); - } - - MycatSchemaStatVisitor visitor = null; - SQLStatement statement; - - /** - * 解析出现问题统一抛SQL语法错误 - */ - try { - statement = parser.parseStatement(); + + public static final Logger LOGGER = LoggerFactory.getLogger(DruidMycatRouteStrategy.class); + + private static Map, RouteMiddlerReaultHandler> middlerResultHandler = new HashMap<>(); + + static { + middlerResultHandler.put(SQLQueryExpr.class, new SQLQueryResultHandler()); + middlerResultHandler.put(SQLBinaryOpExpr.class, new BinaryOpResultHandler()); + middlerResultHandler.put(SQLInSubQueryExpr.class, new InSubQueryResultHandler()); + middlerResultHandler.put(SQLExistsExpr.class, new SQLExistsResultHandler()); + middlerResultHandler.put(SQLAllExpr.class, new SQLAllResultHandler()); + } + + + @Override + public RouteResultset routeNormalSqlWithAST(SchemaConfig schema, + String stmt, RouteResultset rrs, String charset, + LayerCachePool cachePool, int sqlType, ServerConnection sc) throws SQLNonTransientException { + + /** + * 只有mysql时只支持mysql语法 + */ + SQLStatementParser parser = null; + if (schema.isNeedSupportMultiDBType()) { + parser = new MycatStatementParser(stmt); + } else { + parser = new MySqlStatementParser(stmt); + } + + MycatSchemaStatVisitor visitor = null; + SQLStatement statement; + + /** + * 解析出现问题统一抛SQL语法错误 + */ + try { + statement = parser.parseStatement(); visitor = new MycatSchemaStatVisitor(); - } catch (Exception t) { - LOGGER.error("DruidMycatRouteStrategyError", t); - throw new SQLSyntaxErrorException(t); - } + } catch (Exception t) { + LOGGER.error("DruidMycatRouteStrategyError", t); + throw new SQLSyntaxErrorException(t); + } + + /** + * 
检验unsupported statement + */ + checkUnSupportedStatement(statement); + + DruidParser druidParser = DruidParserFactory.create(schema, statement, visitor); + druidParser.parser(schema, rrs, statement, stmt, cachePool, visitor); + DruidShardingParseInfo ctx = druidParser.getCtx(); + rrs.setTables(ctx.getTables()); + + if (visitor.isSubqueryRelationOr()) { + String err = "In subQuery,the or condition is not supported."; + LOGGER.error(err); + throw new SQLSyntaxErrorException(err); + } - /** - * 检验unsupported statement - */ - checkUnSupportedStatement(statement); - - DruidParser druidParser = DruidParserFactory.create(schema, statement, visitor); - druidParser.parser(schema, rrs, statement, stmt,cachePool,visitor); - DruidShardingParseInfo ctx= druidParser.getCtx() ; - rrs.setTables(ctx.getTables()); - - if(visitor.isSubqueryRelationOr()){ - String err = "In subQuery,the or condition is not supported."; - LOGGER.error(err); - throw new SQLSyntaxErrorException(err); - } - /* 按照以下情况路由 - 1.2.1 可以直接路由. + 1.2.1 可以直接路由. 
1.2.2 两个表夸库join的sql.调用calat 1.2.3 需要先执行subquery 的sql.把subquery拆分出来.获取结果后,与outerquery */ - - //add huangyiming 分片规则不一样的且表中带查询条件的则走Catlet - List tables = ctx.getTables(); - SchemaConfig schemaConf = MycatServer.getInstance().getConfig().getSchemas().get(schema.getName()); - int index = 0; - RuleConfig firstRule = null; - boolean directRoute = true; - Set firstDataNodes = new HashSet(); - Map tconfigs = schemaConf==null?null:schemaConf.getTables(); - - Map rulemap = new HashMap<>(); - if(tconfigs!=null){ - for(String tableName : tables){ - TableConfig tc = tconfigs.get(tableName); - if(tc == null){ - //add 别名中取 - Map tableAliasMap = ctx.getTableAliasMap(); - if(tableAliasMap !=null && tableAliasMap.get(tableName) !=null){ - tc = schemaConf.getTables().get(tableAliasMap.get(tableName)); - } - } - - if(index == 0){ - if(tc !=null){ - firstRule= tc.getRule(); - //没有指定分片规则时,不做处理 - if(firstRule==null){ - continue; - } - firstDataNodes.addAll(tc.getDataNodes()); - rulemap.put(tc.getName(), firstRule); - } - }else{ - if(tc !=null){ - //ER关系表的时候是可能存在字表中没有tablerule的情况,所以加上判断 - RuleConfig ruleCfg = tc.getRule(); - if(ruleCfg==null){ //没有指定分片规则时,不做处理 - continue; - } - Set dataNodes = new HashSet(); - dataNodes.addAll(tc.getDataNodes()); - rulemap.put(tc.getName(), ruleCfg); - //如果匹配规则不相同或者分片的datanode不相同则需要走子查询处理 - if(firstRule!=null&&((ruleCfg !=null && !ruleCfg.getRuleAlgorithm().equals(firstRule.getRuleAlgorithm()) )||( !dataNodes.equals(firstDataNodes)))){ - directRoute = false; - break; - } - } - } - index++; - } - } - - RouteResultset rrsResult = rrs; - if(directRoute){ //直接路由 - if(!RouterUtil.isAllGlobalTable(ctx, schemaConf)){ - if(rulemap.size()>1&&!checkRuleField(rulemap,visitor)){ - String err = "In case of slice table,sql have same rules,but the relationship condition is different from rule field!"; - LOGGER.error(err); - throw new SQLSyntaxErrorException(err); - } - } - rrsResult = directRoute(rrs,ctx,schema,druidParser,statement,cachePool); - }else{ - int 
subQuerySize = visitor.getSubQuerys().size(); - if(subQuerySize==0&&ctx.getTables().size()==2){ //两表关联,考虑使用catlet - if(!visitor.getRelationships().isEmpty()){ - rrs.setCacheAble(false); - rrs.setFinishedRoute(true); - rrsResult = catletRoute(schema,ctx.getSql(),charset,sc); - }else{ - rrsResult = directRoute(rrs,ctx,schema,druidParser,statement,cachePool); - } - }else if(subQuerySize==1){ //只涉及一张表的子查询,使用 MiddlerResultHandler 获取中间结果后,改写原有 sql 继续执行 TODO 后期可能会考虑多个子查询的情况. - SQLSelect sqlselect = visitor.getSubQuerys().iterator().next(); - if(!visitor.getRelationships().isEmpty()){ // 当 inner query 和 outer query 有关联条件时,暂不支持 - String err = "In case of slice table,sql have different rules,the relationship condition is not supported."; - LOGGER.error(err); - throw new SQLSyntaxErrorException(err); - }else{ - SQLSelectQuery sqlSelectQuery = sqlselect.getQuery(); - if(((MySqlSelectQueryBlock)sqlSelectQuery).getFrom() instanceof SQLExprTableSource) { - rrs.setCacheAble(false); - rrs.setFinishedRoute(true); - rrsResult = middlerResultRoute(schema,charset,sqlselect,sqlType,statement,sc); - } - } - }else if(subQuerySize >=2){ - String err = "In case of slice table,sql has different rules,currently only one subQuery is supported."; - LOGGER.error(err); - throw new SQLSyntaxErrorException(err); - } - } - return rrsResult; - } - - /** - * 子查询中存在关联查询的情况下,检查关联字段是否是分片字段 - * @param rulemap - * @param ships - * @return - */ - private boolean checkRuleField(Map rulemap,MycatSchemaStatVisitor visitor){ - - Set ships = visitor.getRelationships(); - Iterator iter = ships.iterator(); - while(iter.hasNext()){ - Relationship ship = iter.next(); - String lefttable = ship.getLeft().getTable().toUpperCase(); - String righttable = ship.getRight().getTable().toUpperCase(); - // 如果是同一个表中的关联条件,不做处理 - if(lefttable.equals(righttable)){ - return true; - } - RuleConfig leftconfig = rulemap.get(lefttable); - if(leftconfig!=null){ - 
if(!leftconfig.getColumn().equals(ship.getLeft().getName().toUpperCase())){ - return false; - } - } - RuleConfig rightconfig = rulemap.get(righttable); - if(rightconfig!=null){ - if(!rightconfig.getColumn().equals(ship.getRight().getName().toUpperCase())){ - return false; - } - } - } - return true; - } - - private RouteResultset middlerResultRoute(final SchemaConfig schema,final String charset,final SQLSelect sqlselect, - final int sqlType,final SQLStatement statement,final ServerConnection sc){ - - final String middlesql = SQLUtils.toMySqlString(sqlselect); - - MiddlerResultHandler middlerResultHandler = new MiddlerQueryResultHandler<>(new SecondHandler() { - @Override - public void doExecute(List param) { - sc.getSession2().setMiddlerResultHandler(null); - String sqls = null; - // 路由计算 - RouteResultset rrs = null; - try { - - sqls = buildSql(statement,sqlselect,param); - rrs = MycatServer - .getInstance() - .getRouterservice() - .route(MycatServer.getInstance().getConfig().getSystem(), - schema, sqlType,sqls.toLowerCase(), charset,sc ); - - } catch (Exception e) { - StringBuilder s = new StringBuilder(); - LOGGER.warn(s.append(this).append(sqls).toString() + " err:" + e.toString(),e); - String msg = e.getMessage(); - sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, msg == null ? 
e.getClass().getSimpleName() : msg); - return; - } - NonBlockingSession noBlockSession = new NonBlockingSession(sc.getSession2().getSource()); - noBlockSession.setMiddlerResultHandler(null); - //session的预编译标示传递 - noBlockSession.setPrepared(sc.getSession2().isPrepared()); - if (rrs != null) { - noBlockSession.setCanClose(false); - noBlockSession.execute(rrs, ServerParse.SELECT); - } - } - } ); - sc.getSession2().setMiddlerResultHandler(middlerResultHandler); - sc.getSession2().setCanClose(false); - - // 路由计算 - RouteResultset rrs = null; - try { - rrs = MycatServer - .getInstance() - .getRouterservice() - .route(MycatServer.getInstance().getConfig().getSystem(), - schema, ServerParse.SELECT, middlesql, charset, sc); - - } catch (Exception e) { - StringBuilder s = new StringBuilder(); - LOGGER.warn(s.append(this).append(middlesql).toString() + " err:" + e.toString(),e); - String msg = e.getMessage(); - sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, msg == null ? e.getClass().getSimpleName() : msg); - return null; - } - - if(rrs!=null){ - rrs.setCacheAble(false); - } - return rrs; - } - - /** - * 获取子查询执行结果后,改写原始sql 继续执行. 
- * @param statement - * @param sqlselect - * @param param - * @return - */ - private String buildSql(SQLStatement statement,SQLSelect sqlselect,List param){ - - SQLObject parent = sqlselect.getParent(); - RouteMiddlerReaultHandler handler = middlerResultHandler.get(parent.getClass()); - if(handler==null){ - throw new UnsupportedOperationException(parent.getClass()+" current is not supported "); - } - return handler.dohandler(statement, sqlselect, parent, param); - } - - /** - * 两个表的情况,catlet - * @param schema - * @param stmt - * @param charset - * @param sc - * @return - */ - private RouteResultset catletRoute(SchemaConfig schema,String stmt,String charset,ServerConnection sc){ - RouteResultset rrs = null; - try { - rrs = MycatServer - .getInstance() - .getRouterservice() - .route(MycatServer.getInstance().getConfig().getSystem(), - schema, ServerParse.SELECT, "/*!mycat:catlet=io.mycat.catlets.ShareJoin */ "+stmt, charset, sc); - - }catch(Exception e){ - - } - return rrs; - } - - /** - * 直接结果路由 - * @param rrs - * @param ctx - * @param schema - * @param druidParser - * @param statement - * @param cachePool - * @return - * @throws SQLNonTransientException - */ - private RouteResultset directRoute(RouteResultset rrs,DruidShardingParseInfo ctx,SchemaConfig schema, - DruidParser druidParser,SQLStatement statement,LayerCachePool cachePool) throws SQLNonTransientException{ - - //改写sql:如insert语句主键自增长, 在直接结果路由的情况下,进行sql 改写处理 - druidParser.changeSql(schema, rrs, statement,cachePool); - - /** - * DruidParser 解析过程中已完成了路由的直接返回 - */ - if ( rrs.isFinishedRoute() ) { - return rrs; - } - - /** - * 没有from的select语句或其他 - */ - if((ctx.getTables() == null || ctx.getTables().size() == 0)&&(ctx.getTableAliasMap()==null||ctx.getTableAliasMap().isEmpty())) - { - return RouterUtil.routeToSingleNode(rrs, schema.getRandomDataNode(), druidParser.getCtx().getSql()); - } - - if(druidParser.getCtx().getRouteCalculateUnits().size() == 0) { - RouteCalculateUnit routeCalculateUnit = new 
RouteCalculateUnit(); - druidParser.getCtx().addRouteCalculateUnit(routeCalculateUnit); - } - - SortedSet nodeSet = new TreeSet(); - boolean isAllGlobalTable = RouterUtil.isAllGlobalTable(ctx, schema); - for(RouteCalculateUnit unit: druidParser.getCtx().getRouteCalculateUnits()) { - RouteResultset rrsTmp = RouterUtil.tryRouteForTables(schema, druidParser.getCtx(), unit, rrs, isSelect(statement), cachePool); - if(rrsTmp != null&&rrsTmp.getNodes()!=null) { - for(RouteResultsetNode node :rrsTmp.getNodes()) { - nodeSet.add(node); - } - } - if(isAllGlobalTable) {//都是全局表时只计算一遍路由 - break; - } - } - - RouteResultsetNode[] nodes = new RouteResultsetNode[nodeSet.size()]; - int i = 0; - for (RouteResultsetNode aNodeSet : nodeSet) { - nodes[i] = aNodeSet; - if(statement instanceof MySqlInsertStatement &&ctx.getTables().size()==1&&schema.getTables().containsKey(ctx.getTables().get(0))) { - RuleConfig rule = schema.getTables().get(ctx.getTables().get(0)).getRule(); - if(rule!=null&& rule.getRuleAlgorithm() instanceof SlotFunction){ - aNodeSet.setStatement(ParseUtil.changeInsertAddSlot(aNodeSet.getStatement(),aNodeSet.getSlot())); - } - } - i++; - } - rrs.setNodes(nodes); - - //分表 - /** - * subTables="t_order$1-2,t_order3" - *目前分表 1.6 开始支持 幵丏 dataNode 在分表条件下只能配置一个,分表条件下不支持join。 - */ - if(rrs.isDistTable()){ - return this.routeDisTable(statement,rrs); - } - return rrs; - } - - private SQLExprTableSource getDisTable(SQLTableSource tableSource,RouteResultsetNode node) throws SQLSyntaxErrorException{ - if(node.getSubTableName()==null){ - String msg = " sub table not exists for " + node.getName() + " on " + tableSource; - LOGGER.error("DruidMycatRouteStrategyError " + msg); - throw new SQLSyntaxErrorException(msg); - } - - SQLIdentifierExpr sqlIdentifierExpr = new SQLIdentifierExpr(); - sqlIdentifierExpr.setParent(tableSource.getParent()); - sqlIdentifierExpr.setName(node.getSubTableName()); - SQLExprTableSource from2 = new SQLExprTableSource(sqlIdentifierExpr); - return from2; - } - 
- private RouteResultset routeDisTable(SQLStatement statement, RouteResultset rrs) throws SQLSyntaxErrorException{ - SQLTableSource tableSource = null; - if(statement instanceof SQLInsertStatement) { - SQLInsertStatement insertStatement = (SQLInsertStatement) statement; - tableSource = insertStatement.getTableSource(); - for (RouteResultsetNode node : rrs.getNodes()) { - SQLExprTableSource from2 = getDisTable(tableSource, node); - insertStatement.setTableSource(from2); - node.setStatement(insertStatement.toString()); - } - } - if(statement instanceof SQLDeleteStatement) { - SQLDeleteStatement deleteStatement = (SQLDeleteStatement) statement; - tableSource = deleteStatement.getTableSource(); - for (RouteResultsetNode node : rrs.getNodes()) { - SQLExprTableSource from2 = getDisTable(tableSource, node); - deleteStatement.setTableSource(from2); - node.setStatement(deleteStatement.toString()); - } - } - if(statement instanceof SQLUpdateStatement) { - SQLUpdateStatement updateStatement = (SQLUpdateStatement) statement; - tableSource = updateStatement.getTableSource(); - for (RouteResultsetNode node : rrs.getNodes()) { - SQLExprTableSource from2 = getDisTable(tableSource, node); - updateStatement.setTableSource(from2); - node.setStatement(updateStatement.toString()); - } - } - - return rrs; - } - - /** - * SELECT 语句 - */ + + //add huangyiming 分片规则不一样的且表中带查询条件的则走Catlet + List tables = ctx.getTables(); + SchemaConfig schemaConf = MycatServer.getInstance().getConfig().getSchemas().get(schema.getName()); + int index = 0; + RuleConfig firstRule = null; + boolean directRoute = true; + Set firstDataNodes = new HashSet(); + Map tconfigs = schemaConf == null ? 
null : schemaConf.getTables(); + + Map rulemap = new HashMap<>(); + if (tconfigs != null) { + for (String tableName : tables) { + TableConfig tc = tconfigs.get(tableName); + if (tc == null) { + //add 别名中取 + Map tableAliasMap = ctx.getTableAliasMap(); + if (tableAliasMap != null && tableAliasMap.get(tableName) != null) { + tc = schemaConf.getTables().get(tableAliasMap.get(tableName)); + } + } + + if (index == 0) { + if (tc != null) { + firstRule = tc.getRule(); + //没有指定分片规则时,不做处理 + if (firstRule == null) { + continue; + } + firstDataNodes.addAll(tc.getDataNodes()); + rulemap.put(tc.getName(), firstRule); + } + } else { + if (tc != null) { + //ER关系表的时候是可能存在字表中没有tablerule的情况,所以加上判断 + RuleConfig ruleCfg = tc.getRule(); + if (ruleCfg == null) { //没有指定分片规则时,不做处理 + continue; + } + Set dataNodes = new HashSet(); + dataNodes.addAll(tc.getDataNodes()); + rulemap.put(tc.getName(), ruleCfg); + //如果匹配规则不相同或者分片的datanode不相同则需要走子查询处理 + if (firstRule != null && ((ruleCfg != null && !ruleCfg.getRuleAlgorithm().equals(firstRule.getRuleAlgorithm())) || (!dataNodes.equals(firstDataNodes)))) { + directRoute = false; + break; + } + } + } + index++; + } + } + + RouteResultset rrsResult = rrs; + if (directRoute) { //直接路由 + if (!RouterUtil.isAllGlobalTable(ctx, schemaConf)) { + if (rulemap.size() > 1 && !checkRuleField(rulemap, visitor)) { + String err = "In case of slice table,sql have same rules,but the relationship condition is different from rule field!"; + LOGGER.error(err); + throw new SQLSyntaxErrorException(err); + } + } + rrsResult = directRoute(rrs, ctx, schema, druidParser, statement, cachePool); + } else { + int subQuerySize = visitor.getSubQuerys().size(); + if (subQuerySize == 0 && ctx.getTables().size() == 2) { //两表关联,考虑使用catlet + if (!visitor.getRelationships().isEmpty()) { + rrs.setCacheAble(false); + rrs.setFinishedRoute(true); + rrsResult = catletRoute(schema, ctx.getSql(), charset, sc); + } else { + rrsResult = directRoute(rrs, ctx, schema, druidParser, statement, 
cachePool); + } + } else if (subQuerySize == 1) { //只涉及一张表的子查询,使用 MiddlerResultHandler 获取中间结果后,改写原有 sql 继续执行 TODO 后期可能会考虑多个子查询的情况. + SQLSelect sqlselect = visitor.getSubQuerys().iterator().next(); + if (!visitor.getRelationships().isEmpty()) { // 当 inner query 和 outer query 有关联条件时,暂不支持 + String err = "In case of slice table,sql have different rules,the relationship condition is not supported."; + LOGGER.error(err); + throw new SQLSyntaxErrorException(err); + } else { + SQLSelectQuery sqlSelectQuery = sqlselect.getQuery(); + if (((MySqlSelectQueryBlock) sqlSelectQuery).getFrom() instanceof SQLExprTableSource) { + rrs.setCacheAble(false); + rrs.setFinishedRoute(true); + rrsResult = middlerResultRoute(schema, charset, sqlselect, sqlType, statement, sc); + } + } + } else if (subQuerySize >= 2) { + String err = "In case of slice table,sql has different rules,currently only one subQuery is supported."; + LOGGER.error(err); + throw new SQLSyntaxErrorException(err); + } + } + return rrsResult; + } + + /** + * 子查询中存在关联查询的情况下,检查关联字段是否是分片字段 + * + * @param rulemap + * @param ships + * @return + */ + private boolean checkRuleField(Map rulemap, MycatSchemaStatVisitor visitor) { + + Set ships = visitor.getRelationships(); + Iterator iter = ships.iterator(); + while (iter.hasNext()) { + Relationship ship = iter.next(); + String lefttable = ship.getLeft().getTable().toUpperCase(); + String righttable = ship.getRight().getTable().toUpperCase(); + // 如果是同一个表中的关联条件,不做处理 + if (lefttable.equals(righttable)) { + return true; + } + RuleConfig leftconfig = rulemap.get(lefttable); + if (leftconfig != null) { + if (!leftconfig.getColumn().equals(ship.getLeft().getName().toUpperCase())) { + return false; + } + } + RuleConfig rightconfig = rulemap.get(righttable); + if (rightconfig != null) { + if (!rightconfig.getColumn().equals(ship.getRight().getName().toUpperCase())) { + return false; + } + } + } + return true; + } + + private RouteResultset middlerResultRoute(final SchemaConfig schema, 
final String charset, final SQLSelect sqlselect, + final int sqlType, final SQLStatement statement, final ServerConnection sc) { + + final String middlesql = SQLUtils.toMySqlString(sqlselect); + + MiddlerResultHandler middlerResultHandler = new MiddlerQueryResultHandler<>(new SecondHandler() { + @Override + public void doExecute(List param) { + sc.getSession2().setMiddlerResultHandler(null); + String sqls = null; + // 路由计算 + RouteResultset rrs = null; + try { + + sqls = buildSql(statement, sqlselect, param); + rrs = MycatServer + .getInstance() + .getRouterservice() + .route(MycatServer.getInstance().getConfig().getSystem(), + schema, sqlType, sqls.toLowerCase(), charset, sc); + + } catch (Exception e) { + StringBuilder s = new StringBuilder(); + LOGGER.warn(s.append(this).append(sqls).toString() + " err:" + e.toString(), e); + String msg = e.getMessage(); + sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, msg == null ? e.getClass().getSimpleName() : msg); + return; + } + NonBlockingSession noBlockSession = new NonBlockingSession(sc.getSession2().getSource()); + noBlockSession.setMiddlerResultHandler(null); + //session的预编译标示传递 + noBlockSession.setPrepared(sc.getSession2().isPrepared()); + if (rrs != null) { + noBlockSession.setCanClose(false); + noBlockSession.execute(rrs, ServerParse.SELECT); + } + } + }); + sc.getSession2().setMiddlerResultHandler(middlerResultHandler); + sc.getSession2().setCanClose(false); + + // 路由计算 + RouteResultset rrs = null; + try { + rrs = MycatServer + .getInstance() + .getRouterservice() + .route(MycatServer.getInstance().getConfig().getSystem(), + schema, ServerParse.SELECT, middlesql, charset, sc); + + } catch (Exception e) { + StringBuilder s = new StringBuilder(); + LOGGER.warn(s.append(this).append(middlesql).toString() + " err:" + e.toString(), e); + String msg = e.getMessage(); + sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, msg == null ? 
e.getClass().getSimpleName() : msg); + return null; + } + + if (rrs != null) { + rrs.setCacheAble(false); + } + return rrs; + } + + /** + * 获取子查询执行结果后,改写原始sql 继续执行. + * + * @param statement + * @param sqlselect + * @param param + * @return + */ + private String buildSql(SQLStatement statement, SQLSelect sqlselect, List param) { + + SQLObject parent = sqlselect.getParent(); + RouteMiddlerReaultHandler handler = middlerResultHandler.get(parent.getClass()); + if (handler == null) { + throw new UnsupportedOperationException(parent.getClass() + " current is not supported "); + } + return handler.dohandler(statement, sqlselect, parent, param); + } + + /** + * 两个表的情况,catlet + * + * @param schema + * @param stmt + * @param charset + * @param sc + * @return + */ + private RouteResultset catletRoute(SchemaConfig schema, String stmt, String charset, ServerConnection sc) { + RouteResultset rrs = null; + try { + rrs = MycatServer + .getInstance() + .getRouterservice() + .route(MycatServer.getInstance().getConfig().getSystem(), + schema, ServerParse.SELECT, "/*!mycat:catlet=io.mycat.catlets.ShareJoin */ " + stmt, charset, sc); + + } catch (Exception e) { + + } + return rrs; + } + + /** + * 直接结果路由 + * + * @param rrs + * @param ctx + * @param schema + * @param druidParser + * @param statement + * @param cachePool + * @return + * @throws SQLNonTransientException + */ + private RouteResultset directRoute(RouteResultset rrs, DruidShardingParseInfo ctx, SchemaConfig schema, + DruidParser druidParser, SQLStatement statement, LayerCachePool cachePool) throws SQLNonTransientException { + + //改写sql:如insert语句主键自增长, 在直接结果路由的情况下,进行sql 改写处理 + druidParser.changeSql(schema, rrs, statement, cachePool); + + /** + * DruidParser 解析过程中已完成了路由的直接返回 + */ + if (rrs.isFinishedRoute()) { + return rrs; + } + + /** + * 没有from的select语句或其他 + */ + if ((ctx.getTables() == null || ctx.getTables().size() == 0) && (ctx.getTableAliasMap() == null || ctx.getTableAliasMap().isEmpty())) { + return 
RouterUtil.routeToSingleNode(rrs, schema.getRandomDataNode(), druidParser.getCtx().getSql()); + } + + if (druidParser.getCtx().getRouteCalculateUnits().size() == 0) { + RouteCalculateUnit routeCalculateUnit = new RouteCalculateUnit(); + druidParser.getCtx().addRouteCalculateUnit(routeCalculateUnit); + } + + SortedSet nodeSet = new TreeSet(); + boolean isAllGlobalTable = RouterUtil.isAllGlobalTable(ctx, schema); + for (RouteCalculateUnit unit : druidParser.getCtx().getRouteCalculateUnits()) { + RouteResultset rrsTmp = RouterUtil.tryRouteForTables(schema, druidParser.getCtx(), unit, rrs, isSelect(statement), cachePool); + if (rrsTmp != null && rrsTmp.getNodes() != null) { + for (RouteResultsetNode node : rrsTmp.getNodes()) { + nodeSet.add(node); + } + } + if (isAllGlobalTable) {//都是全局表时只计算一遍路由 + break; + } + } + + RouteResultsetNode[] nodes = new RouteResultsetNode[nodeSet.size()]; + int i = 0; + for (RouteResultsetNode aNodeSet : nodeSet) { + nodes[i] = aNodeSet; + if (statement instanceof MySqlInsertStatement && ctx.getTables().size() == 1 && schema.getTables().containsKey(ctx.getTables().get(0))) { + RuleConfig rule = schema.getTables().get(ctx.getTables().get(0)).getRule(); + if (rule != null && rule.getRuleAlgorithm() instanceof SlotFunction) { + aNodeSet.setStatement(ParseUtil.changeInsertAddSlot(aNodeSet.getStatement(), aNodeSet.getSlot())); + } + } + i++; + } + rrs.setNodes(nodes); + + //分表 + /** + * subTables="t_order$1-2,t_order3" + *目前分表 1.6 开始支持 幵丏 dataNode 在分表条件下只能配置一个,分表条件下不支持join。 + */ + if (rrs.isDistTable()) { + return this.routeDisTable(statement, rrs); + } + return rrs; + } + + private SQLExprTableSource getDisTable(SQLTableSource tableSource, RouteResultsetNode node) throws SQLSyntaxErrorException { + if (node.getSubTableName() == null) { + String msg = " sub table not exists for " + node.getName() + " on " + tableSource; + LOGGER.error("DruidMycatRouteStrategyError " + msg); + throw new SQLSyntaxErrorException(msg); + } + + SQLIdentifierExpr 
sqlIdentifierExpr = new SQLIdentifierExpr(); + sqlIdentifierExpr.setParent(tableSource.getParent()); + sqlIdentifierExpr.setName(node.getSubTableName()); + SQLExprTableSource from2 = new SQLExprTableSource(sqlIdentifierExpr); + return from2; + } + + private RouteResultset routeDisTable(SQLStatement statement, RouteResultset rrs) throws SQLSyntaxErrorException { + SQLTableSource tableSource = null; + if (statement instanceof SQLInsertStatement) { + SQLInsertStatement insertStatement = (SQLInsertStatement) statement; + tableSource = insertStatement.getTableSource(); + for (RouteResultsetNode node : rrs.getNodes()) { + SQLExprTableSource from2 = getSqlExprTableSource(tableSource, node); + insertStatement.setTableSource(from2); + node.setStatement(insertStatement.toString()); + } + } + if (statement instanceof SQLDeleteStatement) { + SQLDeleteStatement deleteStatement = (SQLDeleteStatement) statement; + tableSource = deleteStatement.getTableSource(); + for (RouteResultsetNode node : rrs.getNodes()) { + SQLExprTableSource from2 = getSqlExprTableSource(tableSource, node); + deleteStatement.setTableSource(from2); + node.setStatement(deleteStatement.toString()); + } + } + if (statement instanceof SQLUpdateStatement) { + SQLUpdateStatement updateStatement = (SQLUpdateStatement) statement; + tableSource = updateStatement.getTableSource(); + for (RouteResultsetNode node : rrs.getNodes()) { + SQLExprTableSource from2 = getSqlExprTableSource(tableSource, node); + updateStatement.setTableSource(from2); + node.setStatement(updateStatement.toString()); + } + } + + return rrs; + } + + private SQLExprTableSource getSqlExprTableSource(SQLTableSource tableSource, RouteResultsetNode node) + throws SQLSyntaxErrorException { + SQLExprTableSource from2 = getDisTable(tableSource, node); + from2.setAlias(tableSource.getAlias()); + return from2; + } + + /** + * SELECT 语句 + */ private boolean isSelect(SQLStatement statement) { - if(statement instanceof SQLSelectStatement) { - return true; - } 
- return false; - } - - /** - * 检验不支持的SQLStatement类型 :不支持的类型直接抛SQLSyntaxErrorException异常 - * @param statement - * @throws SQLSyntaxErrorException - */ - private void checkUnSupportedStatement(SQLStatement statement) throws SQLSyntaxErrorException { - //不支持replace语句 - if(statement instanceof MySqlReplaceStatement) { - throw new SQLSyntaxErrorException(" ReplaceStatement can't be supported,use insert into ...on duplicate key update... instead "); - } - } - - /** - * 分析 SHOW SQL - */ - @Override - public RouteResultset analyseShowSQL(SchemaConfig schema, - RouteResultset rrs, String stmt) throws SQLSyntaxErrorException { - - String upStmt = stmt.toUpperCase(); - int tabInd = upStmt.indexOf(" TABLES"); - if (tabInd > 0) {// show tables - int[] nextPost = RouterUtil.getSpecPos(upStmt, 0); - if (nextPost[0] > 0) {// remove db info - int end = RouterUtil.getSpecEndPos(upStmt, tabInd); - if (upStmt.indexOf(" FULL") > 0) { - stmt = "SHOW FULL TABLES" + stmt.substring(end); - } else { - stmt = "SHOW TABLES" + stmt.substring(end); - } - } - String defaultNode= schema.getDataNode(); - if(!Strings.isNullOrEmpty(defaultNode)) - { - return RouterUtil.routeToSingleNode(rrs, defaultNode, stmt); + if (statement instanceof SQLSelectStatement) { + return true; + } + return false; + } + + /** + * 检验不支持的SQLStatement类型 :不支持的类型直接抛SQLSyntaxErrorException异常 + * + * @param statement + * @throws SQLSyntaxErrorException + */ + private void checkUnSupportedStatement(SQLStatement statement) throws SQLSyntaxErrorException { + //不支持replace语句 + if (statement instanceof MySqlReplaceStatement) { + throw new SQLSyntaxErrorException(" ReplaceStatement can't be supported,use insert into ...on duplicate key update... 
instead "); + } + } + + /** + * 分析 SHOW SQL + */ + @Override + public RouteResultset analyseShowSQL(SchemaConfig schema, + RouteResultset rrs, String stmt) throws SQLSyntaxErrorException { + + String upStmt = stmt.toUpperCase(); + int tabInd = upStmt.indexOf(" TABLES"); + if (tabInd > 0) {// show tables + int[] nextPost = RouterUtil.getSpecPos(upStmt, 0); + if (nextPost[0] > 0) {// remove db info + int end = RouterUtil.getSpecEndPos(upStmt, tabInd); + if (upStmt.indexOf(" FULL") > 0) { + stmt = "SHOW FULL TABLES" + stmt.substring(end); + } else { + stmt = "SHOW TABLES" + stmt.substring(end); + } } - return RouterUtil.routeToMultiNode(false, rrs, schema.getMetaDataNodes(), stmt); - } - - /** - * show index or column - */ - int[] indx = RouterUtil.getSpecPos(upStmt, 0); - if (indx[0] > 0) { - /** - * has table - */ - int[] repPos = { indx[0] + indx[1], 0 }; - String tableName = RouterUtil.getShowTableName(stmt, repPos); - /** - * IN DB pattern - */ - int[] indx2 = RouterUtil.getSpecPos(upStmt, indx[0] + indx[1] + 1); - if (indx2[0] > 0) {// find LIKE OR WHERE - repPos[1] = RouterUtil.getSpecEndPos(upStmt, indx2[0] + indx2[1]); - - } - stmt = stmt.substring(0, indx[0]) + " FROM " + tableName + stmt.substring(repPos[1]); - RouterUtil.routeForTableMeta(rrs, schema, tableName, stmt); - return rrs; - - } - - /** - * show create table tableName - */ - int[] createTabInd = RouterUtil.getCreateTablePos(upStmt, 0); - if (createTabInd[0] > 0) { - int tableNameIndex = createTabInd[0] + createTabInd[1]; - if (upStmt.length() > tableNameIndex) { - String tableName = stmt.substring(tableNameIndex).trim(); - int ind2 = tableName.indexOf('.'); - if (ind2 > 0) { - tableName = tableName.substring(ind2 + 1); - } - RouterUtil.routeForTableMeta(rrs, schema, tableName, stmt); - return rrs; - } - } - - return RouterUtil.routeToSingleNode(rrs, schema.getRandomDataNode(), stmt); - } - - + String defaultNode = schema.getDataNode(); + if (!Strings.isNullOrEmpty(defaultNode)) { + return 
RouterUtil.routeToSingleNode(rrs, defaultNode, stmt); + } + return RouterUtil.routeToMultiNode(false, rrs, schema.getMetaDataNodes(), stmt); + } + + /** + * show index or column + */ + int[] indx = RouterUtil.getSpecPos(upStmt, 0); + if (indx[0] > 0) { + /** + * has table + */ + int[] repPos = {indx[0] + indx[1], 0}; + String tableName = RouterUtil.getShowTableName(stmt, repPos); + /** + * IN DB pattern + */ + int[] indx2 = RouterUtil.getSpecPos(upStmt, indx[0] + indx[1] + 1); + if (indx2[0] > 0) {// find LIKE OR WHERE + repPos[1] = RouterUtil.getSpecEndPos(upStmt, indx2[0] + indx2[1]); + + } + stmt = stmt.substring(0, indx[0]) + " FROM " + tableName + stmt.substring(repPos[1]); + RouterUtil.routeForTableMeta(rrs, schema, tableName, stmt); + return rrs; + + } + + /** + * show create table tableName + */ + int[] createTabInd = RouterUtil.getCreateTablePos(upStmt, 0); + if (createTabInd[0] > 0) { + int tableNameIndex = createTabInd[0] + createTabInd[1]; + if (upStmt.length() > tableNameIndex) { + String tableName = stmt.substring(tableNameIndex).trim(); + int ind2 = tableName.indexOf('.'); + if (ind2 > 0) { + tableName = tableName.substring(ind2 + 1); + } + RouterUtil.routeForTableMeta(rrs, schema, tableName, stmt); + return rrs; + } + } + + return RouterUtil.routeToSingleNode(rrs, schema.getRandomDataNode(), stmt); + } + + // /** // * 为一个表进行条件路由 // * @param schema @@ -663,93 +642,94 @@ public RouteResultset analyseShowSQL(SchemaConfig schema, // // } - public RouteResultset routeSystemInfo(SchemaConfig schema, int sqlType, - String stmt, RouteResultset rrs) throws SQLSyntaxErrorException { - switch(sqlType){ - case ServerParse.SHOW:// if origSQL is like show tables - return analyseShowSQL(schema, rrs, stmt); - case ServerParse.SELECT://if origSQL is like select @@ - int index = stmt.indexOf("@@"); - if(index > 0 && "SELECT".equals(stmt.substring(0, index).trim().toUpperCase())){ - return analyseDoubleAtSgin(schema, rrs, stmt); - } - break; - case 
ServerParse.DESCRIBE:// if origSQL is meta SQL, such as describe table - int ind = stmt.indexOf(' '); - stmt = stmt.trim(); - return analyseDescrSQL(schema, rrs, stmt, ind + 1); - } - return null; - } - - /** - * 对Desc语句进行分析 返回数据路由集合 - * * - * @param schema 数据库名 - * @param rrs 数据路由集合 - * @param stmt 执行语句 - * @param ind 第一个' '的位置 - * @return RouteResultset (数据路由集合) - * @author mycat - */ - private static RouteResultset analyseDescrSQL(SchemaConfig schema, - RouteResultset rrs, String stmt, int ind) { - - final String MATCHED_FEATURE = "DESCRIBE "; - final String MATCHED2_FEATURE = "DESC "; - int pos = 0; - while (pos < stmt.length()) { - char ch = stmt.charAt(pos); - // 忽略处理注释 /* */ BEN - if(ch == '/' && pos+4 < stmt.length() && stmt.charAt(pos+1) == '*') { - if(stmt.substring(pos+2).indexOf("*/") != -1) { - pos += stmt.substring(pos+2).indexOf("*/")+4; - continue; - } else { - // 不应该发生这类情况。 - throw new IllegalArgumentException("sql 注释 语法错误"); - } - } else if(ch == 'D'||ch == 'd') { - // 匹配 [describe ] - if(pos+MATCHED_FEATURE.length() < stmt.length() && (stmt.substring(pos).toUpperCase().indexOf(MATCHED_FEATURE) != -1)) { - pos = pos + MATCHED_FEATURE.length(); - break; - } else if(pos+MATCHED2_FEATURE.length() < stmt.length() && (stmt.substring(pos).toUpperCase().indexOf(MATCHED2_FEATURE) != -1)) { - pos = pos + MATCHED2_FEATURE.length(); - break; - } else { - pos++; - } - } - } - - // 重置ind坐标。BEN GONG - ind = pos; - int[] repPos = { ind, 0 }; - String tableName = RouterUtil.getTableName(stmt, repPos); - - stmt = stmt.substring(0, ind) + tableName + stmt.substring(repPos[1]); - RouterUtil.routeForTableMeta(rrs, schema, tableName, stmt); - return rrs; - } - - /** - * 根据执行语句判断数据路由 - * - * @param schema 数据库名 - * @param rrs 数据路由集合 - * @param stmt 执行sql - * @return RouteResultset 数据路由集合 - * @throws SQLSyntaxErrorException - * @author mycat - */ - private RouteResultset analyseDoubleAtSgin(SchemaConfig schema, - RouteResultset rrs, String stmt) throws 
SQLSyntaxErrorException { - String upStmt = stmt.toUpperCase(); - int atSginInd = upStmt.indexOf(" @@"); - if (atSginInd > 0) { - return RouterUtil.routeToMultiNode(false, rrs, schema.getMetaDataNodes(), stmt); - } - return RouterUtil.routeToSingleNode(rrs, schema.getRandomDataNode(), stmt); - } + public RouteResultset routeSystemInfo(SchemaConfig schema, int sqlType, + String stmt, RouteResultset rrs) throws SQLSyntaxErrorException { + switch (sqlType) { + case ServerParse.SHOW:// if origSQL is like show tables + return analyseShowSQL(schema, rrs, stmt); + case ServerParse.SELECT://if origSQL is like select @@ + int index = stmt.indexOf("@@"); + if (index > 0 && "SELECT".equals(stmt.substring(0, index).trim().toUpperCase())) { + return analyseDoubleAtSgin(schema, rrs, stmt); + } + break; + case ServerParse.DESCRIBE:// if origSQL is meta SQL, such as describe table + int ind = stmt.indexOf(' '); + stmt = stmt.trim(); + return analyseDescrSQL(schema, rrs, stmt, ind + 1); + } + return null; + } + + /** + * 对Desc语句进行分析 返回数据路由集合 + * * + * + * @param schema 数据库名 + * @param rrs 数据路由集合 + * @param stmt 执行语句 + * @param ind 第一个' '的位置 + * @return RouteResultset (数据路由集合) + * @author mycat + */ + private static RouteResultset analyseDescrSQL(SchemaConfig schema, + RouteResultset rrs, String stmt, int ind) { + + final String MATCHED_FEATURE = "DESCRIBE "; + final String MATCHED2_FEATURE = "DESC "; + int pos = 0; + while (pos < stmt.length()) { + char ch = stmt.charAt(pos); + // 忽略处理注释 /* */ BEN + if (ch == '/' && pos + 4 < stmt.length() && stmt.charAt(pos + 1) == '*') { + if (stmt.substring(pos + 2).indexOf("*/") != -1) { + pos += stmt.substring(pos + 2).indexOf("*/") + 4; + continue; + } else { + // 不应该发生这类情况。 + throw new IllegalArgumentException("sql 注释 语法错误"); + } + } else if (ch == 'D' || ch == 'd') { + // 匹配 [describe ] + if (pos + MATCHED_FEATURE.length() < stmt.length() && (stmt.substring(pos).toUpperCase().indexOf(MATCHED_FEATURE) != -1)) { + pos = pos + 
MATCHED_FEATURE.length(); + break; + } else if (pos + MATCHED2_FEATURE.length() < stmt.length() && (stmt.substring(pos).toUpperCase().indexOf(MATCHED2_FEATURE) != -1)) { + pos = pos + MATCHED2_FEATURE.length(); + break; + } else { + pos++; + } + } + } + + // 重置ind坐标。BEN GONG + ind = pos; + int[] repPos = {ind, 0}; + String tableName = RouterUtil.getTableName(stmt, repPos); + + stmt = stmt.substring(0, ind) + tableName + stmt.substring(repPos[1]); + RouterUtil.routeForTableMeta(rrs, schema, tableName, stmt); + return rrs; + } + + /** + * 根据执行语句判断数据路由 + * + * @param schema 数据库名 + * @param rrs 数据路由集合 + * @param stmt 执行sql + * @return RouteResultset 数据路由集合 + * @throws SQLSyntaxErrorException + * @author mycat + */ + private RouteResultset analyseDoubleAtSgin(SchemaConfig schema, + RouteResultset rrs, String stmt) throws SQLSyntaxErrorException { + String upStmt = stmt.toUpperCase(); + int atSginInd = upStmt.indexOf(" @@"); + if (atSginInd > 0) { + return RouterUtil.routeToMultiNode(false, rrs, schema.getMetaDataNodes(), stmt); + } + return RouterUtil.routeToSingleNode(rrs, schema.getRandomDataNode(), stmt); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectParser.java b/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectParser.java index a89ca8281..66770a8db 100644 --- a/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectParser.java +++ b/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectParser.java @@ -431,6 +431,7 @@ public void changeSql(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt sqlIdentifierExpr.setParent(from); sqlIdentifierExpr.setName(node.getSubTableName()); SQLExprTableSource from2 = new SQLExprTableSource(sqlIdentifierExpr); + from2.setAlias(from.getAlias()); mysqlSelectQuery.setFrom(from2); node.setStatement(stmt.toString()); } diff --git a/src/main/java/io/mycat/route/util/RouterUtil.java b/src/main/java/io/mycat/route/util/RouterUtil.java index 
b4cc10e2e..d0d4fdbbe 100644 --- a/src/main/java/io/mycat/route/util/RouterUtil.java +++ b/src/main/java/io/mycat/route/util/RouterUtil.java @@ -1,24 +1,5 @@ package io.mycat.route.util; -import java.sql.SQLNonTransientException; -import java.sql.SQLSyntaxErrorException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.Callable; -import java.util.regex.Pattern; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import com.alibaba.druid.sql.ast.SQLExpr; import com.alibaba.druid.sql.ast.SQLStatement; import com.alibaba.druid.sql.ast.expr.SQLCharExpr; @@ -34,7 +15,6 @@ import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; - import io.mycat.MycatServer; import io.mycat.backend.datasource.PhysicalDBNode; import io.mycat.backend.datasource.PhysicalDBPool; @@ -58,30 +38,36 @@ import io.mycat.sqlengine.mpp.ColumnRoutePair; import io.mycat.sqlengine.mpp.LoadData; import io.mycat.util.StringUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.SQLNonTransientException; +import java.sql.SQLSyntaxErrorException; +import java.util.*; +import java.util.concurrent.Callable; +import java.util.regex.Pattern; /** * 从ServerRouterUtil中抽取的一些公用方法,路由解析工具类 - * @author wang.dw * + * @author wang.dw */ public class RouterUtil { - private static final Logger LOGGER = LoggerFactory.getLogger(RouterUtil.class); + private static final Logger LOGGER = LoggerFactory.getLogger(RouterUtil.class); - /** - * 移除执行语句中的数据库名 - * - * @param stmt 执行语句 - * @param schema 数据库名 - * @return 执行语句 - * @author mycat + /** + * 移除执行语句中的数据库名 * + * @param stmt 执行语句 + * @param schema 数据库名 + * 
@return 执行语句 + * @author mycat * @modification 修正移除schema的方法 * @date 2016/12/29 * @modifiedBy Hash Zhang - * - */ - public static String removeSchema(String stmt, String schema) { + */ + public static String removeSchema(String stmt, String schema) { final String upStmt = stmt.toUpperCase(); final String upSchema = schema.toUpperCase() + "."; final String upSchema2 = new StringBuilder("`").append(schema.toUpperCase()).append("`.").toString(); @@ -120,705 +106,697 @@ public static String removeSchema(String stmt, String schema) { return sb.toString(); } - private static int countChar(String sql,int end) - { - int count=0; - boolean skipChar = false; - for (int i = 0; i < end; i++) { - if(sql.charAt(i)=='\'' && !skipChar) { - count++; - skipChar = false; - }else if( sql.charAt(i)=='\\'){ - skipChar = true; - }else{ - skipChar = false; - } - } - return count; - } - - /** - * 获取第一个节点作为路由 - * - * @param rrs 数据路由集合 - * @param dataNode 数据库所在节点 - * @param stmt 执行语句 - * @return 数据路由集合 - * - * @author mycat - */ - public static RouteResultset routeToSingleNode(RouteResultset rrs, - String dataNode, String stmt) { - if (dataNode == null) { - return rrs; - } - RouteResultsetNode[] nodes = new RouteResultsetNode[1]; - nodes[0] = new RouteResultsetNode(dataNode, rrs.getSqlType(), stmt);//rrs.getStatement() - nodes[0].setSource(rrs); - rrs.setNodes(nodes); - rrs.setFinishedRoute(true); - if(rrs.getDataNodeSlotMap().containsKey(dataNode)){ - nodes[0].setSlot(rrs.getDataNodeSlotMap().get(dataNode)); - } - if (rrs.getCanRunInReadDB() != null) { - nodes[0].setCanRunInReadDB(rrs.getCanRunInReadDB()); - } - if(rrs.getRunOnSlave() != null){ - nodes[0].setRunOnSlave(rrs.getRunOnSlave()); - } - - return rrs; - } - - - - /** - * 修复DDL路由 - * - * @return RouteResultset - * @author aStoneGod - */ - public static RouteResultset routeToDDLNode(RouteResultset rrs, int sqlType, String stmt,SchemaConfig schema) throws SQLSyntaxErrorException { - stmt = getFixedSql(stmt); - String tablename = ""; - 
final String upStmt = stmt.toUpperCase(); - if(upStmt.startsWith("CREATE")){ - if (upStmt.contains("CREATE INDEX ")){ - tablename = RouterUtil.getTableName(stmt, RouterUtil.getCreateIndexPos(upStmt, 0)); - }else { - tablename = RouterUtil.getTableName(stmt, RouterUtil.getCreateTablePos(upStmt, 0)); - } - }else if(upStmt.startsWith("DROP")){ - if (upStmt.contains("DROP INDEX ")){ - tablename = RouterUtil.getTableName(stmt, RouterUtil.getDropIndexPos(upStmt, 0)); - }else { - tablename = RouterUtil.getTableName(stmt, RouterUtil.getDropTablePos(upStmt, 0)); - } - }else if(upStmt.startsWith("ALTER")){ - tablename = RouterUtil.getTableName(stmt, RouterUtil.getAlterTablePos(upStmt, 0)); - }else if (upStmt.startsWith("TRUNCATE")){ - tablename = RouterUtil.getTableName(stmt, RouterUtil.getTruncateTablePos(upStmt, 0)); - } - tablename = tablename.toUpperCase(); - - if (schema.getTables().containsKey(tablename)){ - if(ServerParse.DDL==sqlType){ - List dataNodes = new ArrayList<>(); - Map tables = schema.getTables(); - TableConfig tc=tables.get(tablename); - if (tables != null && (tc != null)) { - dataNodes = tc.getDataNodes(); - } - boolean isSlotFunction= tc.getRule() != null && tc.getRule().getRuleAlgorithm() instanceof SlotFunction; - Iterator iterator1 = dataNodes.iterator(); - int nodeSize = dataNodes.size(); - RouteResultsetNode[] nodes = new RouteResultsetNode[nodeSize]; - if(isSlotFunction){ - stmt=changeCreateTable(schema,tablename,stmt); - } - for(int i=0;i 0) { - tableName = tableName.substring(ind2 + 1); - } - return tableName; - } - - - /** - * 获取show语句table名字 - * - * @param stmt 执行语句 - * @param repPos 开始位置和位数 - * @return 表名 - * @author AStoneGod - */ - public static String getShowTableName(String stmt, int[] repPos) { - int startPos = repPos[0]; - int secInd = stmt.indexOf(' ', startPos + 1); - if (secInd < 0) { - secInd = stmt.length(); - } - - repPos[1] = secInd; - String tableName = stmt.substring(startPos, secInd).trim(); - - int ind2 = 
tableName.indexOf('.'); - if (ind2 > 0) { - tableName = tableName.substring(ind2 + 1); - } - return tableName; - } - - /** - * 获取语句中前关键字位置和占位个数表名位置 - * - * @param upStmt 执行语句 - * @param start 开始位置 - * @return int[] 关键字位置和占位个数 - * - * @author mycat - * - * @modification 修改支持语句中包含“IF NOT EXISTS”的情况 - * @date 2016/12/8 - * @modifiedBy Hash Zhang - */ - public static int[] getCreateTablePos(String upStmt, int start) { - String token1 = "CREATE "; - String token2 = " TABLE "; - String token3 = " EXISTS "; - int createInd = upStmt.indexOf(token1, start); - int tabInd1 = upStmt.indexOf(token2, start); - int tabInd2 = upStmt.indexOf(token3, tabInd1); - // 既包含CREATE又包含TABLE,且CREATE关键字在TABLE关键字之前 - if (createInd >= 0 && tabInd2 > 0 && tabInd2 > createInd) { - return new int[] { tabInd2, token3.length() }; - } else if(createInd >= 0 && tabInd1 > 0 && tabInd1 > createInd) { - return new int[] { tabInd1, token2.length() }; - } else { - return new int[] { -1, token2.length() };// 不满足条件时,只关注第一个返回值为-1,第二个任意 - } - } - - /** - * 获取语句中前关键字位置和占位个数表名位置 - * - * @param upStmt - * 执行语句 - * @param start - * 开始位置 - * @return int[]关键字位置和占位个数 - * @author aStoneGod - */ - public static int[] getCreateIndexPos(String upStmt, int start) { - String token1 = "CREATE "; - String token2 = " INDEX "; - String token3 = " ON "; - int createInd = upStmt.indexOf(token1, start); - int idxInd = upStmt.indexOf(token2, start); - int onInd = upStmt.indexOf(token3, start); - // 既包含CREATE又包含INDEX,且CREATE关键字在INDEX关键字之前, 且包含ON... 
- if (createInd >= 0 && idxInd > 0 && idxInd > createInd && onInd > 0 && onInd > idxInd) { - return new int[] {onInd , token3.length() }; - } else { - return new int[] { -1, token2.length() };// 不满足条件时,只关注第一个返回值为-1,第二个任意 - } - } - - /** - * 获取ALTER语句中前关键字位置和占位个数表名位置 - * - * @param upStmt 执行语句 - * @param start 开始位置 - * @return int[] 关键字位置和占位个数 - * @author aStoneGod - */ - public static int[] getAlterTablePos(String upStmt, int start) { - String token1 = "ALTER "; - String token2 = " TABLE "; - int createInd = upStmt.indexOf(token1, start); - int tabInd = upStmt.indexOf(token2, start); - // 既包含CREATE又包含TABLE,且CREATE关键字在TABLE关键字之前 - if (createInd >= 0 && tabInd > 0 && tabInd > createInd) { - return new int[] { tabInd, token2.length() }; - } else { - return new int[] { -1, token2.length() };// 不满足条件时,只关注第一个返回值为-1,第二个任意 - } - } - - /** - * 获取DROP语句中前关键字位置和占位个数表名位置 - * - * @param upStmt 执行语句 - * @param start 开始位置 - * @return int[] 关键字位置和占位个数 - * @author aStoneGod - */ - public static int[] getDropTablePos(String upStmt, int start) { - //增加 if exists判断 - if(upStmt.contains("EXISTS")){ - String token1 = "IF "; - String token2 = " EXISTS "; - int ifInd = upStmt.indexOf(token1, start); - int tabInd = upStmt.indexOf(token2, start); - if (ifInd >= 0 && tabInd > 0 && tabInd > ifInd) { - return new int[] { tabInd, token2.length() }; - } else { - return new int[] { -1, token2.length() };// 不满足条件时,只关注第一个返回值为-1,第二个任意 - } - }else { - String token1 = "DROP "; - String token2 = " TABLE "; - int createInd = upStmt.indexOf(token1, start); - int tabInd = upStmt.indexOf(token2, start); - - if (createInd >= 0 && tabInd > 0 && tabInd > createInd) { - return new int[] { tabInd, token2.length() }; - } else { - return new int[] { -1, token2.length() };// 不满足条件时,只关注第一个返回值为-1,第二个任意 - } - } - } - - - /** - * 获取DROP语句中前关键字位置和占位个数表名位置 - * - * @param upStmt - * 执行语句 - * @param start - * 开始位置 - * @return int[]关键字位置和占位个数 - * @author aStoneGod - */ - - public static int[] getDropIndexPos(String upStmt, 
int start) { - String token1 = "DROP "; - String token2 = " INDEX "; - String token3 = " ON "; - int createInd = upStmt.indexOf(token1, start); - int idxInd = upStmt.indexOf(token2, start); - int onInd = upStmt.indexOf(token3, start); - // 既包含CREATE又包含INDEX,且CREATE关键字在INDEX关键字之前, 且包含ON... - if (createInd >= 0 && idxInd > 0 && idxInd > createInd && onInd > 0 && onInd > idxInd) { - return new int[] {onInd , token3.length() }; - } else { - return new int[] { -1, token2.length() };// 不满足条件时,只关注第一个返回值为-1,第二个任意 - } - } - - /** - * 获取TRUNCATE语句中前关键字位置和占位个数表名位置 - * - * @param upStmt 执行语句 - * @param start 开始位置 - * @return int[] 关键字位置和占位个数 - * @author aStoneGod - */ - public static int[] getTruncateTablePos(String upStmt, int start) { - String token1 = "TRUNCATE "; - String token2 = " TABLE "; - int createInd = upStmt.indexOf(token1, start); - int tabInd = upStmt.indexOf(token2, start); - // 既包含CREATE又包含TABLE,且CREATE关键字在TABLE关键字之前 - if (createInd >= 0 && tabInd > 0 && tabInd > createInd) { - return new int[] { tabInd, token2.length() }; - } else { - return new int[] { -1, token2.length() };// 不满足条件时,只关注第一个返回值为-1,第二个任意 - } - } - - /** - * 获取语句中前关键字位置和占位个数表名位置 - * - * @param upStmt 执行语句 - * @param start 开始位置 - * @return int[] 关键字位置和占位个数 - * @author mycat - */ - public static int[] getSpecPos(String upStmt, int start) { - String token1 = " FROM "; - String token2 = " IN "; - int tabInd1 = upStmt.indexOf(token1, start); - int tabInd2 = upStmt.indexOf(token2, start); - if (tabInd1 > 0) { - if (tabInd2 < 0) { - return new int[] { tabInd1, token1.length() }; - } - return (tabInd1 < tabInd2) ? 
new int[] { tabInd1, token1.length() } - : new int[] { tabInd2, token2.length() }; - } else { - return new int[] { tabInd2, token2.length() }; - } - } - - /** - * 获取开始位置后的 LIKE、WHERE 位置 如果不含 LIKE、WHERE 则返回执行语句的长度 - * - * @param upStmt 执行sql - * @param start 开始位置 - * @return int - * @author mycat - */ - public static int getSpecEndPos(String upStmt, int start) { - int tabInd = upStmt.toUpperCase().indexOf(" LIKE ", start); - if (tabInd < 0) { - tabInd = upStmt.toUpperCase().indexOf(" WHERE ", start); - } - if (tabInd < 0) { - return upStmt.length(); - } - return tabInd; - } - - public static boolean processWithMycatSeq(SchemaConfig schema, int sqlType, - String origSQL, ServerConnection sc) { - // check if origSQL is with global sequence - // @micmiu it is just a simple judgement - //对应本地文件配置方式:insert into table1(id,name) values(next value for MYCATSEQ_GLOBAL,‘test’); - // edit by dingw,增加mycatseq_ 兼容,因为ServerConnection的373行,进行路由计算时,将原始语句全部转换为小写 - if (origSQL.indexOf(" MYCATSEQ_") != -1 || origSQL.indexOf("mycatseq_") != -1) { - processSQL(sc,schema,origSQL,sqlType); - return true; - } - return false; - } - - public static void processSQL(ServerConnection sc,SchemaConfig schema,String sql,int sqlType){ + private static int countChar(String sql, int end) { + int count = 0; + boolean skipChar = false; + for (int i = 0; i < end; i++) { + if (sql.charAt(i) == '\'' && !skipChar) { + count++; + skipChar = false; + } else if (sql.charAt(i) == '\\') { + skipChar = true; + } else { + skipChar = false; + } + } + return count; + } + + /** + * 获取第一个节点作为路由 + * + * @param rrs 数据路由集合 + * @param dataNode 数据库所在节点 + * @param stmt 执行语句 + * @return 数据路由集合 + * @author mycat + */ + public static RouteResultset routeToSingleNode(RouteResultset rrs, + String dataNode, String stmt) { + if (dataNode == null) { + return rrs; + } + RouteResultsetNode[] nodes = new RouteResultsetNode[1]; + nodes[0] = new RouteResultsetNode(dataNode, rrs.getSqlType(), stmt);//rrs.getStatement() + 
nodes[0].setSource(rrs); + rrs.setNodes(nodes); + rrs.setFinishedRoute(true); + if (rrs.getDataNodeSlotMap().containsKey(dataNode)) { + nodes[0].setSlot(rrs.getDataNodeSlotMap().get(dataNode)); + } + if (rrs.getCanRunInReadDB() != null) { + nodes[0].setCanRunInReadDB(rrs.getCanRunInReadDB()); + } + if (rrs.getRunOnSlave() != null) { + nodes[0].setRunOnSlave(rrs.getRunOnSlave()); + } + + return rrs; + } + + + /** + * 修复DDL路由 + * + * @return RouteResultset + * @author aStoneGod + */ + public static RouteResultset routeToDDLNode(RouteResultset rrs, int sqlType, String stmt, SchemaConfig schema) throws SQLSyntaxErrorException { + stmt = getFixedSql(stmt); + String tablename = ""; + final String upStmt = stmt.toUpperCase(); + if (upStmt.startsWith("CREATE")) { + if (upStmt.contains("CREATE INDEX ")) { + tablename = RouterUtil.getTableName(stmt, RouterUtil.getCreateIndexPos(upStmt, 0)); + } else { + tablename = RouterUtil.getTableName(stmt, RouterUtil.getCreateTablePos(upStmt, 0)); + } + } else if (upStmt.startsWith("DROP")) { + if (upStmt.contains("DROP INDEX ")) { + tablename = RouterUtil.getTableName(stmt, RouterUtil.getDropIndexPos(upStmt, 0)); + } else { + tablename = RouterUtil.getTableName(stmt, RouterUtil.getDropTablePos(upStmt, 0)); + } + } else if (upStmt.startsWith("ALTER")) { + tablename = RouterUtil.getTableName(stmt, RouterUtil.getAlterTablePos(upStmt, 0)); + } else if (upStmt.startsWith("TRUNCATE")) { + tablename = RouterUtil.getTableName(stmt, RouterUtil.getTruncateTablePos(upStmt, 0)); + } + tablename = tablename.toUpperCase(); + + if (schema.getTables().containsKey(tablename)) { + if (ServerParse.DDL == sqlType) { + List dataNodes = new ArrayList<>(); + Map tables = schema.getTables(); + TableConfig tc = tables.get(tablename); + if (tables != null && (tc != null)) { + dataNodes = tc.getDataNodes(); + } + boolean isSlotFunction = tc.getRule() != null && tc.getRule().getRuleAlgorithm() instanceof SlotFunction; + Iterator iterator1 = dataNodes.iterator(); 
+ int nodeSize = dataNodes.size(); + RouteResultsetNode[] nodes = new RouteResultsetNode[nodeSize]; + if (isSlotFunction) { + stmt = changeCreateTable(schema, tablename, stmt); + } + for (int i = 0; i < nodeSize; i++) { + String name = iterator1.next(); + nodes[i] = new RouteResultsetNode(name, sqlType, stmt); + nodes[i].setSource(rrs); + if (rrs.getDataNodeSlotMap().containsKey(name)) { + nodes[i].setSlot(rrs.getDataNodeSlotMap().get(name)); + } else if (isSlotFunction) { + nodes[i].setSlot(-1); + } + } + rrs.setNodes(nodes); + } + return rrs; + } else if (schema.getDataNode() != null) { //默认节点ddl + RouteResultsetNode[] nodes = new RouteResultsetNode[1]; + nodes[0] = new RouteResultsetNode(schema.getDataNode(), sqlType, stmt); + nodes[0].setSource(rrs); + rrs.setNodes(nodes); + return rrs; + } + //both tablename and defaultnode null + LOGGER.error("table not in schema----" + tablename); + throw new SQLSyntaxErrorException("op table not in schema----" + tablename); + } + + private static String changeCreateTable(SchemaConfig schema, String tableName, String sql) { + if (schema.getTables().containsKey(tableName)) { + MySqlStatementParser parser = new MySqlStatementParser(sql); + SQLStatement insertStatement = parser.parseStatement(); + if (insertStatement instanceof MySqlCreateTableStatement) { + TableConfig tableConfig = schema.getTables().get(tableName); + AbstractPartitionAlgorithm algorithm = tableConfig.getRule().getRuleAlgorithm(); + if (algorithm instanceof SlotFunction) { + SQLColumnDefinition column = new SQLColumnDefinition(); + column.setDataType(new SQLCharacterDataType("int")); + column.setName(new SQLIdentifierExpr("_slot")); + column.setComment(new SQLCharExpr("自动迁移算法slot,禁止修改")); + ((SQLCreateTableStatement) insertStatement).getTableElementList().add(column); + return insertStatement.toString(); + + } + } + + } + return sql; + } + + /** + * 处理SQL + * + * @param stmt 执行语句 + * @return 处理后SQL + * @author AStoneGod + */ + public static String 
getFixedSql(String stmt) { + stmt = stmt.replaceAll("\r\n", " "); //对于\r\n的字符 用 空格处理 rainbow + return stmt = stmt.trim(); //.toUpperCase(); + } + + /** + * 获取table名字 + * + * @param stmt 执行语句 + * @param repPos 开始位置和位数 + * @return 表名 + * @author AStoneGod + */ + public static String getTableName(String stmt, int[] repPos) { + int startPos = repPos[0]; + int secInd = stmt.indexOf(' ', startPos + 1); + if (secInd < 0) { + secInd = stmt.length(); + } + int thiInd = stmt.indexOf('(', secInd + 1); + if (thiInd < 0) { + thiInd = stmt.length(); + } + repPos[1] = secInd; + String tableName = ""; + if (stmt.toUpperCase().startsWith("DESC") || stmt.toUpperCase().startsWith("DESCRIBE")) { + tableName = stmt.substring(startPos, thiInd).trim(); + } else { + tableName = stmt.substring(secInd, thiInd).trim(); + } + + //ALTER TABLE + if (tableName.contains(" ")) { + tableName = tableName.substring(0, tableName.indexOf(" ")); + } + int ind2 = tableName.indexOf('.'); + if (ind2 > 0) { + tableName = tableName.substring(ind2 + 1); + } + return tableName; + } + + + /** + * 获取show语句table名字 + * + * @param stmt 执行语句 + * @param repPos 开始位置和位数 + * @return 表名 + * @author AStoneGod + */ + public static String getShowTableName(String stmt, int[] repPos) { + int startPos = repPos[0]; + int secInd = stmt.indexOf(' ', startPos + 1); + if (secInd < 0) { + secInd = stmt.length(); + } + + repPos[1] = secInd; + String tableName = stmt.substring(startPos, secInd).trim(); + + int ind2 = tableName.indexOf('.'); + if (ind2 > 0) { + tableName = tableName.substring(ind2 + 1); + } + return tableName; + } + + /** + * 获取语句中前关键字位置和占位个数表名位置 + * + * @param upStmt 执行语句 + * @param start 开始位置 + * @return int[] 关键字位置和占位个数 + * @author mycat + * @modification 修改支持语句中包含“IF NOT EXISTS”的情况 + * @date 2016/12/8 + * @modifiedBy Hash Zhang + */ + public static int[] getCreateTablePos(String upStmt, int start) { + String token1 = "CREATE "; + String token2 = " TABLE "; + String token3 = " EXISTS "; + int createInd = 
upStmt.indexOf(token1, start); + int tabInd1 = upStmt.indexOf(token2, start); + int tabInd2 = upStmt.indexOf(token3, tabInd1); + // 既包含CREATE又包含TABLE,且CREATE关键字在TABLE关键字之前 + if (createInd >= 0 && tabInd2 > 0 && tabInd2 > createInd) { + return new int[]{tabInd2, token3.length()}; + } else if (createInd >= 0 && tabInd1 > 0 && tabInd1 > createInd) { + return new int[]{tabInd1, token2.length()}; + } else { + return new int[]{-1, token2.length()};// 不满足条件时,只关注第一个返回值为-1,第二个任意 + } + } + + /** + * 获取语句中前关键字位置和占位个数表名位置 + * + * @param upStmt 执行语句 + * @param start 开始位置 + * @return int[]关键字位置和占位个数 + * @author aStoneGod + */ + public static int[] getCreateIndexPos(String upStmt, int start) { + String token1 = "CREATE "; + String token2 = " INDEX "; + String token3 = " ON "; + int createInd = upStmt.indexOf(token1, start); + int idxInd = upStmt.indexOf(token2, start); + int onInd = upStmt.indexOf(token3, start); + // 既包含CREATE又包含INDEX,且CREATE关键字在INDEX关键字之前, 且包含ON... + if (createInd >= 0 && idxInd > 0 && idxInd > createInd && onInd > 0 && onInd > idxInd) { + return new int[]{onInd, token3.length()}; + } else { + return new int[]{-1, token2.length()};// 不满足条件时,只关注第一个返回值为-1,第二个任意 + } + } + + /** + * 获取ALTER语句中前关键字位置和占位个数表名位置 + * + * @param upStmt 执行语句 + * @param start 开始位置 + * @return int[] 关键字位置和占位个数 + * @author aStoneGod + */ + public static int[] getAlterTablePos(String upStmt, int start) { + String token1 = "ALTER "; + String token2 = " TABLE "; + int createInd = upStmt.indexOf(token1, start); + int tabInd = upStmt.indexOf(token2, start); + // 既包含CREATE又包含TABLE,且CREATE关键字在TABLE关键字之前 + if (createInd >= 0 && tabInd > 0 && tabInd > createInd) { + return new int[]{tabInd, token2.length()}; + } else { + return new int[]{-1, token2.length()};// 不满足条件时,只关注第一个返回值为-1,第二个任意 + } + } + + /** + * 获取DROP语句中前关键字位置和占位个数表名位置 + * + * @param upStmt 执行语句 + * @param start 开始位置 + * @return int[] 关键字位置和占位个数 + * @author aStoneGod + */ + public static int[] getDropTablePos(String upStmt, int start) { + 
//增加 if exists判断 + if (upStmt.contains("EXISTS")) { + String token1 = "IF "; + String token2 = " EXISTS "; + int ifInd = upStmt.indexOf(token1, start); + int tabInd = upStmt.indexOf(token2, start); + if (ifInd >= 0 && tabInd > 0 && tabInd > ifInd) { + return new int[]{tabInd, token2.length()}; + } else { + return new int[]{-1, token2.length()};// 不满足条件时,只关注第一个返回值为-1,第二个任意 + } + } else { + String token1 = "DROP "; + String token2 = " TABLE "; + int createInd = upStmt.indexOf(token1, start); + int tabInd = upStmt.indexOf(token2, start); + + if (createInd >= 0 && tabInd > 0 && tabInd > createInd) { + return new int[]{tabInd, token2.length()}; + } else { + return new int[]{-1, token2.length()};// 不满足条件时,只关注第一个返回值为-1,第二个任意 + } + } + } + + + /** + * 获取DROP语句中前关键字位置和占位个数表名位置 + * + * @param upStmt 执行语句 + * @param start 开始位置 + * @return int[]关键字位置和占位个数 + * @author aStoneGod + */ + + public static int[] getDropIndexPos(String upStmt, int start) { + String token1 = "DROP "; + String token2 = " INDEX "; + String token3 = " ON "; + int createInd = upStmt.indexOf(token1, start); + int idxInd = upStmt.indexOf(token2, start); + int onInd = upStmt.indexOf(token3, start); + // 既包含CREATE又包含INDEX,且CREATE关键字在INDEX关键字之前, 且包含ON... 
+ if (createInd >= 0 && idxInd > 0 && idxInd > createInd && onInd > 0 && onInd > idxInd) { + return new int[]{onInd, token3.length()}; + } else { + return new int[]{-1, token2.length()};// 不满足条件时,只关注第一个返回值为-1,第二个任意 + } + } + + /** + * 获取TRUNCATE语句中前关键字位置和占位个数表名位置 + * + * @param upStmt 执行语句 + * @param start 开始位置 + * @return int[] 关键字位置和占位个数 + * @author aStoneGod + */ + public static int[] getTruncateTablePos(String upStmt, int start) { + String token1 = "TRUNCATE "; + String token2 = " TABLE "; + int createInd = upStmt.indexOf(token1, start); + int tabInd = upStmt.indexOf(token2, start); + // 既包含CREATE又包含TABLE,且CREATE关键字在TABLE关键字之前 + if (createInd >= 0 && tabInd > 0 && tabInd > createInd) { + return new int[]{tabInd, token2.length()}; + } else { + return new int[]{-1, token2.length()};// 不满足条件时,只关注第一个返回值为-1,第二个任意 + } + } + + /** + * 获取语句中前关键字位置和占位个数表名位置 + * + * @param upStmt 执行语句 + * @param start 开始位置 + * @return int[] 关键字位置和占位个数 + * @author mycat + */ + public static int[] getSpecPos(String upStmt, int start) { + String token1 = " FROM "; + String token2 = " IN "; + int tabInd1 = upStmt.indexOf(token1, start); + int tabInd2 = upStmt.indexOf(token2, start); + if (tabInd1 > 0) { + if (tabInd2 < 0) { + return new int[]{tabInd1, token1.length()}; + } + return (tabInd1 < tabInd2) ? 
new int[]{tabInd1, token1.length()} + : new int[]{tabInd2, token2.length()}; + } else { + return new int[]{tabInd2, token2.length()}; + } + } + + /** + * 获取开始位置后的 LIKE、WHERE 位置 如果不含 LIKE、WHERE 则返回执行语句的长度 + * + * @param upStmt 执行sql + * @param start 开始位置 + * @return int + * @author mycat + */ + public static int getSpecEndPos(String upStmt, int start) { + int tabInd = upStmt.toUpperCase().indexOf(" LIKE ", start); + if (tabInd < 0) { + tabInd = upStmt.toUpperCase().indexOf(" WHERE ", start); + } + if (tabInd < 0) { + return upStmt.length(); + } + return tabInd; + } + + public static boolean processWithMycatSeq(SchemaConfig schema, int sqlType, + String origSQL, ServerConnection sc) { + // check if origSQL is with global sequence + // @micmiu it is just a simple judgement + //对应本地文件配置方式:insert into table1(id,name) values(next value for MYCATSEQ_GLOBAL,‘test’); + // edit by dingw,增加mycatseq_ 兼容,因为ServerConnection的373行,进行路由计算时,将原始语句全部转换为小写 + if (origSQL.indexOf(" MYCATSEQ_") != -1 || origSQL.indexOf("mycatseq_") != -1) { + processSQL(sc, schema, origSQL, sqlType); + return true; + } + return false; + } + + public static void processSQL(ServerConnection sc, SchemaConfig schema, String sql, int sqlType) { // int sequenceHandlerType = MycatServer.getInstance().getConfig().getSystem().getSequnceHandlerType(); - final SessionSQLPair sessionSQLPair = new SessionSQLPair(sc.getSession2(), schema, sql, sqlType); + final SessionSQLPair sessionSQLPair = new SessionSQLPair(sc.getSession2(), schema, sql, sqlType); // modify by yanjunli 序列获取修改为多线程方式。使用分段锁方式,一个序列一把锁。 begin // MycatServer.getInstance().getSequnceProcessor().addNewSql(sessionSQLPair); MycatServer.getInstance().getSequenceExecutor().execute(new Runnable() { - @Override - public void run() { - MycatServer.getInstance().getSequnceProcessor().executeSeq(sessionSQLPair); - } - }); + @Override + public void run() { + MycatServer.getInstance().getSequnceProcessor().executeSeq(sessionSQLPair); + } + }); // modify 
序列获取修改为多线程方式。使用分段锁方式,一个序列一把锁。 end // } - } - - public static boolean processInsert(SchemaConfig schema, int sqlType, - String origSQL, ServerConnection sc) throws SQLNonTransientException { - String tableName = StringUtil.getTableName(origSQL).toUpperCase(); - TableConfig tableConfig = schema.getTables().get(tableName); - boolean processedInsert=false; - //判断是有自增字段 - if (null != tableConfig && tableConfig.isAutoIncrement()) { - String primaryKey = tableConfig.getPrimaryKey(); - processedInsert=processInsert(sc,schema,sqlType,origSQL,tableName,primaryKey); - } - return processedInsert; - } - - private static boolean isPKInFields(String origSQL,String primaryKey,int firstLeftBracketIndex,int firstRightBracketIndex){ - - if (primaryKey == null) { - throw new RuntimeException("please make sure the primaryKey's config is not null in schemal.xml"); - } - - boolean isPrimaryKeyInFields = false; - String upperSQL = origSQL.substring(firstLeftBracketIndex, firstRightBracketIndex + 1).toUpperCase(); - for (int pkOffset = 0, primaryKeyLength = primaryKey.length(), pkStart = 0;;) { - pkStart = upperSQL.indexOf(primaryKey, pkOffset); - if (pkStart >= 0 && pkStart < firstRightBracketIndex) { - char pkSide = upperSQL.charAt(pkStart - 1); - if (pkSide <= ' ' || pkSide == '`' || pkSide == ',' || pkSide == '(') { - pkSide = upperSQL.charAt(pkStart + primaryKey.length()); - isPrimaryKeyInFields = pkSide <= ' ' || pkSide == '`' || pkSide == ',' || pkSide == ')'; - } - if (isPrimaryKeyInFields) { - break; - } - pkOffset = pkStart + primaryKeyLength; - } else { - break; - } - } - return isPrimaryKeyInFields; - } - - public static boolean processInsert(ServerConnection sc,SchemaConfig schema, - int sqlType,String origSQL,String tableName,String primaryKey) throws SQLNonTransientException { - - int firstLeftBracketIndex = origSQL.indexOf("("); - int firstRightBracketIndex = origSQL.indexOf(")"); - String upperSql = origSQL.toUpperCase(); - int valuesIndex = upperSql.indexOf("VALUES"); - 
int selectIndex = upperSql.indexOf("SELECT"); - int fromIndex = upperSql.indexOf("FROM"); - //屏蔽insert into table1 select * from table2语句 - if(firstLeftBracketIndex < 0) { - String msg = "invalid sql:" + origSQL; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - //屏蔽批量插入 - if(selectIndex > 0 &&fromIndex>0&&selectIndex>firstRightBracketIndex&&valuesIndex<0) { - String msg = "multi insert not provided" ; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - //插入语句必须提供列结构,因为MyCat默认对于表结构无感知 - if(valuesIndex + "VALUES".length() <= firstLeftBracketIndex) { - throw new SQLSyntaxErrorException("insert must provide ColumnList"); - } - //如果主键不在插入语句的fields中,则需要进一步处理 - boolean processedInsert=!isPKInFields(origSQL,primaryKey,firstLeftBracketIndex,firstRightBracketIndex); - if(processedInsert){ - handleBatchInsert(sc, schema, sqlType,origSQL, valuesIndex,tableName,primaryKey); - } - return processedInsert; - } - - public static List handleBatchInsert(String origSQL, int valuesIndex) { - List handledSQLs = new LinkedList<>(); - String prefix = origSQL.substring(0, valuesIndex + "VALUES".length()); - String values = origSQL.substring(valuesIndex + "VALUES".length()); - int flag = 0; - StringBuilder currentValue = new StringBuilder(); - currentValue.append(prefix); - for (int i = 0; i < values.length(); i++) { - char j = values.charAt(i); - if (j == '(' && flag == 0) { - flag = 1; - currentValue.append(j); - } else if (j == '\"' && flag == 1) { - flag = 2; - currentValue.append(j); - } else if (j == '\'' && flag == 1) { - flag = 3; - currentValue.append(j); - } else if (j == '\\' && flag == 2) { - flag = 4; - currentValue.append(j); - } else if (j == '\\' && flag == 3) { - flag = 5; - currentValue.append(j); - } else if (flag == 4) { - flag = 2; - currentValue.append(j); - } else if (flag == 5) { - flag = 3; - currentValue.append(j); - } else if (j == '\"' && flag == 2) { - flag = 1; - currentValue.append(j); - } else if (j == '\'' && flag == 3) { 
- flag = 1; - currentValue.append(j); - } else if (j == ')' && flag == 1) { - flag = 0; - currentValue.append(j); - handledSQLs.add(currentValue.toString()); - currentValue = new StringBuilder(); - currentValue.append(prefix); - } else if (j == ',' && flag == 0) { - continue; - } else { - currentValue.append(j); - } - } - return handledSQLs; - } - - /** - * 对于主键不在插入语句的fields中的SQL,需要改写。比如hotnews主键为id,插入语句为: - * insert into hotnews(title) values('aaa'); - * 需要改写成: - * insert into hotnews(id, title) values(next value for MYCATSEQ_hotnews,'aaa'); - */ + } + + public static boolean processInsert(SchemaConfig schema, int sqlType, + String origSQL, ServerConnection sc) throws SQLNonTransientException { + String tableName = StringUtil.getTableName(origSQL).toUpperCase(); + TableConfig tableConfig = schema.getTables().get(tableName); + boolean processedInsert = false; + //判断是有自增字段 + if (null != tableConfig && tableConfig.isAutoIncrement()) { + String primaryKey = tableConfig.getPrimaryKey(); + processedInsert = processInsert(sc, schema, sqlType, origSQL, tableName, primaryKey); + } + return processedInsert; + } + + private static boolean isPKInFields(String origSQL, String primaryKey, int firstLeftBracketIndex, int firstRightBracketIndex) { + + if (primaryKey == null) { + throw new RuntimeException("please make sure the primaryKey's config is not null in schemal.xml"); + } + + boolean isPrimaryKeyInFields = false; + String upperSQL = origSQL.substring(firstLeftBracketIndex, firstRightBracketIndex + 1).toUpperCase(); + for (int pkOffset = 0, primaryKeyLength = primaryKey.length(), pkStart = 0; ; ) { + pkStart = upperSQL.indexOf(primaryKey, pkOffset); + if (pkStart >= 0 && pkStart < firstRightBracketIndex) { + char pkSide = upperSQL.charAt(pkStart - 1); + if (pkSide <= ' ' || pkSide == '`' || pkSide == ',' || pkSide == '(') { + pkSide = upperSQL.charAt(pkStart + primaryKey.length()); + isPrimaryKeyInFields = pkSide <= ' ' || pkSide == '`' || pkSide == ',' || pkSide == ')'; + 
} + if (isPrimaryKeyInFields) { + break; + } + pkOffset = pkStart + primaryKeyLength; + } else { + break; + } + } + return isPrimaryKeyInFields; + } + + public static boolean processInsert(ServerConnection sc, SchemaConfig schema, + int sqlType, String origSQL, String tableName, String primaryKey) throws SQLNonTransientException { + + int firstLeftBracketIndex = origSQL.indexOf("("); + int firstRightBracketIndex = origSQL.indexOf(")"); + String upperSql = origSQL.toUpperCase(); + int valuesIndex = upperSql.indexOf("VALUES"); + int selectIndex = upperSql.indexOf("SELECT"); + int fromIndex = upperSql.indexOf("FROM"); + //屏蔽insert into table1 select * from table2语句 + if (firstLeftBracketIndex < 0) { + String msg = "invalid sql:" + origSQL; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + //屏蔽批量插入 + if (selectIndex > 0 && fromIndex > 0 && selectIndex > firstRightBracketIndex && valuesIndex < 0) { + String msg = "multi insert not provided"; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + //插入语句必须提供列结构,因为MyCat默认对于表结构无感知 + if (valuesIndex + "VALUES".length() <= firstLeftBracketIndex) { + throw new SQLSyntaxErrorException("insert must provide ColumnList"); + } + //如果主键不在插入语句的fields中,则需要进一步处理 + boolean processedInsert = !isPKInFields(origSQL, primaryKey, firstLeftBracketIndex, firstRightBracketIndex); + if (processedInsert) { + handleBatchInsert(sc, schema, sqlType, origSQL, valuesIndex, tableName, primaryKey); + } + return processedInsert; + } + + public static List handleBatchInsert(String origSQL, int valuesIndex) { + List handledSQLs = new LinkedList<>(); + String prefix = origSQL.substring(0, valuesIndex + "VALUES".length()); + String values = origSQL.substring(valuesIndex + "VALUES".length()); + int flag = 0; + StringBuilder currentValue = new StringBuilder(); + currentValue.append(prefix); + for (int i = 0; i < values.length(); i++) { + char j = values.charAt(i); + if (j == '(' && flag == 0) { + flag = 1; + 
currentValue.append(j); + } else if (j == '\"' && flag == 1) { + flag = 2; + currentValue.append(j); + } else if (j == '\'' && flag == 1) { + flag = 3; + currentValue.append(j); + } else if (j == '\\' && flag == 2) { + flag = 4; + currentValue.append(j); + } else if (j == '\\' && flag == 3) { + flag = 5; + currentValue.append(j); + } else if (flag == 4) { + flag = 2; + currentValue.append(j); + } else if (flag == 5) { + flag = 3; + currentValue.append(j); + } else if (j == '\"' && flag == 2) { + flag = 1; + currentValue.append(j); + } else if (j == '\'' && flag == 3) { + flag = 1; + currentValue.append(j); + } else if (j == ')' && flag == 1) { + flag = 0; + currentValue.append(j); + handledSQLs.add(currentValue.toString()); + currentValue = new StringBuilder(); + currentValue.append(prefix); + } else if (j == ',' && flag == 0) { + continue; + } else { + currentValue.append(j); + } + } + return handledSQLs; + } + + /** + * 对于主键不在插入语句的fields中的SQL,需要改写。比如hotnews主键为id,插入语句为: + * insert into hotnews(title) values('aaa'); + * 需要改写成: + * insert into hotnews(id, title) values(next value for MYCATSEQ_hotnews,'aaa'); + */ public static void handleBatchInsert(ServerConnection sc, SchemaConfig schema, - int sqlType,String origSQL, int valuesIndex,String tableName, String primaryKey) { - - final String pk = "\\("+primaryKey+","; - final String mycatSeqPrefix = "(next value for MYCATSEQ_"+tableName.toUpperCase()+","; - + int sqlType, String origSQL, int valuesIndex, String tableName, String primaryKey) { + + final String pk = "\\(" + primaryKey + ","; + final String mycatSeqPrefix = "(next value for MYCATSEQ_" + tableName.toUpperCase() + ","; + /*"VALUES".length() ==6 */ String prefix = origSQL.substring(0, valuesIndex + 6); String values = origSQL.substring(valuesIndex + 6); - + prefix = prefix.replaceFirst("\\(", pk); values = values.replaceFirst("\\(", mycatSeqPrefix); - values =Pattern.compile(",\\s*\\(").matcher(values).replaceAll(","+mycatSeqPrefix); - processSQL(sc, 
schema,prefix+values, sqlType); + values = Pattern.compile(",\\s*\\(").matcher(values).replaceAll("," + mycatSeqPrefix); + processSQL(sc, schema, prefix + values, sqlType); + } + + public static RouteResultset routeToMultiNode(boolean cache, RouteResultset rrs, Collection dataNodes, String stmt) { + RouteResultsetNode[] nodes = new RouteResultsetNode[dataNodes.size()]; + int i = 0; + RouteResultsetNode node; + for (String dataNode : dataNodes) { + node = new RouteResultsetNode(dataNode, rrs.getSqlType(), stmt); + node.setSource(rrs); + if (rrs.getDataNodeSlotMap().containsKey(dataNode)) { + node.setSlot(rrs.getDataNodeSlotMap().get(dataNode)); + } + if (rrs.getCanRunInReadDB() != null) { + node.setCanRunInReadDB(rrs.getCanRunInReadDB()); + } + if (rrs.getRunOnSlave() != null) { + nodes[0].setRunOnSlave(rrs.getRunOnSlave()); + } + nodes[i++] = node; + } + rrs.setCacheAble(cache); + rrs.setNodes(nodes); + return rrs; + } + + public static RouteResultset routeToMultiNode(boolean cache, RouteResultset rrs, Collection dataNodes, + String stmt, boolean isGlobalTable) { + + rrs = routeToMultiNode(cache, rrs, dataNodes, stmt); + rrs.setGlobalTable(isGlobalTable); + return rrs; + } + + public static void routeForTableMeta(RouteResultset rrs, + SchemaConfig schema, String tableName, String sql) { + String dataNode = null; + if (isNoSharding(schema, tableName)) {//不分库的直接从schema中获取dataNode + dataNode = schema.getDataNode(); + } else { + dataNode = getMetaReadDataNode(schema, tableName); + } + + RouteResultsetNode[] nodes = new RouteResultsetNode[1]; + nodes[0] = new RouteResultsetNode(dataNode, rrs.getSqlType(), sql); + nodes[0].setSource(rrs); + if (rrs.getDataNodeSlotMap().containsKey(dataNode)) { + nodes[0].setSlot(rrs.getDataNodeSlotMap().get(dataNode)); + } + if (rrs.getCanRunInReadDB() != null) { + nodes[0].setCanRunInReadDB(rrs.getCanRunInReadDB()); + } + if (rrs.getRunOnSlave() != null) { + nodes[0].setRunOnSlave(rrs.getRunOnSlave()); + } + rrs.setNodes(nodes); + } + + 
/** + * 根据表名随机获取一个节点 + * + * @param schema 数据库名 + * @param table 表名 + * @return 数据节点 + * @author mycat + */ + private static String getMetaReadDataNode(SchemaConfig schema, + String table) { + // Table名字被转化为大写的,存储在schema + table = table.toUpperCase(); + String dataNode = null; + Map tables = schema.getTables(); + TableConfig tc; + if (tables != null && (tc = tables.get(table)) != null) { + dataNode = getAliveRandomDataNode(tc); + } + return dataNode; + } + + /** + * 解决getRandomDataNode方法获取错误节点的问题. + * + * @param tc + * @return + */ + private static String getAliveRandomDataNode(TableConfig tc) { + List randomDns = tc.getDataNodes(); + + MycatConfig mycatConfig = MycatServer.getInstance().getConfig(); + if (mycatConfig != null) { + for (String randomDn : randomDns) { + PhysicalDBNode physicalDBNode = mycatConfig.getDataNodes().get(randomDn); + if (physicalDBNode != null) { + if (physicalDBNode.getDbPool().getSource().isAlive()) { + for (PhysicalDBPool pool : MycatServer.getInstance().getConfig().getDataHosts().values()) { + PhysicalDatasource source = pool.getSource(); + if (source.getHostConfig().containDataNode(randomDn) && pool.getSource().isAlive()) { + return randomDn; + } + } + } + } + } + } + + // all fail return default + return tc.getRandomDataNode(); } - public static RouteResultset routeToMultiNode(boolean cache,RouteResultset rrs, Collection dataNodes, String stmt) { - RouteResultsetNode[] nodes = new RouteResultsetNode[dataNodes.size()]; - int i = 0; - RouteResultsetNode node; - for (String dataNode : dataNodes) { - node = new RouteResultsetNode(dataNode, rrs.getSqlType(), stmt); - node.setSource(rrs); - if(rrs.getDataNodeSlotMap().containsKey(dataNode)){ - node.setSlot(rrs.getDataNodeSlotMap().get(dataNode)); - } - if (rrs.getCanRunInReadDB() != null) { - node.setCanRunInReadDB(rrs.getCanRunInReadDB()); - } - if(rrs.getRunOnSlave() != null){ - nodes[0].setRunOnSlave(rrs.getRunOnSlave()); - } - nodes[i++] = node; - } - rrs.setCacheAble(cache); - 
rrs.setNodes(nodes); - return rrs; - } - - public static RouteResultset routeToMultiNode(boolean cache, RouteResultset rrs, Collection dataNodes, - String stmt, boolean isGlobalTable) { - - rrs = routeToMultiNode(cache, rrs, dataNodes, stmt); - rrs.setGlobalTable(isGlobalTable); - return rrs; - } - - public static void routeForTableMeta(RouteResultset rrs, - SchemaConfig schema, String tableName, String sql) { - String dataNode = null; - if (isNoSharding(schema,tableName)) {//不分库的直接从schema中获取dataNode - dataNode = schema.getDataNode(); - } else { - dataNode = getMetaReadDataNode(schema, tableName); - } - - RouteResultsetNode[] nodes = new RouteResultsetNode[1]; - nodes[0] = new RouteResultsetNode(dataNode, rrs.getSqlType(), sql); - nodes[0].setSource(rrs); - if(rrs.getDataNodeSlotMap().containsKey(dataNode)){ - nodes[0].setSlot(rrs.getDataNodeSlotMap().get(dataNode)); - } - if (rrs.getCanRunInReadDB() != null) { - nodes[0].setCanRunInReadDB(rrs.getCanRunInReadDB()); - } - if(rrs.getRunOnSlave() != null){ - nodes[0].setRunOnSlave(rrs.getRunOnSlave()); - } - rrs.setNodes(nodes); - } - - /** - * 根据表名随机获取一个节点 - * - * @param schema 数据库名 - * @param table 表名 - * @return 数据节点 - * @author mycat - */ - private static String getMetaReadDataNode(SchemaConfig schema, - String table) { - // Table名字被转化为大写的,存储在schema - table = table.toUpperCase(); - String dataNode = null; - Map tables = schema.getTables(); - TableConfig tc; - if (tables != null && (tc = tables.get(table)) != null) { - dataNode = getAliveRandomDataNode(tc); - } - return dataNode; - } - - /** - * 解决getRandomDataNode方法获取错误节点的问题. 
- * @param tc - * @return - */ - private static String getAliveRandomDataNode(TableConfig tc) { - List randomDns = tc.getDataNodes(); - - MycatConfig mycatConfig = MycatServer.getInstance().getConfig(); - if (mycatConfig != null) { - for (String randomDn : randomDns) { - PhysicalDBNode physicalDBNode = mycatConfig.getDataNodes().get(randomDn); - if (physicalDBNode != null) { - if (physicalDBNode.getDbPool().getSource().isAlive()) { - for (PhysicalDBPool pool : MycatServer.getInstance().getConfig().getDataHosts().values()) { - PhysicalDatasource source = pool.getSource(); - if (source.getHostConfig().containDataNode(randomDn) && pool.getSource().isAlive()) { - return randomDn; - } - } - } - } - } - } - - // all fail return default - return tc.getRandomDataNode(); - } - - @Deprecated + @Deprecated private static String getRandomDataNode(TableConfig tc) { //写节点不可用,意味着读节点也不可用。 //直接使用下一个 dataHost @@ -848,90 +826,90 @@ private static String getRandomDataNode(TableConfig tc) { return randomDn; } - /** - * 根据 ER分片规则获取路由集合 - * - * @param stmt 执行的语句 - * @param rrs 数据路由集合 - * @param tc 表实体 - * @param joinKeyVal 连接属性 - * @return RouteResultset(数据路由集合) * - * @throws SQLNonTransientException,IllegalShardingColumnValueException - * @author mycat - */ - - public static RouteResultset routeByERParentKey(ServerConnection sc,SchemaConfig schema, - int sqlType,String stmt, - RouteResultset rrs, TableConfig tc, String joinKeyVal) - throws SQLNonTransientException { - - // only has one parent level and ER parent key is parent - // table's partition key - if (tc.isSecondLevel() - //判断是否为二级子表(父表不再有父表) - && tc.getParentTC().getPartitionColumn() - .equals(tc.getParentKey())) { // using - // parent - // rule to - // find - // datanode - Set parentColVal = new HashSet(1); - ColumnRoutePair pair = new ColumnRoutePair(joinKeyVal); - parentColVal.add(pair); - Set dataNodeSet = ruleCalculate(tc.getParentTC(), parentColVal,rrs.getDataNodeSlotMap()); - if (dataNodeSet.isEmpty() || 
dataNodeSet.size() > 1) { - throw new SQLNonTransientException( - "parent key can't find valid datanode ,expect 1 but found: " - + dataNodeSet.size()); - } - String dn = dataNodeSet.iterator().next(); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("found partion node (using parent partion rule directly) for child table to insert " - + dn + " sql :" + stmt); - } - return RouterUtil.routeToSingleNode(rrs, dn, stmt); - } - return null; - } - - /** - * @return dataNodeIndex -> [partitionKeysValueTuple+] - */ - public static Set ruleByJoinValueCalculate(RouteResultset rrs, TableConfig tc, - Set colRoutePairSet) throws SQLNonTransientException { - - String joinValue = ""; - - if(colRoutePairSet.size() > 1) { - LOGGER.warn("joinKey can't have multi Value"); - } else { - Iterator it = colRoutePairSet.iterator(); - ColumnRoutePair joinCol = it.next(); - joinValue = joinCol.colValue; - } - - Set retNodeSet = new LinkedHashSet(); - - Set nodeSet; - if (tc.isSecondLevel() - && tc.getParentTC().getPartitionColumn() - .equals(tc.getParentKey())) { // using - // parent - // rule to - // find - // datanode - - nodeSet = ruleCalculate(tc.getParentTC(),colRoutePairSet,rrs.getDataNodeSlotMap()); - if (nodeSet.isEmpty()) { - throw new SQLNonTransientException( - "parent key can't find valid datanode ,expect 1 but found: " - + nodeSet.size()); - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("found partion node (using parent partion rule directly) for child table to insert " - + nodeSet + " sql :" + rrs.getStatement()); - } - retNodeSet.addAll(nodeSet); + /** + * 根据 ER分片规则获取路由集合 + * + * @param stmt 执行的语句 + * @param rrs 数据路由集合 + * @param tc 表实体 + * @param joinKeyVal 连接属性 + * @return RouteResultset(数据路由集合) * + * @throws SQLNonTransientException,IllegalShardingColumnValueException + * @author mycat + */ + + public static RouteResultset routeByERParentKey(ServerConnection sc, SchemaConfig schema, + int sqlType, String stmt, + RouteResultset rrs, TableConfig tc, String joinKeyVal) + 
throws SQLNonTransientException { + + // only has one parent level and ER parent key is parent + // table's partition key + if (tc.isSecondLevel() + //判断是否为二级子表(父表不再有父表) + && tc.getParentTC().getPartitionColumn() + .equals(tc.getParentKey())) { // using + // parent + // rule to + // find + // datanode + Set parentColVal = new HashSet(1); + ColumnRoutePair pair = new ColumnRoutePair(joinKeyVal); + parentColVal.add(pair); + Set dataNodeSet = ruleCalculate(tc.getParentTC(), parentColVal, rrs.getDataNodeSlotMap()); + if (dataNodeSet.isEmpty() || dataNodeSet.size() > 1) { + throw new SQLNonTransientException( + "parent key can't find valid datanode ,expect 1 but found: " + + dataNodeSet.size()); + } + String dn = dataNodeSet.iterator().next(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("found partion node (using parent partion rule directly) for child table to insert " + + dn + " sql :" + stmt); + } + return RouterUtil.routeToSingleNode(rrs, dn, stmt); + } + return null; + } + + /** + * @return dataNodeIndex -> [partitionKeysValueTuple+] + */ + public static Set ruleByJoinValueCalculate(RouteResultset rrs, TableConfig tc, + Set colRoutePairSet) throws SQLNonTransientException { + + String joinValue = ""; + + if (colRoutePairSet.size() > 1) { + LOGGER.warn("joinKey can't have multi Value"); + } else { + Iterator it = colRoutePairSet.iterator(); + ColumnRoutePair joinCol = it.next(); + joinValue = joinCol.colValue; + } + + Set retNodeSet = new LinkedHashSet(); + + Set nodeSet; + if (tc.isSecondLevel() + && tc.getParentTC().getPartitionColumn() + .equals(tc.getParentKey())) { // using + // parent + // rule to + // find + // datanode + + nodeSet = ruleCalculate(tc.getParentTC(), colRoutePairSet, rrs.getDataNodeSlotMap()); + if (nodeSet.isEmpty()) { + throw new SQLNonTransientException( + "parent key can't find valid datanode ,expect 1 but found: " + + nodeSet.size()); + } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("found partion node (using parent partion rule 
directly) for child table to insert " + + nodeSet + " sql :" + rrs.getStatement()); + } + retNodeSet.addAll(nodeSet); // for(ColumnRoutePair pair : colRoutePairSet) { // nodeSet = ruleCalculate(tc.getParentTC(),colRoutePairSet); @@ -947,813 +925,813 @@ public static Set ruleByJoinValueCalculate(RouteResultset rrs, TableConf // } // retNodeSet.addAll(nodeSet); // } - return retNodeSet; - } else { - retNodeSet.addAll(tc.getParentTC().getDataNodes()); - } - - return retNodeSet; - } - - - /** - * @return dataNodeIndex -> [partitionKeysValueTuple+] - */ - public static Set ruleCalculate(TableConfig tc, - Set colRoutePairSet,Map dataNodeSlotMap) { - Set routeNodeSet = new LinkedHashSet(); - String col = tc.getRule().getColumn(); - RuleConfig rule = tc.getRule(); - AbstractPartitionAlgorithm algorithm = rule.getRuleAlgorithm(); - for (ColumnRoutePair colPair : colRoutePairSet) { - if (colPair.colValue != null) { - Integer nodeIndx = algorithm.calculate(colPair.colValue); - if (nodeIndx == null) { - throw new IllegalArgumentException( - "can't find datanode for sharding column:" + col - + " val:" + colPair.colValue); - } else { - String dataNode = tc.getDataNodes().get(nodeIndx); - routeNodeSet.add(dataNode); - if(algorithm instanceof SlotFunction) { - dataNodeSlotMap.put(dataNode,((SlotFunction) algorithm).slotValue()); - } - colPair.setNodeId(nodeIndx); - } - } else if (colPair.rangeValue != null) { - Integer[] nodeRange = algorithm.calculateRange( - String.valueOf(colPair.rangeValue.beginValue), - String.valueOf(colPair.rangeValue.endValue)); - if (nodeRange != null) { - /** - * 不能确认 colPair的 nodeid是否会有其它影响 - */ - if (nodeRange.length == 0) { - routeNodeSet.addAll(tc.getDataNodes()); - } else { - ArrayList dataNodes = tc.getDataNodes(); - String dataNode = null; - for (Integer nodeId : nodeRange) { - dataNode = dataNodes.get(nodeId); - if(algorithm instanceof SlotFunction) { - dataNodeSlotMap.put(dataNode,((SlotFunction) algorithm).slotValue()); - } - 
routeNodeSet.add(dataNode); - } - } - } - } - - } - return routeNodeSet; - } - - /** - * 多表路由 - */ - public static RouteResultset tryRouteForTables(SchemaConfig schema, DruidShardingParseInfo ctx, - RouteCalculateUnit routeUnit, RouteResultset rrs, boolean isSelect, LayerCachePool cachePool) - throws SQLNonTransientException { - - List tables = ctx.getTables(); - - if(schema.isNoSharding()||(tables.size() >= 1&&isNoSharding(schema,tables.get(0)))) { - return routeToSingleNode(rrs, schema.getDataNode(), ctx.getSql()); - } - - //只有一个表的 - if(tables.size() == 1) { - return RouterUtil.tryRouteForOneTable(schema, ctx, routeUnit, tables.get(0), rrs, isSelect, cachePool); - } - - Set retNodesSet = new HashSet(); - //每个表对应的路由映射 - Map> tablesRouteMap = new HashMap>(); - - //分库解析信息不为空 - Map>> tablesAndConditions = routeUnit.getTablesAndConditions(); - if(tablesAndConditions != null && tablesAndConditions.size() > 0) { - //为分库表找路由 - RouterUtil.findRouteWithcConditionsForTables(schema, rrs, tablesAndConditions, tablesRouteMap, ctx.getSql(), cachePool, isSelect); - if(rrs.isFinishedRoute()) { - return rrs; - } - } - - //为全局表和单库表找路由 - for(String tableName : tables) { - - TableConfig tableConfig = schema.getTables().get(tableName.toUpperCase()); - - if(tableConfig == null) { - //add 如果表读取不到则先将表名从别名中读取转化后再读取 - String alias = ctx.getTableAliasMap().get(tableName); - if(!StringUtil.isEmpty(alias)){ - tableConfig = schema.getTables().get(alias.toUpperCase()); - } - - if(tableConfig == null){ - String msg = "can't find table define in schema "+ tableName + " schema:" + schema.getName(); - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - - } - if(tableConfig.isGlobalTable()) {//全局表 - if(tablesRouteMap.get(tableName) == null) { - tablesRouteMap.put(tableName, new HashSet()); - } - tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); - } else if(tablesRouteMap.get(tableName) == null) { //余下的表都是单库表 - tablesRouteMap.put(tableName, new HashSet()); - 
tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); - } - } - - boolean isFirstAdd = true; - for(Map.Entry> entry : tablesRouteMap.entrySet()) { - if(entry.getValue() == null || entry.getValue().size() == 0) { - throw new SQLNonTransientException("parent key can't find any valid datanode "); - } else { - if(isFirstAdd) { - retNodesSet.addAll(entry.getValue()); - isFirstAdd = false; - } else { - retNodesSet.retainAll(entry.getValue()); - if(retNodesSet.size() == 0) {//两个表的路由无交集 - String errMsg = "invalid route in sql, multi tables found but datanode has no intersection " - + " sql:" + ctx.getSql(); - LOGGER.warn(errMsg); - throw new SQLNonTransientException(errMsg); - } - } - } - } - - if(retNodesSet != null && retNodesSet.size() > 0) { - String tableName = tables.get(0); - TableConfig tableConfig = schema.getTables().get(tableName.toUpperCase()); - if(tableConfig.isDistTable()){ - routeToDistTableNode(tableName,schema, rrs, ctx.getSql(), tablesAndConditions, cachePool, isSelect); - return rrs; - } - - if(retNodesSet.size() > 1 && isAllGlobalTable(ctx, schema)) { - // mulit routes ,not cache route result - if (isSelect) { - rrs.setCacheAble(false); - routeToSingleNode(rrs, retNodesSet.iterator().next(), ctx.getSql()); - } - else {//delete 删除全局表的记录 - routeToMultiNode(isSelect, rrs, retNodesSet, ctx.getSql(),true); - } - - } else { - routeToMultiNode(isSelect, rrs, retNodesSet, ctx.getSql()); - } - - } - return rrs; - - } - - - /** - * - * 单表路由 - */ - public static RouteResultset tryRouteForOneTable(SchemaConfig schema, DruidShardingParseInfo ctx, - RouteCalculateUnit routeUnit, String tableName, RouteResultset rrs, boolean isSelect, - LayerCachePool cachePool) throws SQLNonTransientException { - - if (isNoSharding(schema, tableName)) { - return routeToSingleNode(rrs, schema.getDataNode(), ctx.getSql()); - } - - TableConfig tc = schema.getTables().get(tableName); - if(tc == null) { - String msg = "can't find table define in schema " + tableName + " 
schema:" + schema.getName(); - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - - if(tc.isDistTable()){ - return routeToDistTableNode(tableName,schema,rrs,ctx.getSql(), routeUnit.getTablesAndConditions(), cachePool,isSelect); - } - - if(tc.isGlobalTable()) {//全局表 - if(isSelect) { - // global select ,not cache route result - rrs.setCacheAble(false); - return routeToSingleNode(rrs, getAliveRandomDataNode(tc)/*getRandomDataNode(tc)*/, ctx.getSql()); - } else {//insert into 全局表的记录 - return routeToMultiNode(false, rrs, tc.getDataNodes(), ctx.getSql(),true); - } - } else {//单表或者分库表 - if (!checkRuleRequired(schema, ctx, routeUnit, tc)) { - throw new IllegalArgumentException("route rule for table " - + tc.getName() + " is required: " + ctx.getSql()); - - } - if(tc.getPartitionColumn() == null && !tc.isSecondLevel()) {//单表且不是childTable + return retNodeSet; + } else { + retNodeSet.addAll(tc.getParentTC().getDataNodes()); + } + + return retNodeSet; + } + + + /** + * @return dataNodeIndex -> [partitionKeysValueTuple+] + */ + public static Set ruleCalculate(TableConfig tc, + Set colRoutePairSet, Map dataNodeSlotMap) { + Set routeNodeSet = new LinkedHashSet(); + String col = tc.getRule().getColumn(); + RuleConfig rule = tc.getRule(); + AbstractPartitionAlgorithm algorithm = rule.getRuleAlgorithm(); + for (ColumnRoutePair colPair : colRoutePairSet) { + if (colPair.colValue != null) { + Integer nodeIndx = algorithm.calculate(colPair.colValue); + if (nodeIndx == null) { + throw new IllegalArgumentException( + "can't find datanode for sharding column:" + col + + " val:" + colPair.colValue); + } else { + String dataNode = tc.getDataNodes().get(nodeIndx); + routeNodeSet.add(dataNode); + if (algorithm instanceof SlotFunction) { + dataNodeSlotMap.put(dataNode, ((SlotFunction) algorithm).slotValue()); + } + colPair.setNodeId(nodeIndx); + } + } else if (colPair.rangeValue != null) { + Integer[] nodeRange = algorithm.calculateRange( + 
String.valueOf(colPair.rangeValue.beginValue), + String.valueOf(colPair.rangeValue.endValue)); + if (nodeRange != null) { + /** + * 不能确认 colPair的 nodeid是否会有其它影响 + */ + if (nodeRange.length == 0) { + routeNodeSet.addAll(tc.getDataNodes()); + } else { + ArrayList dataNodes = tc.getDataNodes(); + String dataNode = null; + for (Integer nodeId : nodeRange) { + dataNode = dataNodes.get(nodeId); + if (algorithm instanceof SlotFunction) { + dataNodeSlotMap.put(dataNode, ((SlotFunction) algorithm).slotValue()); + } + routeNodeSet.add(dataNode); + } + } + } + } + + } + return routeNodeSet; + } + + /** + * 多表路由 + */ + public static RouteResultset tryRouteForTables(SchemaConfig schema, DruidShardingParseInfo ctx, + RouteCalculateUnit routeUnit, RouteResultset rrs, boolean isSelect, LayerCachePool cachePool) + throws SQLNonTransientException { + + List tables = ctx.getTables(); + + if (schema.isNoSharding() || (tables.size() >= 1 && isNoSharding(schema, tables.get(0)))) { + return routeToSingleNode(rrs, schema.getDataNode(), ctx.getSql()); + } + + //只有一个表的 + if (tables.size() == 1) { + return RouterUtil.tryRouteForOneTable(schema, ctx, routeUnit, tables.get(0), rrs, isSelect, cachePool); + } + + Set retNodesSet = new HashSet(); + //每个表对应的路由映射 + Map> tablesRouteMap = new HashMap>(); + + //分库解析信息不为空 + Map>> tablesAndConditions = routeUnit.getTablesAndConditions(); + if (tablesAndConditions != null && tablesAndConditions.size() > 0) { + //为分库表找路由 + RouterUtil.findRouteWithcConditionsForTables(schema, rrs, tablesAndConditions, tablesRouteMap, ctx.getSql(), cachePool, isSelect); + if (rrs.isFinishedRoute()) { + return rrs; + } + } + + //为全局表和单库表找路由 + for (String tableName : tables) { + + TableConfig tableConfig = schema.getTables().get(tableName.toUpperCase()); + + if (tableConfig == null) { + //add 如果表读取不到则先将表名从别名中读取转化后再读取 + String alias = ctx.getTableAliasMap().get(tableName); + if (!StringUtil.isEmpty(alias)) { + tableConfig = schema.getTables().get(alias.toUpperCase()); + } + + 
if (tableConfig == null) { + String msg = "can't find table define in schema " + tableName + " schema:" + schema.getName(); + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + + } + if (tableConfig.isGlobalTable()) {//全局表 + if (tablesRouteMap.get(tableName) == null) { + tablesRouteMap.put(tableName, new HashSet()); + } + tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); + } else if (tablesRouteMap.get(tableName) == null) { //余下的表都是单库表 + tablesRouteMap.put(tableName, new HashSet()); + tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); + } + } + + boolean isFirstAdd = true; + for (Map.Entry> entry : tablesRouteMap.entrySet()) { + if (entry.getValue() == null || entry.getValue().size() == 0) { + throw new SQLNonTransientException("parent key can't find any valid datanode "); + } else { + if (isFirstAdd) { + retNodesSet.addAll(entry.getValue()); + isFirstAdd = false; + } else { + retNodesSet.retainAll(entry.getValue()); + if (retNodesSet.size() == 0) {//两个表的路由无交集 + String errMsg = "invalid route in sql, multi tables found but datanode has no intersection " + + " sql:" + ctx.getSql(); + LOGGER.warn(errMsg); + throw new SQLNonTransientException(errMsg); + } + } + } + } + + if (retNodesSet != null && retNodesSet.size() > 0) { + String tableName = tables.get(0); + TableConfig tableConfig = schema.getTables().get(tableName.toUpperCase()); + if (tableConfig.isDistTable()) { + routeToDistTableNode(tableName, schema, rrs, ctx.getSql(), tablesAndConditions, cachePool, isSelect); + return rrs; + } + + if (retNodesSet.size() > 1 && isAllGlobalTable(ctx, schema)) { + // mulit routes ,not cache route result + if (isSelect) { + rrs.setCacheAble(false); + routeToSingleNode(rrs, retNodesSet.iterator().next(), ctx.getSql()); + } else {//delete 删除全局表的记录 + routeToMultiNode(isSelect, rrs, retNodesSet, ctx.getSql(), true); + } + + } else { + routeToMultiNode(isSelect, rrs, retNodesSet, ctx.getSql()); + } + + } + return rrs; + + } + + + /** + 
* 单表路由 + */ + public static RouteResultset tryRouteForOneTable(SchemaConfig schema, DruidShardingParseInfo ctx, + RouteCalculateUnit routeUnit, String tableName, RouteResultset rrs, boolean isSelect, + LayerCachePool cachePool) throws SQLNonTransientException { + + if (isNoSharding(schema, tableName)) { + return routeToSingleNode(rrs, schema.getDataNode(), ctx.getSql()); + } + + TableConfig tc = schema.getTables().get(tableName); + if (tc == null) { + String msg = "can't find table define in schema " + tableName + " schema:" + schema.getName(); + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + + if (tc.isDistTable()) { + return routeToDistTableNode(tableName, schema, rrs, ctx.getSql(), routeUnit.getTablesAndConditions(), cachePool, isSelect); + } + + if (tc.isGlobalTable()) {//全局表 + if (isSelect) { + // global select ,not cache route result + rrs.setCacheAble(false); + return routeToSingleNode(rrs, getAliveRandomDataNode(tc)/*getRandomDataNode(tc)*/, ctx.getSql()); + } else {//insert into 全局表的记录 + return routeToMultiNode(false, rrs, tc.getDataNodes(), ctx.getSql(), true); + } + } else {//单表或者分库表 + if (!checkRuleRequired(schema, ctx, routeUnit, tc)) { + throw new IllegalArgumentException("route rule for table " + + tc.getName() + " is required: " + ctx.getSql()); + + } + if (tc.getPartitionColumn() == null && !tc.isSecondLevel()) {//单表且不是childTable // return RouterUtil.routeToSingleNode(rrs, tc.getDataNodes().get(0),ctx.getSql()); - return routeToMultiNode(rrs.isCacheAble(), rrs, tc.getDataNodes(), ctx.getSql()); - } else { - //每个表对应的路由映射 - Map> tablesRouteMap = new HashMap>(); - if(routeUnit.getTablesAndConditions() != null && routeUnit.getTablesAndConditions().size() > 0) { - RouterUtil.findRouteWithcConditionsForTables(schema, rrs, routeUnit.getTablesAndConditions(), tablesRouteMap, ctx.getSql(), cachePool, isSelect); - if(rrs.isFinishedRoute()) { - return rrs; - } - } - - if(tablesRouteMap.get(tableName) == null) { - return 
routeToMultiNode(rrs.isCacheAble(), rrs, tc.getDataNodes(), ctx.getSql()); - } else { - return routeToMultiNode(rrs.isCacheAble(), rrs, tablesRouteMap.get(tableName), ctx.getSql()); - } - } - } - } - - private static RouteResultset routeToDistTableNode(String tableName, SchemaConfig schema, RouteResultset rrs, - String orgSql, Map>> tablesAndConditions, - LayerCachePool cachePool, boolean isSelect) throws SQLNonTransientException { - - TableConfig tableConfig = schema.getTables().get(tableName); - if(tableConfig == null) { - String msg = "can't find table define in schema " + tableName + " schema:" + schema.getName(); - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - if(tableConfig.isGlobalTable()){ - String msg = "can't suport district table " + tableName + " schema:" + schema.getName() + " for global table "; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - String partionCol = tableConfig.getPartitionColumn(); + return routeToMultiNode(rrs.isCacheAble(), rrs, tc.getDataNodes(), ctx.getSql()); + } else { + //每个表对应的路由映射 + Map> tablesRouteMap = new HashMap>(); + if (routeUnit.getTablesAndConditions() != null && routeUnit.getTablesAndConditions().size() > 0) { + RouterUtil.findRouteWithcConditionsForTables(schema, rrs, routeUnit.getTablesAndConditions(), tablesRouteMap, ctx.getSql(), cachePool, isSelect); + if (rrs.isFinishedRoute()) { + return rrs; + } + } + + if (tablesRouteMap.get(tableName) == null) { + return routeToMultiNode(rrs.isCacheAble(), rrs, tc.getDataNodes(), ctx.getSql()); + } else { + return routeToMultiNode(rrs.isCacheAble(), rrs, tablesRouteMap.get(tableName), ctx.getSql()); + } + } + } + } + + private static RouteResultset routeToDistTableNode(String tableName, SchemaConfig schema, RouteResultset rrs, + String orgSql, Map>> tablesAndConditions, + LayerCachePool cachePool, boolean isSelect) throws SQLNonTransientException { + + TableConfig tableConfig = schema.getTables().get(tableName); + if (tableConfig == 
null) { + String msg = "can't find table define in schema " + tableName + " schema:" + schema.getName(); + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + if (tableConfig.isGlobalTable()) { + String msg = "can't suport district table " + tableName + " schema:" + schema.getName() + " for global table "; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + String partionCol = tableConfig.getPartitionColumn(); // String primaryKey = tableConfig.getPrimaryKey(); - boolean isLoadData=false; + boolean isLoadData = false; Set tablesRouteSet = new HashSet(); List dataNodes = tableConfig.getDataNodes(); - if(dataNodes.size()>1){ - String msg = "can't suport district table " + tableName + " schema:" + schema.getName() + " for mutiple dataNode " + dataNodes; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); + if (dataNodes.size() > 1) { + String msg = "can't suport district table " + tableName + " schema:" + schema.getName() + " for mutiple dataNode " + dataNodes; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); } String dataNode = dataNodes.get(0); - //主键查找缓存暂时不实现 - if(tablesAndConditions.isEmpty()){ - List subTables = tableConfig.getDistTables(); - tablesRouteSet.addAll(subTables); - } - - for(Map.Entry>> entry : tablesAndConditions.entrySet()) { - boolean isFoundPartitionValue = partionCol != null && entry.getValue().get(partionCol) != null; - Map> columnsMap = entry.getValue(); - - Set partitionValue = columnsMap.get(partionCol); - if(partitionValue == null || partitionValue.size() == 0) { - tablesRouteSet.addAll(tableConfig.getDistTables()); - } else { - for(ColumnRoutePair pair : partitionValue) { - AbstractPartitionAlgorithm algorithm = tableConfig.getRule().getRuleAlgorithm(); - if(pair.colValue != null) { - Integer tableIndex = algorithm.calculate(pair.colValue); - if(tableIndex == null) { - String msg = "can't find any valid datanode :" + tableConfig.getName() - + " -> " + tableConfig.getPartitionColumn() 
+ " -> " + pair.colValue; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - String subTable = tableConfig.getDistTables().get(tableIndex); - if(subTable != null) { - tablesRouteSet.add(subTable); - if(algorithm instanceof SlotFunction){ - rrs.getDataNodeSlotMap().put(subTable,((SlotFunction) algorithm).slotValue()); - } - } - } - if(pair.rangeValue != null) { - Integer[] tableIndexs = algorithm - .calculateRange(pair.rangeValue.beginValue.toString(), pair.rangeValue.endValue.toString()); - for(Integer idx : tableIndexs) { - String subTable = tableConfig.getDistTables().get(idx); - if(subTable != null) { - tablesRouteSet.add(subTable); - if(algorithm instanceof SlotFunction){ - rrs.getDataNodeSlotMap().put(subTable,((SlotFunction) algorithm).slotValue()); - } - } - } - } - } - } - } - - Object[] subTables = tablesRouteSet.toArray(); - RouteResultsetNode[] nodes = new RouteResultsetNode[subTables.length]; - Map dataNodeSlotMap= rrs.getDataNodeSlotMap(); - for(int i=0;i>> tablesAndConditions, - Map> tablesRouteMap, String sql, LayerCachePool cachePool, boolean isSelect) - throws SQLNonTransientException { - - //为分库表找路由 - for(Map.Entry>> entry : tablesAndConditions.entrySet()) { - String tableName = entry.getKey().toUpperCase(); - TableConfig tableConfig = schema.getTables().get(tableName); - if(tableConfig == null) { - String msg = "can't find table define in schema " - + tableName + " schema:" + schema.getName(); - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - if(tableConfig.getDistTables()!=null && tableConfig.getDistTables().size()>0){ - routeToDistTableNode(tableName,schema,rrs,sql, tablesAndConditions, cachePool,isSelect); - } - //全局表或者不分库的表略过(全局表后面再计算) - if(tableConfig.isGlobalTable() || schema.getTables().get(tableName).getDataNodes().size() == 1) { - continue; - } else {//非全局表:分库表、childTable、其他 - Map> columnsMap = entry.getValue(); - String joinKey = tableConfig.getJoinKey(); - String partionCol = 
tableConfig.getPartitionColumn(); - String primaryKey = tableConfig.getPrimaryKey(); - boolean isFoundPartitionValue = partionCol != null && entry.getValue().get(partionCol) != null; - boolean isLoadData=false; + //主键查找缓存暂时不实现 + if (tablesAndConditions.isEmpty()) { + // 新增对汇总表的支持 + if (null != tableConfig.getSumTable()) tablesRouteSet.add(tableConfig.getSumTable()); + else { + List subTables = tableConfig.getDistTables(); + tablesRouteSet.addAll(subTables); + } + } + + for (Map.Entry>> entry : tablesAndConditions.entrySet()) { + boolean isFoundPartitionValue = partionCol != null && entry.getValue().get(partionCol) != null; + Map> columnsMap = entry.getValue(); + + Set partitionValue = columnsMap.get(partionCol); + if (partitionValue == null || partitionValue.size() == 0) { + tablesRouteSet.addAll(tableConfig.getDistTables()); + } else { + for (ColumnRoutePair pair : partitionValue) { + AbstractPartitionAlgorithm algorithm = tableConfig.getRule().getRuleAlgorithm(); + if (pair.colValue != null) { + Integer tableIndex = algorithm.calculate(pair.colValue); + if (tableIndex == null) { + String msg = "can't find any valid datanode :" + tableConfig.getName() + + " -> " + tableConfig.getPartitionColumn() + " -> " + pair.colValue; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + String subTable = tableConfig.getDistTables().get(tableIndex); + if (subTable != null) { + tablesRouteSet.add(subTable); + if (algorithm instanceof SlotFunction) { + rrs.getDataNodeSlotMap().put(subTable, ((SlotFunction) algorithm).slotValue()); + } + } + } + if (pair.rangeValue != null) { + Integer[] tableIndexs = algorithm + .calculateRange(pair.rangeValue.beginValue.toString(), pair.rangeValue.endValue.toString()); + for (Integer idx : tableIndexs) { + String subTable = tableConfig.getDistTables().get(idx); + if (subTable != null) { + tablesRouteSet.add(subTable); + if (algorithm instanceof SlotFunction) { + rrs.getDataNodeSlotMap().put(subTable, ((SlotFunction) 
algorithm).slotValue()); + } + } + } + } + } + } + } + + Object[] subTables = tablesRouteSet.toArray(); + RouteResultsetNode[] nodes = new RouteResultsetNode[subTables.length]; + Map dataNodeSlotMap = rrs.getDataNodeSlotMap(); + for (int i = 0; i < nodes.length; i++) { + String table = String.valueOf(subTables[i]); + String changeSql = orgSql; + nodes[i] = new RouteResultsetNode(dataNode, rrs.getSqlType(), changeSql);//rrs.getStatement() + nodes[i].setSubTableName(table); + nodes[i].setSource(rrs); + if (rrs.getDataNodeSlotMap().containsKey(dataNode)) { + nodes[i].setSlot(rrs.getDataNodeSlotMap().get(dataNode)); + } + if (rrs.getCanRunInReadDB() != null) { + nodes[i].setCanRunInReadDB(rrs.getCanRunInReadDB()); + } + if (dataNodeSlotMap.containsKey(table)) { + nodes[i].setSlot(dataNodeSlotMap.get(table)); + } + if (rrs.getRunOnSlave() != null) { + nodes[0].setRunOnSlave(rrs.getRunOnSlave()); + } + } + rrs.setNodes(nodes); + rrs.setSubTables(tablesRouteSet); + rrs.setFinishedRoute(true); + + return rrs; + } + + /** + * 处理分库表路由 + */ + public static void findRouteWithcConditionsForTables(SchemaConfig schema, RouteResultset rrs, + Map>> tablesAndConditions, + Map> tablesRouteMap, String sql, LayerCachePool cachePool, boolean isSelect) + throws SQLNonTransientException { + + //为分库表找路由 + for (Map.Entry>> entry : tablesAndConditions.entrySet()) { + String tableName = entry.getKey().toUpperCase(); + TableConfig tableConfig = schema.getTables().get(tableName); + if (tableConfig == null) { + String msg = "can't find table define in schema " + + tableName + " schema:" + schema.getName(); + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + if (tableConfig.getDistTables() != null && tableConfig.getDistTables().size() > 0) { + routeToDistTableNode(tableName, schema, rrs, sql, tablesAndConditions, cachePool, isSelect); + } + //全局表或者不分库的表略过(全局表后面再计算) + if (tableConfig.isGlobalTable() || schema.getTables().get(tableName).getDataNodes().size() == 1) { + continue; + } 
else {//非全局表:分库表、childTable、其他 + Map> columnsMap = entry.getValue(); + String joinKey = tableConfig.getJoinKey(); + String partionCol = tableConfig.getPartitionColumn(); + String primaryKey = tableConfig.getPrimaryKey(); + boolean isFoundPartitionValue = partionCol != null && entry.getValue().get(partionCol) != null; + boolean isLoadData = false; if (LOGGER.isDebugEnabled() - && sql.startsWith(LoadData.loadDataHint)||rrs.isLoadData()) { - //由于load data一次会计算很多路由数据,如果输出此日志会极大降低load data的性能 - isLoadData=true; + && sql.startsWith(LoadData.loadDataHint) || rrs.isLoadData()) { + //由于load data一次会计算很多路由数据,如果输出此日志会极大降低load data的性能 + isLoadData = true; + } + if (entry.getValue().get(primaryKey) != null && entry.getValue().size() == 1 && !isLoadData) {//主键查找 + // try by primary key if found in cache + Set primaryKeyPairs = entry.getValue().get(primaryKey); + if (primaryKeyPairs != null) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("try to find cache by primary key "); + } + String tableKey = schema.getName() + '_' + tableName; + boolean allFound = true; + for (ColumnRoutePair pair : primaryKeyPairs) {//可能id in(1,2,3)多主键 + String cacheKey = pair.colValue; + String dataNode = (String) cachePool.get(tableKey, cacheKey); + if (dataNode == null) { + allFound = false; + continue; + } else { + if (tablesRouteMap.get(tableName) == null) { + tablesRouteMap.put(tableName, new HashSet()); + } + tablesRouteMap.get(tableName).add(dataNode); + continue; + } + } + if (!allFound) { + // need cache primary key ->datanode relation + if (isSelect && tableConfig.getPrimaryKey() != null) { + rrs.setPrimaryKey(tableKey + '.' 
+ tableConfig.getPrimaryKey()); + } + } else {//主键缓存中找到了就执行循环的下一轮 + continue; + } + } } - if(entry.getValue().get(primaryKey) != null && entry.getValue().size() == 1&&!isLoadData) - {//主键查找 - // try by primary key if found in cache - Set primaryKeyPairs = entry.getValue().get(primaryKey); - if (primaryKeyPairs != null) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("try to find cache by primary key "); - } - String tableKey = schema.getName() + '_' + tableName; - boolean allFound = true; - for (ColumnRoutePair pair : primaryKeyPairs) {//可能id in(1,2,3)多主键 - String cacheKey = pair.colValue; - String dataNode = (String) cachePool.get(tableKey, cacheKey); - if (dataNode == null) { - allFound = false; - continue; - } else { - if(tablesRouteMap.get(tableName) == null) { - tablesRouteMap.put(tableName, new HashSet()); - } - tablesRouteMap.get(tableName).add(dataNode); - continue; - } - } - if (!allFound) { - // need cache primary key ->datanode relation - if (isSelect && tableConfig.getPrimaryKey() != null) { - rrs.setPrimaryKey(tableKey + '.' 
+ tableConfig.getPrimaryKey()); - } - } else {//主键缓存中找到了就执行循环的下一轮 - continue; - } - } - } - if (isFoundPartitionValue) {//分库表 - Set partitionValue = columnsMap.get(partionCol); - if(partitionValue == null || partitionValue.size() == 0) { - if(tablesRouteMap.get(tableName) == null) { - tablesRouteMap.put(tableName, new HashSet()); - } - tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); - } else { - for(ColumnRoutePair pair : partitionValue) { - AbstractPartitionAlgorithm algorithm = tableConfig.getRule().getRuleAlgorithm(); - if(pair.colValue != null) { - Integer nodeIndex = algorithm.calculate(pair.colValue); - if(nodeIndex == null) { - String msg = "can't find any valid datanode :" + tableConfig.getName() - + " -> " + tableConfig.getPartitionColumn() + " -> " + pair.colValue; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - - ArrayList dataNodes = tableConfig.getDataNodes(); - String node; - if (nodeIndex >=0 && nodeIndex < dataNodes.size()) { - node = dataNodes.get(nodeIndex); - - } else { - node = null; - String msg = "Can't find a valid data node for specified node index :" - + tableConfig.getName() + " -> " + tableConfig.getPartitionColumn() - + " -> " + pair.colValue + " -> " + "Index : " + nodeIndex; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - if(node != null) { - if(tablesRouteMap.get(tableName) == null) { - tablesRouteMap.put(tableName, new HashSet()); - } - if(algorithm instanceof SlotFunction){ - rrs.getDataNodeSlotMap().put(node,((SlotFunction) algorithm).slotValue()); - } - tablesRouteMap.get(tableName).add(node); - } - } - if(pair.rangeValue != null) { - Integer[] nodeIndexs = algorithm - .calculateRange(pair.rangeValue.beginValue.toString(), pair.rangeValue.endValue.toString()); - ArrayList dataNodes = tableConfig.getDataNodes(); - String node; - for(Integer idx : nodeIndexs) { - if (idx >= 0 && idx < dataNodes.size()) { - node = dataNodes.get(idx); - } else { - String msg = "Can't find 
valid data node(s) for some of specified node indexes :" - + tableConfig.getName() + " -> " + tableConfig.getPartitionColumn(); - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - if(node != null) { - if(tablesRouteMap.get(tableName) == null) { - tablesRouteMap.put(tableName, new HashSet()); - } - if(algorithm instanceof SlotFunction){ - rrs.getDataNodeSlotMap().put(node,((SlotFunction) algorithm).slotValue()); - } - tablesRouteMap.get(tableName).add(node); - - } - } - } - } - } - } else if(joinKey != null && columnsMap.get(joinKey) != null && columnsMap.get(joinKey).size() != 0) {//childTable (如果是select 语句的父子表join)之前要找到root table,将childTable移除,只留下root table - Set joinKeyValue = columnsMap.get(joinKey); - - Set dataNodeSet = ruleByJoinValueCalculate(rrs, tableConfig, joinKeyValue); - - if (dataNodeSet.isEmpty()) { - throw new SQLNonTransientException( - "parent key can't find any valid datanode "); - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("found partion nodes (using parent partion rule directly) for child table to update " - + Arrays.toString(dataNodeSet.toArray()) + " sql :" + sql); - } - if (dataNodeSet.size() > 1) { - routeToMultiNode(rrs.isCacheAble(), rrs, dataNodeSet, sql); - rrs.setFinishedRoute(true); - return; - } else { - rrs.setCacheAble(true); - routeToSingleNode(rrs, dataNodeSet.iterator().next(), sql); - return; - } - - } else { - //没找到拆分字段,该表的所有节点都路由 - if(tablesRouteMap.get(tableName) == null) { - tablesRouteMap.put(tableName, new HashSet()); - } - boolean isSlotFunction= tableConfig.getRule() != null && tableConfig.getRule().getRuleAlgorithm() instanceof SlotFunction; - if(isSlotFunction){ - for (String dn : tableConfig.getDataNodes()) { - rrs.getDataNodeSlotMap().put(dn,-1); - } - } - tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); - } - } - } - } - - public static boolean isAllGlobalTable(DruidShardingParseInfo ctx, SchemaConfig schema) { - boolean isAllGlobal = false; - for(String table : 
ctx.getTables()) { - TableConfig tableConfig = schema.getTables().get(table); - if(tableConfig!=null && tableConfig.isGlobalTable()) { - isAllGlobal = true; - } else { - return false; - } - } - return isAllGlobal; - } - - /** - * - * @param schema - * @param ctx - * @param tc - * @return true表示校验通过,false表示检验不通过 - */ - public static boolean checkRuleRequired(SchemaConfig schema, DruidShardingParseInfo ctx, RouteCalculateUnit routeUnit, TableConfig tc) { - if(!tc.isRuleRequired()) { - return true; - } - boolean hasRequiredValue = false; - String tableName = tc.getName(); - if(routeUnit.getTablesAndConditions().get(tableName) == null || routeUnit.getTablesAndConditions().get(tableName).size() == 0) { - hasRequiredValue = false; - } else { - for(Map.Entry> condition : routeUnit.getTablesAndConditions().get(tableName).entrySet()) { - - String colName = condition.getKey(); - //条件字段是拆分字段 - if(colName.equals(tc.getPartitionColumn())) { - hasRequiredValue = true; - break; - } - } - } - return hasRequiredValue; - } - - - /** - * 增加判断支持未配置分片的表走默认的dataNode - * @param schemaConfig - * @param tableName - * @return - */ - public static boolean isNoSharding(SchemaConfig schemaConfig, String tableName) { - // Table名字被转化为大写的,存储在schema - tableName = tableName.toUpperCase(); - if (schemaConfig.isNoSharding()) { - return true; - } - - if (schemaConfig.getDataNode() != null && !schemaConfig.getTables().containsKey(tableName)) { - return true; - } - - return false; - } - - /** - * 系统表判断,某些sql语句会查询系统表或者跟系统表关联 - * @author lian - * @date 2016年12月2日 - * @param tableName - * @return - */ - public static boolean isSystemSchema(String tableName) { - // 以information_schema, mysql开头的是系统表 - if (tableName.startsWith("INFORMATION_SCHEMA.") - || tableName.startsWith("MYSQL.") - || tableName.startsWith("PERFORMANCE_SCHEMA.")) { - return true; - } - - return false; - } - - /** - * 判断条件是否永真 - * @param expr - * @return - */ - public static boolean isConditionAlwaysTrue(SQLExpr expr) { - Object o = 
WallVisitorUtils.getValue(expr); - if(Boolean.TRUE.equals(o)) { - return true; - } - return false; - } - - /** - * 判断条件是否永假的 - * @param expr - * @return - */ - public static boolean isConditionAlwaysFalse(SQLExpr expr) { - Object o = WallVisitorUtils.getValue(expr); - if(Boolean.FALSE.equals(o)) { - return true; - } - return false; - } - - - /** - * 该方法,返回是否是ER子表 - * @param schema - * @param origSQL - * @param sc - * @return - * @throws SQLNonTransientException - * - * 备注说明: - * edit by ding.w at 2017.4.28, 主要处理 CLIENT_MULTI_STATEMENTS(insert into ; insert into)的情况 - * 目前仅支持mysql,并COM_QUERY请求包中的所有insert语句要么全部是er表,要么全部不是 - * - * - */ - public static boolean processERChildTable(final SchemaConfig schema, final String origSQL, - final ServerConnection sc) throws SQLNonTransientException { - - MySqlStatementParser parser = new MySqlStatementParser(origSQL); - List statements = parser.parseStatementList(); - - if(statements == null || statements.isEmpty() ) { - throw new SQLNonTransientException(String.format("无效的SQL语句:%s", origSQL)); - } - - - boolean erFlag = false; //是否是er表 - for(SQLStatement stmt : statements ) { - MySqlInsertStatement insertStmt = (MySqlInsertStatement) stmt; - String tableName = insertStmt.getTableName().getSimpleName().toUpperCase(); - final TableConfig tc = schema.getTables().get(tableName); - - if (null != tc && tc.isChildTable()) { - erFlag = true; - - String sql = insertStmt.toString(); - - final RouteResultset rrs = new RouteResultset(sql, ServerParse.INSERT); - String joinKey = tc.getJoinKey(); - //因为是Insert语句,用MySqlInsertStatement进行parse + if (isFoundPartitionValue) {//分库表 + Set partitionValue = columnsMap.get(partionCol); + if (partitionValue == null || partitionValue.size() == 0) { + if (tablesRouteMap.get(tableName) == null) { + tablesRouteMap.put(tableName, new HashSet()); + } + tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); + } else { + for (ColumnRoutePair pair : partitionValue) { + AbstractPartitionAlgorithm 
algorithm = tableConfig.getRule().getRuleAlgorithm(); + if (pair.colValue != null) { + Integer nodeIndex = algorithm.calculate(pair.colValue); + if (nodeIndex == null) { + String msg = "can't find any valid datanode :" + tableConfig.getName() + + " -> " + tableConfig.getPartitionColumn() + " -> " + pair.colValue; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + + ArrayList dataNodes = tableConfig.getDataNodes(); + String node; + if (nodeIndex >= 0 && nodeIndex < dataNodes.size()) { + node = dataNodes.get(nodeIndex); + + } else { + node = null; + String msg = "Can't find a valid data node for specified node index :" + + tableConfig.getName() + " -> " + tableConfig.getPartitionColumn() + + " -> " + pair.colValue + " -> " + "Index : " + nodeIndex; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + if (node != null) { + if (tablesRouteMap.get(tableName) == null) { + tablesRouteMap.put(tableName, new HashSet()); + } + if (algorithm instanceof SlotFunction) { + rrs.getDataNodeSlotMap().put(node, ((SlotFunction) algorithm).slotValue()); + } + tablesRouteMap.get(tableName).add(node); + } + } + if (pair.rangeValue != null) { + Integer[] nodeIndexs = algorithm + .calculateRange(pair.rangeValue.beginValue.toString(), pair.rangeValue.endValue.toString()); + ArrayList dataNodes = tableConfig.getDataNodes(); + String node; + for (Integer idx : nodeIndexs) { + if (idx >= 0 && idx < dataNodes.size()) { + node = dataNodes.get(idx); + } else { + String msg = "Can't find valid data node(s) for some of specified node indexes :" + + tableConfig.getName() + " -> " + tableConfig.getPartitionColumn(); + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + if (node != null) { + if (tablesRouteMap.get(tableName) == null) { + tablesRouteMap.put(tableName, new HashSet()); + } + if (algorithm instanceof SlotFunction) { + rrs.getDataNodeSlotMap().put(node, ((SlotFunction) algorithm).slotValue()); + } + tablesRouteMap.get(tableName).add(node); 
+ + } + } + } + } + } + } else if (joinKey != null && columnsMap.get(joinKey) != null && columnsMap.get(joinKey).size() != 0) {//childTable (如果是select 语句的父子表join)之前要找到root table,将childTable移除,只留下root table + Set joinKeyValue = columnsMap.get(joinKey); + + Set dataNodeSet = ruleByJoinValueCalculate(rrs, tableConfig, joinKeyValue); + + if (dataNodeSet.isEmpty()) { + throw new SQLNonTransientException( + "parent key can't find any valid datanode "); + } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("found partion nodes (using parent partion rule directly) for child table to update " + + Arrays.toString(dataNodeSet.toArray()) + " sql :" + sql); + } + if (dataNodeSet.size() > 1) { + routeToMultiNode(rrs.isCacheAble(), rrs, dataNodeSet, sql); + rrs.setFinishedRoute(true); + return; + } else { + rrs.setCacheAble(true); + routeToSingleNode(rrs, dataNodeSet.iterator().next(), sql); + return; + } + + } else { + //没找到拆分字段,该表的所有节点都路由 + if (tablesRouteMap.get(tableName) == null) { + tablesRouteMap.put(tableName, new HashSet()); + } + boolean isSlotFunction = tableConfig.getRule() != null && tableConfig.getRule().getRuleAlgorithm() instanceof SlotFunction; + if (isSlotFunction) { + for (String dn : tableConfig.getDataNodes()) { + rrs.getDataNodeSlotMap().put(dn, -1); + } + } + tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); + } + } + } + } + + public static boolean isAllGlobalTable(DruidShardingParseInfo ctx, SchemaConfig schema) { + boolean isAllGlobal = false; + for (String table : ctx.getTables()) { + TableConfig tableConfig = schema.getTables().get(table); + if (tableConfig != null && tableConfig.isGlobalTable()) { + isAllGlobal = true; + } else { + return false; + } + } + return isAllGlobal; + } + + /** + * @param schema + * @param ctx + * @param tc + * @return true表示校验通过,false表示检验不通过 + */ + public static boolean checkRuleRequired(SchemaConfig schema, DruidShardingParseInfo ctx, RouteCalculateUnit routeUnit, TableConfig tc) { + if (!tc.isRuleRequired()) 
{ + return true; + } + boolean hasRequiredValue = false; + String tableName = tc.getName(); + if (routeUnit.getTablesAndConditions().get(tableName) == null || routeUnit.getTablesAndConditions().get(tableName).size() == 0) { + hasRequiredValue = false; + } else { + for (Map.Entry> condition : routeUnit.getTablesAndConditions().get(tableName).entrySet()) { + + String colName = condition.getKey(); + //条件字段是拆分字段 + if (colName.equals(tc.getPartitionColumn())) { + hasRequiredValue = true; + break; + } + } + } + return hasRequiredValue; + } + + + /** + * 增加判断支持未配置分片的表走默认的dataNode + * + * @param schemaConfig + * @param tableName + * @return + */ + public static boolean isNoSharding(SchemaConfig schemaConfig, String tableName) { + // Table名字被转化为大写的,存储在schema + tableName = tableName.toUpperCase(); + if (schemaConfig.isNoSharding()) { + return true; + } + + if (schemaConfig.getDataNode() != null && !schemaConfig.getTables().containsKey(tableName)) { + return true; + } + + return false; + } + + /** + * 系统表判断,某些sql语句会查询系统表或者跟系统表关联 + * + * @param tableName + * @return + * @author lian + * @date 2016年12月2日 + */ + public static boolean isSystemSchema(String tableName) { + // 以information_schema, mysql开头的是系统表 + if (tableName.startsWith("INFORMATION_SCHEMA.") + || tableName.startsWith("MYSQL.") + || tableName.startsWith("PERFORMANCE_SCHEMA.")) { + return true; + } + + return false; + } + + /** + * 判断条件是否永真 + * + * @param expr + * @return + */ + public static boolean isConditionAlwaysTrue(SQLExpr expr) { + Object o = WallVisitorUtils.getValue(expr); + if (Boolean.TRUE.equals(o)) { + return true; + } + return false; + } + + /** + * 判断条件是否永假的 + * + * @param expr + * @return + */ + public static boolean isConditionAlwaysFalse(SQLExpr expr) { + Object o = WallVisitorUtils.getValue(expr); + if (Boolean.FALSE.equals(o)) { + return true; + } + return false; + } + + + /** + * 该方法,返回是否是ER子表 + * + * @param schema + * @param origSQL + * @param sc + * @return + * @throws SQLNonTransientException 
备注说明: + * edit by ding.w at 2017.4.28, 主要处理 CLIENT_MULTI_STATEMENTS(insert into ; insert into)的情况 + * 目前仅支持mysql,并COM_QUERY请求包中的所有insert语句要么全部是er表,要么全部不是 + */ + public static boolean processERChildTable(final SchemaConfig schema, final String origSQL, + final ServerConnection sc) throws SQLNonTransientException { + + MySqlStatementParser parser = new MySqlStatementParser(origSQL); + List statements = parser.parseStatementList(); + + if (statements == null || statements.isEmpty()) { + throw new SQLNonTransientException(String.format("无效的SQL语句:%s", origSQL)); + } + + + boolean erFlag = false; //是否是er表 + for (SQLStatement stmt : statements) { + MySqlInsertStatement insertStmt = (MySqlInsertStatement) stmt; + String tableName = insertStmt.getTableName().getSimpleName().toUpperCase(); + final TableConfig tc = schema.getTables().get(tableName); + + if (null != tc && tc.isChildTable()) { + erFlag = true; + + String sql = insertStmt.toString(); + + final RouteResultset rrs = new RouteResultset(sql, ServerParse.INSERT); + String joinKey = tc.getJoinKey(); + //因为是Insert语句,用MySqlInsertStatement进行parse // MySqlInsertStatement insertStmt = (MySqlInsertStatement) (new MySqlStatementParser(origSQL)).parseInsert(); - //判断条件完整性,取得解析后语句列中的joinkey列的index - int joinKeyIndex = getJoinKeyIndex(insertStmt.getColumns(), joinKey); - if (joinKeyIndex == -1) { - String inf = "joinKey not provided :" + tc.getJoinKey() + "," + insertStmt; - LOGGER.warn(inf); - throw new SQLNonTransientException(inf); - } - //子表不支持批量插入 - if (isMultiInsert(insertStmt)) { - String msg = "ChildTable multi insert not provided"; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - //取得joinkey的值 - String joinKeyVal = insertStmt.getValues().getValues().get(joinKeyIndex).toString(); - //解决bug #938,当关联字段的值为char类型时,去掉前后"'" - String realVal = joinKeyVal; - if (joinKeyVal.startsWith("'") && joinKeyVal.endsWith("'") && joinKeyVal.length() > 2) { - realVal = joinKeyVal.substring(1, joinKeyVal.length() - 1); - 
} - - - - // try to route by ER parent partion key - //如果是二级子表(父表不再有父表),并且分片字段正好是joinkey字段,调用routeByERParentKey - RouteResultset theRrs = RouterUtil.routeByERParentKey(sc, schema, ServerParse.INSERT, sql, rrs, tc, realVal); - if (theRrs != null) { - boolean processedInsert=false; - //判断是否需要全局序列号 - if ( sc!=null && tc.isAutoIncrement()) { - String primaryKey = tc.getPrimaryKey(); - processedInsert=processInsert(sc,schema,ServerParse.INSERT,sql,tc.getName(),primaryKey); - } - if(processedInsert==false){ - rrs.setFinishedRoute(true); - sc.getSession2().execute(rrs, ServerParse.INSERT); - } - // return true; - //继续处理下一条 - continue; - } - - // route by sql query root parent's datanode - //如果不是二级子表或者分片字段不是joinKey字段结果为空,则启动异步线程去后台分片查询出datanode - //只要查询出上一级表的parentkey字段的对应值在哪个分片即可 - final String findRootTBSql = tc.getLocateRTableKeySql().toLowerCase() + joinKeyVal; - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("find root parent's node sql " + findRootTBSql); - } - - ListenableFuture listenableFuture = MycatServer.getInstance(). 
- getListeningExecutorService().submit(new Callable() { - @Override - public String call() throws Exception { - FetchStoreNodeOfChildTableHandler fetchHandler = new FetchStoreNodeOfChildTableHandler(); + //判断条件完整性,取得解析后语句列中的joinkey列的index + int joinKeyIndex = getJoinKeyIndex(insertStmt.getColumns(), joinKey); + if (joinKeyIndex == -1) { + String inf = "joinKey not provided :" + tc.getJoinKey() + "," + insertStmt; + LOGGER.warn(inf); + throw new SQLNonTransientException(inf); + } + //子表不支持批量插入 + if (isMultiInsert(insertStmt)) { + String msg = "ChildTable multi insert not provided"; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + //取得joinkey的值 + String joinKeyVal = insertStmt.getValues().getValues().get(joinKeyIndex).toString(); + //解决bug #938,当关联字段的值为char类型时,去掉前后"'" + String realVal = joinKeyVal; + if (joinKeyVal.startsWith("'") && joinKeyVal.endsWith("'") && joinKeyVal.length() > 2) { + realVal = joinKeyVal.substring(1, joinKeyVal.length() - 1); + } + + + // try to route by ER parent partion key + //如果是二级子表(父表不再有父表),并且分片字段正好是joinkey字段,调用routeByERParentKey + RouteResultset theRrs = RouterUtil.routeByERParentKey(sc, schema, ServerParse.INSERT, sql, rrs, tc, realVal); + if (theRrs != null) { + boolean processedInsert = false; + //判断是否需要全局序列号 + if (sc != null && tc.isAutoIncrement()) { + String primaryKey = tc.getPrimaryKey(); + processedInsert = processInsert(sc, schema, ServerParse.INSERT, sql, tc.getName(), primaryKey); + } + if (processedInsert == false) { + rrs.setFinishedRoute(true); + sc.getSession2().execute(rrs, ServerParse.INSERT); + } + // return true; + //继续处理下一条 + continue; + } + + // route by sql query root parent's datanode + //如果不是二级子表或者分片字段不是joinKey字段结果为空,则启动异步线程去后台分片查询出datanode + //只要查询出上一级表的parentkey字段的对应值在哪个分片即可 + final String findRootTBSql = tc.getLocateRTableKeySql().toLowerCase() + joinKeyVal; + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("find root parent's node sql " + findRootTBSql); + } + + ListenableFuture 
listenableFuture = MycatServer.getInstance(). + getListeningExecutorService().submit(new Callable() { + @Override + public String call() throws Exception { + FetchStoreNodeOfChildTableHandler fetchHandler = new FetchStoreNodeOfChildTableHandler(); // return fetchHandler.execute(schema.getName(), findRootTBSql, tc.getRootParent().getDataNodes()); - return fetchHandler.execute(schema.getName(), findRootTBSql, tc.getRootParent().getDataNodes(), sc); - } - }); - - - Futures.addCallback(listenableFuture, new FutureCallback() { - @Override - public void onSuccess(String result) { - //结果为空,证明上一级表中不存在那条记录,失败 - if (Strings.isNullOrEmpty(result)) { - StringBuilder s = new StringBuilder(); - LOGGER.warn(s.append(sc.getSession2()).append(origSQL).toString() + - " err:" + "can't find (root) parent sharding node for sql:" + origSQL); - if(!sc.isAutocommit()) { // 处于事务下失败, 必须回滚 - sc.setTxInterrupt("can't find (root) parent sharding node for sql:" + origSQL); - } - sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, "can't find (root) parent sharding node for sql:" + origSQL); - return; - } - - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("found partion node for child table to insert " + result + " sql :" + origSQL); - } - //找到分片,进行插入(和其他的一样,需要判断是否需要全局自增ID) - boolean processedInsert=false; - if ( sc!=null && tc.isAutoIncrement()) { - try { - String primaryKey = tc.getPrimaryKey(); - processedInsert=processInsert(sc,schema,ServerParse.INSERT,origSQL,tc.getName(),primaryKey); - } catch (SQLNonTransientException e) { - LOGGER.warn("sequence processInsert error,",e); - sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR , "sequence processInsert error," + e.getMessage()); - } - } - if(processedInsert==false){ - RouteResultset executeRrs = RouterUtil.routeToSingleNode(rrs, result, origSQL); - sc.getSession2().execute(executeRrs, ServerParse.INSERT); - } - - } - - @Override - public void onFailure(Throwable t) { - StringBuilder s = new StringBuilder(); - 
LOGGER.warn(s.append(sc.getSession2()).append(origSQL).toString() + - " err:" + t.getMessage()); - sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, t.getMessage() + " " + s.toString()); - } - }, MycatServer.getInstance(). - getListeningExecutorService()); - - } else if(erFlag) { - throw new SQLNonTransientException(String.format("%s包含不是ER分片的表", origSQL)); - } - } - - - return erFlag; - } - - /** - * 寻找joinKey的索引 - * - * @param columns - * @param joinKey - * @return -1表示没找到,>=0表示找到了 - */ - private static int getJoinKeyIndex(List columns, String joinKey) { - for (int i = 0; i < columns.size(); i++) { - String col = StringUtil.removeBackquote(columns.get(i).toString()).toUpperCase(); - if (col.equals(joinKey)) { - return i; - } - } - return -1; - } - - /** - * 是否为批量插入:insert into ...values (),()...或 insert into ...select..... - * - * @param insertStmt - * @return - */ - private static boolean isMultiInsert(MySqlInsertStatement insertStmt) { - return (insertStmt.getValuesList() != null && insertStmt.getValuesList().size() > 1) - || insertStmt.getQuery() != null; - } + return fetchHandler.execute(schema.getName(), findRootTBSql, tc.getRootParent().getDataNodes(), sc); + } + }); + + + Futures.addCallback(listenableFuture, new FutureCallback() { + @Override + public void onSuccess(String result) { + //结果为空,证明上一级表中不存在那条记录,失败 + if (Strings.isNullOrEmpty(result)) { + StringBuilder s = new StringBuilder(); + LOGGER.warn(s.append(sc.getSession2()).append(origSQL).toString() + + " err:" + "can't find (root) parent sharding node for sql:" + origSQL); + if (!sc.isAutocommit()) { // 处于事务下失败, 必须回滚 + sc.setTxInterrupt("can't find (root) parent sharding node for sql:" + origSQL); + } + sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, "can't find (root) parent sharding node for sql:" + origSQL); + return; + } + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("found partion node for child table to insert " + result + " sql :" + origSQL); + } + //找到分片,进行插入(和其他的一样,需要判断是否需要全局自增ID) + boolean 
processedInsert = false; + if (sc != null && tc.isAutoIncrement()) { + try { + String primaryKey = tc.getPrimaryKey(); + processedInsert = processInsert(sc, schema, ServerParse.INSERT, origSQL, tc.getName(), primaryKey); + } catch (SQLNonTransientException e) { + LOGGER.warn("sequence processInsert error,", e); + sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, "sequence processInsert error," + e.getMessage()); + } + } + if (processedInsert == false) { + RouteResultset executeRrs = RouterUtil.routeToSingleNode(rrs, result, origSQL); + sc.getSession2().execute(executeRrs, ServerParse.INSERT); + } + + } + + @Override + public void onFailure(Throwable t) { + StringBuilder s = new StringBuilder(); + LOGGER.warn(s.append(sc.getSession2()).append(origSQL).toString() + + " err:" + t.getMessage()); + sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, t.getMessage() + " " + s.toString()); + } + }, MycatServer.getInstance(). + getListeningExecutorService()); + + } else if (erFlag) { + throw new SQLNonTransientException(String.format("%s包含不是ER分片的表", origSQL)); + } + } + + + return erFlag; + } + + /** + * 寻找joinKey的索引 + * + * @param columns + * @param joinKey + * @return -1表示没找到,>=0表示找到了 + */ + private static int getJoinKeyIndex(List columns, String joinKey) { + for (int i = 0; i < columns.size(); i++) { + String col = StringUtil.removeBackquote(columns.get(i).toString()).toUpperCase(); + if (col.equals(joinKey)) { + return i; + } + } + return -1; + } + + /** + * 是否为批量插入:insert into ...values (),()...或 insert into ...select..... 
+ * + * @param insertStmt + * @return + */ + private static boolean isMultiInsert(MySqlInsertStatement insertStmt) { + return (insertStmt.getValuesList() != null && insertStmt.getValuesList().size() > 1) + || insertStmt.getQuery() != null; + } } diff --git a/src/main/java/io/mycat/statistic/stat/TableStatAnalyzer.java b/src/main/java/io/mycat/statistic/stat/TableStatAnalyzer.java index 219dfa82c..5530136f8 100644 --- a/src/main/java/io/mycat/statistic/stat/TableStatAnalyzer.java +++ b/src/main/java/io/mycat/statistic/stat/TableStatAnalyzer.java @@ -154,7 +154,7 @@ private String fixName(String tableName) { public List parseTableNames(String sql) { final List tables = new ArrayList(); try{ - + SQLStatement stmt = parseStmt(sql); if (stmt instanceof MySqlReplaceStatement ) { String table = ((MySqlReplaceStatement)stmt).getTableName().getSimpleName(); diff --git a/src/main/java/io/mycat/util/ByteUtil.java b/src/main/java/io/mycat/util/ByteUtil.java index 721e02861..50e6856c3 100644 --- a/src/main/java/io/mycat/util/ByteUtil.java +++ b/src/main/java/io/mycat/util/ByteUtil.java @@ -28,321 +28,362 @@ public class ByteUtil { - /** - * compare to number or dicamal ascii byte array, for number :123456 ,store - * to array [1,2,3,4,5,6] - * - * @param b1 - * @param b2 - * @return -1 means b1 < b2, or 0 means b1=b2 else return 1 - */ - public static int compareNumberByte(byte[] b1, byte[] b2) { - if(b1 == null || b1.length == 0) { - return -1; - } - else if(b2 == null || b2.length == 0) { - return 1; - } - boolean isNegetive = b1[0] == 45 || b2[0] == 45; - if (isNegetive == false && b1.length != b2.length) { - return b1.length - b2.length; - } - int len = b1.length > b2.length ? 
b2.length : b1.length; - int result = 0; - int index = -1; - for (int i = 0; i < len; i++) { - int b1val = b1[i]; - int b2val = b2[i]; - if (b1val > b2val) { - result = 1; - index = i; - break; - } else if (b1val < b2val) { - index = i; - result = -1; - break; - } - } - if (index == 0) { - // first byte compare - return result; - } else { - if( b1.length != b2.length ) { - - int lenDelta = b1.length - b2.length; - return isNegetive ? 0 - lenDelta : lenDelta; - - } else { - return isNegetive ? 0 - result : result; + /** + * compare to number or dicamal ascii byte array, for number :123456 ,store + * to array [1,2,3,4,5,6] + * + * @param b1 + * @param b2 + * @return -1 means b1 < b2, or 0 means b1=b2 else return 1 + */ + public static int compareNumberByte(byte[] b1, byte[] b2) { + if ((b1 == null || b1.length == 0) && b2 != null && b2.length != 0) { + return -1; + } else if (b1 == null || b1.length == 0) { + // 此时 b2 == null || b2.length == 0 为 true + return 0; + } else if (b2 == null || b2.length == 0) { + return 1; + } + // 判断正负 + boolean b1IsNegative = b1[0] == 45; + boolean b2IsNegative = b2[0] == 45; + if (b1IsNegative != b2IsNegative) return b1IsNegative ? 
-1 : 1; + // 只比较整数部分 + byte[] longB1 = getLongBytes(b1); + int longB1Length = longB1.length; + byte[] longB2 = getLongBytes(b2); + int longB2Length = longB2.length; + // 位数不同 + if (longB1Length != longB2Length) { + if (!b1IsNegative) return longB1.length - longB2.length; + else return longB2.length - longB1.length; + } + // 位数相同 比较每一位的大小 + int result = 0; + int index = -1; + int length = b1.length; + if (b1.length < b2.length) length = b2.length; + for (int i = 0; i < length; i++) { + int b1val = 48; // '0' + int b2val = 48; // '0' + if (i < b1.length) + b1val = b1[i]; + if (i < b2.length) + b2val = b2[i]; + if (b1val > b2val) { + result = 1; + index = i; + break; + } else if (b1val < b2val) { + index = i; + result = -1; + break; } - } - } - - public static byte[] compareNumberArray2(byte[] b1, byte[] b2, int order) { - if (b1.length <= 0 && b2.length > 0) { - return b2; - } - if (b1.length > 0 && b2.length <= 0) { - return b1; - } - int len = b1.length > b2.length ? b1.length : b2.length; - for (int i = 0; i < len; i++) { - if (b1[i] != b2[i]) { - if (order == 1) { - return ((b1[i] & 0xff) - (b2[i] & 0xff)) > 0 ? b1 : b2; - } else { - return ((b1[i] & 0xff) - (b2[i] & 0xff)) > 0 ? 
b2 : b1; - } - } - } - - return b1; - } - - public static byte[] getBytes(short data) { - byte[] bytes = new byte[2]; - bytes[0] = (byte) (data & 0xff); - bytes[1] = (byte) ((data & 0xff00) >> 8); - return bytes; - } - - public static byte[] getBytes(char data) { - byte[] bytes = new byte[2]; - bytes[0] = (byte) (data); - bytes[1] = (byte) (data >> 8); - return bytes; - } - - public static byte[] getBytes(int data) { - byte[] bytes = new byte[4]; - bytes[0] = (byte) (data & 0xff); - bytes[1] = (byte) ((data & 0xff00) >> 8); - bytes[2] = (byte) ((data & 0xff0000) >> 16); - bytes[3] = (byte) ((data & 0xff000000) >> 24); - return bytes; - } - - public static byte[] getBytes(long data) { - byte[] bytes = new byte[8]; - bytes[0] = (byte) (data & 0xff); - bytes[1] = (byte) ((data >> 8) & 0xff); - bytes[2] = (byte) ((data >> 16) & 0xff); - bytes[3] = (byte) ((data >> 24) & 0xff); - bytes[4] = (byte) ((data >> 32) & 0xff); - bytes[5] = (byte) ((data >> 40) & 0xff); - bytes[6] = (byte) ((data >> 48) & 0xff); - bytes[7] = (byte) ((data >> 56) & 0xff); - return bytes; - } - - public static byte[] getBytes(float data) { - int intBits = Float.floatToIntBits(data); - return getBytes(intBits); - } - - public static byte[] getBytes(double data) { - long intBits = Double.doubleToLongBits(data); - return getBytes(intBits); - } - - public static byte[] getBytes(String data, String charsetName) { - Charset charset = Charset.forName(charsetName); - return data.getBytes(charset); - } - - public static byte[] getBytes(String data) { - return getBytes(data, "GBK"); - } - - public static short getShort(byte[] bytes) { - return Short.parseShort(new String(bytes)); + } + if (index == 0) { + // first byte compare + return result; + } else if (index == -1 && b1.length == b2.length) { + // 每一位都一样 + return 0; + } else if (b1IsNegative) { + // 两个数是负数 比较结果求反 + return 0 - result; + } else return result; + } + + public static int compareNumberByte2(byte[] b1, byte[] b2) { + double double1 = 
getDouble(b1); + double double2 = getDouble(b2); + double m = double1 - double2; + if (m == 0) return 0; + else if (m > 0) return 1; + else if (m < 0) return -1; + else throw new RuntimeException("ignored"); + } + + private static byte[] getLongBytes(byte[] b1) { + int longB1Length = 0; + byte[] longB1 = new byte[0]; + for (int i = 0; i < b1.length; i++) { + if (b1[i] == '.') break; + if (longB1Length >= longB1.length - 2) { + byte[] newLongB1 = new byte[longB1Length * 2 + 1]; + System.arraycopy(longB1, 0, newLongB1, 0, longB1.length); + longB1 = newLongB1; + } + longB1Length++; + longB1[i] = b1[i]; + } + return longB1; + } + + public static byte[] compareNumberArray2(byte[] b1, byte[] b2, int order) { + if (b1.length <= 0 && b2.length > 0) { + return b2; + } + if (b1.length > 0 && b2.length <= 0) { + return b1; + } + int len = b1.length > b2.length ? b1.length : b2.length; + for (int i = 0; i < len; i++) { + if (b1[i] != b2[i]) { + if (order == 1) { + return ((b1[i] & 0xff) - (b2[i] & 0xff)) > 0 ? b1 : b2; + } else { + return ((b1[i] & 0xff) - (b2[i] & 0xff)) > 0 ? 
b2 : b1; + } + } + } + + return b1; + } + + public static byte[] getBytes(short data) { + byte[] bytes = new byte[2]; + bytes[0] = (byte) (data & 0xff); + bytes[1] = (byte) ((data & 0xff00) >> 8); + return bytes; + } + + public static byte[] getBytes(char data) { + byte[] bytes = new byte[2]; + bytes[0] = (byte) (data); + bytes[1] = (byte) (data >> 8); + return bytes; + } + + public static byte[] getBytes(int data) { + byte[] bytes = new byte[4]; + bytes[0] = (byte) (data & 0xff); + bytes[1] = (byte) ((data & 0xff00) >> 8); + bytes[2] = (byte) ((data & 0xff0000) >> 16); + bytes[3] = (byte) ((data & 0xff000000) >> 24); + return bytes; + } + + public static byte[] getBytes(long data) { + byte[] bytes = new byte[8]; + bytes[0] = (byte) (data & 0xff); + bytes[1] = (byte) ((data >> 8) & 0xff); + bytes[2] = (byte) ((data >> 16) & 0xff); + bytes[3] = (byte) ((data >> 24) & 0xff); + bytes[4] = (byte) ((data >> 32) & 0xff); + bytes[5] = (byte) ((data >> 40) & 0xff); + bytes[6] = (byte) ((data >> 48) & 0xff); + bytes[7] = (byte) ((data >> 56) & 0xff); + return bytes; + } + + public static byte[] getBytes(float data) { + int intBits = Float.floatToIntBits(data); + return getBytes(intBits); + } + + public static byte[] getBytes(double data) { + long intBits = Double.doubleToLongBits(data); + return getBytes(intBits); + } + + public static byte[] getBytes(String data, String charsetName) { + Charset charset = Charset.forName(charsetName); + return data.getBytes(charset); + } + + public static byte[] getBytes(String data) { + return getBytes(data, "GBK"); + } + + public static short getShort(byte[] bytes) { + return Short.parseShort(new String(bytes)); // return (short) ((0xff & bytes[0]) | (0xff00 & (bytes[1] << 8))); - } - - public static char getChar(byte[] bytes) { - return (char) ((0xff & bytes[0]) | (0xff00 & (bytes[1] << 8))); - } - - public static int getInt(byte[] bytes) { - return Integer.parseInt(new String(bytes)); - // return (0xff & bytes[0]) | (0xff00 & (bytes[1] 
<< 8)) | (0xff0000 & - // (bytes[2] << 16)) | (0xff000000 & (bytes[3] << 24)); - } - - public static long getLong(byte[] bytes) { - return Long.parseLong(new String(bytes)); - // return(0xffL & (long)bytes[0]) | (0xff00L & ((long)bytes[1] << 8)) | - // (0xff0000L & ((long)bytes[2] << 16)) | (0xff000000L & ((long)bytes[3] - // << 24)) - // | (0xff00000000L & ((long)bytes[4] << 32)) | (0xff0000000000L & - // ((long)bytes[5] << 40)) | (0xff000000000000L & ((long)bytes[6] << - // 48)) | (0xff00000000000000L & ((long)bytes[7] << 56)); - } - - public static double getDouble(byte[] bytes) { - return Double.parseDouble(new String(bytes)); - } - - public static float getFloat(byte[] bytes) { - return Float.parseFloat(new String(bytes)); - } - - public static String getString(byte[] bytes, String charsetName) { - return new String(bytes, Charset.forName(charsetName)); - } - - public static String getString(byte[] bytes) { - return getString(bytes, "UTF-8"); - } - - public static String getDate(byte[] bytes) { - return new String(bytes); - } - - public static String getTime(byte[] bytes) { - return new String(bytes); - } - - public static String getTimestmap(byte[] bytes) { - return new String(bytes); - } - - public static byte[] getBytes(Date date, boolean isTime) { - if(isTime) { - return getBytesFromTime(date); - } else { - return getBytesFromDate(date); - } } - - private static byte[] getBytesFromTime(Date date) { - int day = 0; - int hour = DateUtil.getHour(date); - int minute = DateUtil.getMinute(date); - int second = DateUtil.getSecond(date); - int microSecond = DateUtil.getMicroSecond(date); - byte[] bytes = null; - byte[] tmp = null; - if(day == 0 && hour == 0 && minute == 0 - && second == 0 && microSecond == 0) { - bytes = new byte[1]; - bytes[0] = (byte) 0; - } else if(microSecond == 0) { - bytes = new byte[1 + 8]; - bytes[0] = (byte) 8; - bytes[1] = (byte) 0; // is_negative (1) -- (1 if minus, 0 for plus) - tmp = getBytes(day); - bytes[2] = tmp[0]; - bytes[3] = 
tmp[1]; - bytes[4] = tmp[2]; - bytes[5] = tmp[3]; - bytes[6] = (byte) hour; - bytes[7] = (byte) minute; - bytes[8] = (byte) second; - } else { - bytes = new byte[1 + 12]; - bytes[0] = (byte) 12; - bytes[1] = (byte) 0; // is_negative (1) -- (1 if minus, 0 for plus) - tmp = getBytes(day); - bytes[2] = tmp[0]; - bytes[3] = tmp[1]; - bytes[4] = tmp[2]; - bytes[5] = tmp[3]; - bytes[6] = (byte) hour; - bytes[7] = (byte) minute; - bytes[8] = (byte) second; - tmp = getBytes(microSecond); - bytes[9] = tmp[0]; - bytes[10] = tmp[1]; - bytes[11] = tmp[2]; - bytes[12] = tmp[3]; - } - return bytes; - } - - private static byte[] getBytesFromDate(Date date) { - int year = DateUtil.getYear(date); - int month = DateUtil.getMonth(date); - int day = DateUtil.getDay(date); - int hour = DateUtil.getHour(date); - int minute = DateUtil.getMinute(date); - int second = DateUtil.getSecond(date); - int microSecond = DateUtil.getMicroSecond(date); - byte[] bytes = null; - byte[] tmp = null; - if(year == 0 && month == 0 && day == 0 - && hour == 0 && minute == 0 && second == 0 - && microSecond == 0) { - bytes = new byte[1]; - bytes[0] = (byte) 0; - } else if(hour == 0 && minute == 0 && second == 0 - && microSecond == 0) { - bytes = new byte[1 + 4]; - bytes[0] = (byte) 4; - tmp = getBytes((short) year); - bytes[1] = tmp[0]; - bytes[2] = tmp[1]; - bytes[3] = (byte) month; - bytes[4] = (byte) day; - } else if(microSecond == 0) { - bytes = new byte[1 + 7]; - bytes[0] = (byte) 7; - tmp = getBytes((short) year); - bytes[1] = tmp[0]; - bytes[2] = tmp[1]; - bytes[3] = (byte) month; - bytes[4] = (byte) day; - bytes[5] = (byte) hour; - bytes[6] = (byte) minute; - bytes[7] = (byte) second; - } else { - bytes = new byte[1 + 11]; - bytes[0] = (byte) 11; - tmp = getBytes((short) year); - bytes[1] = tmp[0]; - bytes[2] = tmp[1]; - bytes[3] = (byte) month; - bytes[4] = (byte) day; - bytes[5] = (byte) hour; - bytes[6] = (byte) minute; - bytes[7] = (byte) second; - tmp = getBytes(microSecond); - bytes[8] = tmp[0]; 
- bytes[9] = tmp[1]; - bytes[10] = tmp[2]; - bytes[11] = tmp[3]; - } - return bytes; - } - - // 支持 byte dump - //--------------------------------------------------------------------- - public static String dump(byte[] data, int offset, int length) { - - StringBuilder sb = new StringBuilder(); - sb.append(" byte dump log "); - sb.append(System.lineSeparator()); - sb.append(" offset ").append( offset ); - sb.append(" length ").append( length ); - sb.append(System.lineSeparator()); - int lines = (length - 1) / 16 + 1; - for (int i = 0, pos = 0; i < lines; i++, pos += 16) { - sb.append(String.format("0x%04X ", i * 16)); - for (int j = 0, pos1 = pos; j < 16; j++, pos1++) { - sb.append(pos1 < length ? String.format("%02X ", data[offset + pos1]) : " "); - } - sb.append(" "); - for (int j = 0, pos1 = pos; j < 16; j++, pos1++) { - sb.append(pos1 < length ? print(data[offset + pos1]) : '.'); - } - sb.append(System.lineSeparator()); - } - sb.append(length).append(" bytes").append(System.lineSeparator()); - return sb.toString(); - } - - public static char print(byte b) { - return (b < 32 || b > 127) ? '.' 
: (char) b; - } + + public static char getChar(byte[] bytes) { + return (char) ((0xff & bytes[0]) | (0xff00 & (bytes[1] << 8))); + } + + public static int getInt(byte[] bytes) { + return Integer.parseInt(new String(bytes)); + // return (0xff & bytes[0]) | (0xff00 & (bytes[1] << 8)) | (0xff0000 & + // (bytes[2] << 16)) | (0xff000000 & (bytes[3] << 24)); + } + + public static long getLong(byte[] bytes) { + return Long.parseLong(new String(bytes)); + // return(0xffL & (long)bytes[0]) | (0xff00L & ((long)bytes[1] << 8)) | + // (0xff0000L & ((long)bytes[2] << 16)) | (0xff000000L & ((long)bytes[3] + // << 24)) + // | (0xff00000000L & ((long)bytes[4] << 32)) | (0xff0000000000L & + // ((long)bytes[5] << 40)) | (0xff000000000000L & ((long)bytes[6] << + // 48)) | (0xff00000000000000L & ((long)bytes[7] << 56)); + } + + public static double getDouble(byte[] bytes) { + return Double.parseDouble(new String(bytes)); + } + + public static float getFloat(byte[] bytes) { + return Float.parseFloat(new String(bytes)); + } + + public static String getString(byte[] bytes, String charsetName) { + return new String(bytes, Charset.forName(charsetName)); + } + + public static String getString(byte[] bytes) { + return getString(bytes, "UTF-8"); + } + + public static String getDate(byte[] bytes) { + return new String(bytes); + } + + public static String getTime(byte[] bytes) { + return new String(bytes); + } + + public static String getTimestmap(byte[] bytes) { + return new String(bytes); + } + + public static byte[] getBytes(Date date, boolean isTime) { + if (isTime) { + return getBytesFromTime(date); + } else { + return getBytesFromDate(date); + } + } + + private static byte[] getBytesFromTime(Date date) { + int day = 0; + int hour = DateUtil.getHour(date); + int minute = DateUtil.getMinute(date); + int second = DateUtil.getSecond(date); + int microSecond = DateUtil.getMicroSecond(date); + byte[] bytes = null; + byte[] tmp = null; + if (day == 0 && hour == 0 && minute == 0 + && second == 0 
&& microSecond == 0) { + bytes = new byte[1]; + bytes[0] = (byte) 0; + } else if (microSecond == 0) { + bytes = new byte[1 + 8]; + bytes[0] = (byte) 8; + bytes[1] = (byte) 0; // is_negative (1) -- (1 if minus, 0 for plus) + tmp = getBytes(day); + bytes[2] = tmp[0]; + bytes[3] = tmp[1]; + bytes[4] = tmp[2]; + bytes[5] = tmp[3]; + bytes[6] = (byte) hour; + bytes[7] = (byte) minute; + bytes[8] = (byte) second; + } else { + bytes = new byte[1 + 12]; + bytes[0] = (byte) 12; + bytes[1] = (byte) 0; // is_negative (1) -- (1 if minus, 0 for plus) + tmp = getBytes(day); + bytes[2] = tmp[0]; + bytes[3] = tmp[1]; + bytes[4] = tmp[2]; + bytes[5] = tmp[3]; + bytes[6] = (byte) hour; + bytes[7] = (byte) minute; + bytes[8] = (byte) second; + tmp = getBytes(microSecond); + bytes[9] = tmp[0]; + bytes[10] = tmp[1]; + bytes[11] = tmp[2]; + bytes[12] = tmp[3]; + } + return bytes; + } + + private static byte[] getBytesFromDate(Date date) { + int year = DateUtil.getYear(date); + int month = DateUtil.getMonth(date); + int day = DateUtil.getDay(date); + int hour = DateUtil.getHour(date); + int minute = DateUtil.getMinute(date); + int second = DateUtil.getSecond(date); + int microSecond = DateUtil.getMicroSecond(date); + byte[] bytes = null; + byte[] tmp = null; + if (year == 0 && month == 0 && day == 0 + && hour == 0 && minute == 0 && second == 0 + && microSecond == 0) { + bytes = new byte[1]; + bytes[0] = (byte) 0; + } else if (hour == 0 && minute == 0 && second == 0 + && microSecond == 0) { + bytes = new byte[1 + 4]; + bytes[0] = (byte) 4; + tmp = getBytes((short) year); + bytes[1] = tmp[0]; + bytes[2] = tmp[1]; + bytes[3] = (byte) month; + bytes[4] = (byte) day; + } else if (microSecond == 0) { + bytes = new byte[1 + 7]; + bytes[0] = (byte) 7; + tmp = getBytes((short) year); + bytes[1] = tmp[0]; + bytes[2] = tmp[1]; + bytes[3] = (byte) month; + bytes[4] = (byte) day; + bytes[5] = (byte) hour; + bytes[6] = (byte) minute; + bytes[7] = (byte) second; + } else { + bytes = new byte[1 + 11]; + 
bytes[0] = (byte) 11; + tmp = getBytes((short) year); + bytes[1] = tmp[0]; + bytes[2] = tmp[1]; + bytes[3] = (byte) month; + bytes[4] = (byte) day; + bytes[5] = (byte) hour; + bytes[6] = (byte) minute; + bytes[7] = (byte) second; + tmp = getBytes(microSecond); + bytes[8] = tmp[0]; + bytes[9] = tmp[1]; + bytes[10] = tmp[2]; + bytes[11] = tmp[3]; + } + return bytes; + } + + // 支持 byte dump + //--------------------------------------------------------------------- + public static String dump(byte[] data, int offset, int length) { + + StringBuilder sb = new StringBuilder(); + sb.append(" byte dump log "); + sb.append(System.lineSeparator()); + sb.append(" offset ").append(offset); + sb.append(" length ").append(length); + sb.append(System.lineSeparator()); + int lines = (length - 1) / 16 + 1; + for (int i = 0, pos = 0; i < lines; i++, pos += 16) { + sb.append(String.format("0x%04X ", i * 16)); + for (int j = 0, pos1 = pos; j < 16; j++, pos1++) { + sb.append(pos1 < length ? String.format("%02X ", data[offset + pos1]) : " "); + } + sb.append(" "); + for (int j = 0, pos1 = pos; j < 16; j++, pos1++) { + sb.append(pos1 < length ? print(data[offset + pos1]) : '.'); + } + sb.append(System.lineSeparator()); + } + sb.append(length).append(" bytes").append(System.lineSeparator()); + return sb.toString(); + } + + public static char print(byte b) { + return (b < 32 || b > 127) ? '.' 
: (char) b; + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/util/SplitUtil.java b/src/main/java/io/mycat/util/SplitUtil.java index 22865dd74..2a0782328 100644 --- a/src/main/java/io/mycat/util/SplitUtil.java +++ b/src/main/java/io/mycat/util/SplitUtil.java @@ -23,6 +23,8 @@ */ package io.mycat.util; +import com.sun.javafx.binding.StringFormatter; + import java.util.ArrayList; import java.util.LinkedList; import java.util.List; @@ -229,25 +231,37 @@ public static String[] split(String src, char c1, char c2, char c3, char c4) { } else { String[] s = split(src, c1, true); String[] scope = split(s[1], c2, true); + int integerLength = scope[0].length(); int min = Integer.parseInt(scope[0]); int max = Integer.parseInt(scope[scope.length - 1]); if (c3 == '0') { for (int x = min; x <= max; x++) { - list.add(new StringBuilder(s[0]).append(x).toString()); + String splitString = buildSubString(integerLength, x); + list.add(s[0] + splitString); } } else if (c4 == '0') { for (int x = min; x <= max; x++) { - list.add(new StringBuilder(s[0]).append(c3).append(x).toString()); + String splitString = buildSubString(integerLength, x); + list.add(s[0] + c3 + splitString); } } else { for (int x = min; x <= max; x++) { - list.add(new StringBuilder(s[0]).append(c3).append(x).append(c4).toString()); + String splitString = buildSubString(integerLength, x); + list.add(s[0] + c3 + splitString + c4); } } } return list.toArray(new String[list.size()]); } + private static String buildSubString(int integerLength, int x) { + String splitString; + if (integerLength <= 1) + splitString = String.valueOf(x); + else splitString = String.format("%0" + integerLength + "d", x); + return splitString; + } + public static String[] split(String src, char fi, char se, char th) { return split(src, fi, se, th, '0', '0'); } diff --git a/src/main/resources/rule.xml b/src/main/resources/rule.xml index 94679cdbd..c26caa7bc 100644 --- a/src/main/resources/rule.xml +++ 
b/src/main/resources/rule.xml @@ -9,6 +9,12 @@ under the License. --> + + + supplier_code + partition-by-post-fix + + id @@ -29,12 +35,12 @@ hash-int - - + + id - rang-long - - + rang-long + + id @@ -65,14 +71,14 @@ latestMonth - + id rang-mod - + id @@ -81,12 +87,12 @@ + class="io.mycat.route.function.PartitionByMurmurHash"> 0 2 160 - @@ -95,11 +101,11 @@ 2 + class="io.mycat.route.function.PartitionByFileMap"> partition-hash-int.txt + class="io.mycat.route.function.AutoPartitionByLong"> autopartition-long.txt @@ -107,10 +113,13 @@ 3 + + 0 + 8 128 - + 24 @@ -120,11 +129,11 @@ yyyy-MM-dd 2015-01-01 - + partition-range-mod.txt - + 3 diff --git a/src/main/resources/schema.dtd b/src/main/resources/schema.dtd index 1a275bc34..92b3b6862 100644 --- a/src/main/resources/schema.dtd +++ b/src/main/resources/schema.dtd @@ -30,6 +30,7 @@ + diff --git a/src/main/resources/schema.xml b/src/main/resources/schema.xml index d5d5f58e2..651f3343f 100644 --- a/src/main/resources/schema.xml +++ b/src/main/resources/schema.xml @@ -2,77 +2,79 @@ - - -

+ + + - -
-
- -
- -
-
- - - - -
- - - - - - - - - select user() - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + select user() + + + + + + + + + + + + + --> - - \ No newline at end of file + + + + + diff --git a/src/main/resources/server.xml b/src/main/resources/server.xml index 9777c8274..1edc39677 100644 --- a/src/main/resources/server.xml +++ b/src/main/resources/server.xml @@ -12,17 +12,17 @@ 0 1 - 0 - 0 + 0 + 0 2 - - - - + + + + 0 @@ -32,15 +32,15 @@ 0 - - + + 1 - false + true @@ -71,7 +71,7 @@ - + @@ -90,23 +90,23 @@ --> - 123456 - TESTDB - + utopia + supanalyse + - user - TESTDB + supanalyse true diff --git a/src/test/java/io/mycat/mpp/TestSorter.java b/src/test/java/io/mycat/mpp/TestSorter.java index 422c3e282..161890a43 100644 --- a/src/test/java/io/mycat/mpp/TestSorter.java +++ b/src/test/java/io/mycat/mpp/TestSorter.java @@ -23,84 +23,102 @@ */ package io.mycat.mpp; +import io.mycat.util.ByteUtil; import org.junit.Assert; import org.junit.Test; -import io.mycat.util.ByteUtil; - public class TestSorter { + private byte[] b1; + private byte[] b2; + + @Test + public void testDecimal() { + String d1 = "-1223.000"; + byte[] d1b = d1.getBytes(); + Assert.assertEquals(true, -1223.0 == ByteUtil.getDouble(d1b)); + d1b = "-99999.890".getBytes(); + Assert.assertEquals(true, -99999.890 == ByteUtil.getDouble(d1b)); + // 221346.000 + byte[] data2 = new byte[]{50, 50, 49, 51, 52, 54, 46, 48, 48, 48}; + Assert.assertEquals(true, 221346.000 == ByteUtil.getDouble(data2)); + // 1234567890 + byte[] data3 = new byte[]{49, 50, 51, 52, 53, 54, 55, 56, 57, 48}; + Assert.assertEquals(true, 1234567890 == ByteUtil.getInt(data3)); + + // 0123456789 + byte[] data4 = new byte[]{48, 49, 50, 51, 52, 53, 54, 55, 56, 57}; + Assert.assertEquals(true, 123456789 == ByteUtil.getInt(data4)); + } + + @Test + public void testNumberCompare() { + byte[] b1 = "0".getBytes(); + byte[] b2 = "0".getBytes(); + Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2) == 0); + + b1 = "0".getBytes(); + b2 = 
"1".getBytes(); + Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2) < 0); + } + @Test + public void testNumberCompare2() { + b1 = "10".getBytes(); + b2 = "1".getBytes(); + Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2) > 0); + + b1 = "100.0".getBytes(); + b2 = "100.0".getBytes(); + Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2) == 0); + + } + @Test + public void testNumberCompare4() { + b1 = "100.000".getBytes(); + b2 = "100.0".getBytes(); + Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2) == 0); + + b1 = "-100.000".getBytes(); + b2 = "-100.0".getBytes(); + Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2) == 0); + + } + + @Test + public void testNumberCompare6() { + b1 = "-10666666666232352345234523235666666660.001".getBytes(); + b2 = "10666666666232352345234523235666666660.0".getBytes(); + long start1 = System.nanoTime(); + ByteUtil.compareNumberByte(b1, b2); + long end1 = System.nanoTime(); + System.out.println(end1 - start1); + long start2 = System.nanoTime(); + ByteUtil.compareNumberByte2(b1, b2); + long end2 = System.nanoTime(); + System.out.println(end2 - start2); + Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2) < 0); + + b1 = "-100.001".getBytes(); + b2 = "100.0".getBytes(); + Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2) < 0); + + b1 = "90".getBytes(); + b2 = "10000".getBytes(); + Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2) < 0); + b1 = "-90".getBytes(); + b2 = "-10000".getBytes(); + Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2) > 0); + + b1 = "98".getBytes(); + b2 = "98000".getBytes(); + Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2) < 0); + + b1 = "-98".getBytes(); + b2 = "-98000".getBytes(); + Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2) > 0); + + b1 = "12002585786".getBytes(); + b2 = "12002585785".getBytes(); + Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2) > 0); - @Test - public void 
testDecimal() { - String d1 = "-1223.000"; - byte[] d1b = d1.getBytes(); - Assert.assertEquals(true, -1223.0 == ByteUtil.getDouble(d1b)); - d1b = "-99999.890".getBytes(); - Assert.assertEquals(true, -99999.890 == ByteUtil.getDouble(d1b)); - // 221346.000 - byte[] data2 = new byte[] { 50, 50, 49, 51, 52, 54, 46, 48, 48, 48 }; - Assert.assertEquals(true, 221346.000 == ByteUtil.getDouble(data2)); - // 1234567890 - byte[] data3 = new byte[] { 49, 50, 51, 52, 53, 54, 55, 56, 57, 48 }; - Assert.assertEquals(true, 1234567890 == ByteUtil.getInt(data3)); - - // 0123456789 - byte[] data4 = new byte[] { 48, 49, 50, 51, 52, 53, 54, 55, 56, 57 }; - Assert.assertEquals(true, 123456789 == ByteUtil.getInt(data4)); - } - - @Test - public void testNumberCompare() { - byte[] b1 = "0".getBytes(); - byte[] b2 = "0".getBytes(); - Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2) == 0); - - b1 = "0".getBytes(); - b2 = "1".getBytes(); - Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2)< 0); - - b1 = "10".getBytes(); - b2 = "1".getBytes(); - Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2)> 0); - - b1 = "100.0".getBytes(); - b2 = "100.0".getBytes(); - Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2)==0); - - b1 = "100.000".getBytes(); - b2 = "100.0".getBytes(); - Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2)>0); - - b1 = "-100.000".getBytes(); - b2 = "-100.0".getBytes(); - Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2)<0); - - b1 = "-100.001".getBytes(); - b2 = "-100.0".getBytes(); - Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2)<0); - - b1 = "-100.001".getBytes(); - b2 = "100.0".getBytes(); - Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2)<0); - - b1 = "90".getBytes(); - b2 = "10000".getBytes(); - Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2)<0); - b1 = "-90".getBytes(); - b2 = "-10000".getBytes(); - Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2)>0); - - b1 = 
"98".getBytes(); - b2 = "98000".getBytes(); - Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2)<0); - - b1 = "-98".getBytes(); - b2= "-98000".getBytes(); - Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2)>0); - - b1="12002585786".getBytes(); - b2="12002585785".getBytes(); - Assert.assertEquals(true, ByteUtil.compareNumberByte(b1, b2)>0); - - } -} \ No newline at end of file + } +} diff --git a/src/test/java/io/mycat/route/function/RuleFunctionSuitTableTest.java b/src/test/java/io/mycat/route/function/RuleFunctionSuitTableTest.java index 80667c3f5..ef3b8eaf7 100644 --- a/src/test/java/io/mycat/route/function/RuleFunctionSuitTableTest.java +++ b/src/test/java/io/mycat/route/function/RuleFunctionSuitTableTest.java @@ -1,46 +1,44 @@ package io.mycat.route.function; -import java.util.Arrays; - -import org.junit.Assert; -import org.junit.Test; - import io.mycat.config.model.TableConfig; import io.mycat.config.model.rule.RuleConfig; import io.mycat.util.SplitUtil; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Arrays; /** * 测试分片算法定义是否符合分片表的定义, 主要测试分区数是否符合分片表分片数 - * - * @author CrazyPig * + * @author CrazyPig */ public class RuleFunctionSuitTableTest { - - @Test - public void testAutoPartitionByLong() { - AutoPartitionByLong autoPartition=new AutoPartitionByLong(); - autoPartition.setMapFile("autopartition-long.txt"); - autoPartition.init(); // partition = 3 - Assert.assertEquals(3, autoPartition.getPartitionNum()); - RuleConfig rule = new RuleConfig("id", "auto-partition-long"); - rule.setRuleAlgorithm(autoPartition); - TableConfig tableConf = new TableConfig("test", "id", true, false, -1, "dn1,dn2", - null, rule, true, null, false, null, null, null); - int suit1 = autoPartition.suitableFor(tableConf); - Assert.assertEquals(-1, suit1); - - tableConf.getDataNodes().clear(); - tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3")); - - int suit2 = autoPartition.suitableFor(tableConf); - Assert.assertEquals(0, 
suit2); - - tableConf.getDataNodes().clear(); - tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3", "dn4")); - int suit3 = autoPartition.suitableFor(tableConf); - Assert.assertEquals(1, suit3); - + + @Test + public void testAutoPartitionByLong() { + AutoPartitionByLong autoPartition = new AutoPartitionByLong(); + autoPartition.setMapFile("autopartition-long.txt"); + autoPartition.init(); // partition = 3 + Assert.assertEquals(3, autoPartition.getPartitionNum()); + RuleConfig rule = new RuleConfig("id", "auto-partition-long"); + rule.setRuleAlgorithm(autoPartition); + TableConfig tableConf = new TableConfig("test", "id", true, false, -1, "dn1,dn2", + null, rule, true, null, false, null, null, null, null); + int suit1 = autoPartition.suitableFor(tableConf); + Assert.assertEquals(-1, suit1); + + tableConf.getDataNodes().clear(); + tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3")); + + int suit2 = autoPartition.suitableFor(tableConf); + Assert.assertEquals(0, suit2); + + tableConf.getDataNodes().clear(); + tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3", "dn4")); + int suit3 = autoPartition.suitableFor(tableConf); + Assert.assertEquals(1, suit3); + /* * autopartition-long-dupl.txt * 0-1000=0 @@ -48,121 +46,121 @@ public void testAutoPartitionByLong() { * 2001-3000=0 * 3001-4000=1 */ - AutoPartitionByLong autoPartition2 = new AutoPartitionByLong(); - autoPartition2.setMapFile("autopartition-long-dupl.txt"); - autoPartition2.init(); - Assert.assertEquals(2, autoPartition2.getPartitionNum()); - RuleConfig rule2 = new RuleConfig("id", "auto-partition-long-dupl"); - rule2.setRuleAlgorithm(autoPartition2); - TableConfig tableConf2 = new TableConfig("test2", "id", true, false, -1, "dn1,dn2", - null, rule, true, null, false, null, null, null); - Assert.assertEquals(0, autoPartition2.suitableFor(tableConf2)); - - Assert.assertEquals(0, autoPartition2.calculate("500").intValue()); - Assert.assertEquals(1, 
autoPartition2.calculate("1500").intValue()); - Assert.assertEquals(1, autoPartition2.calculate("2000").intValue()); - Assert.assertEquals(0, autoPartition2.calculate("3000").intValue()); - Assert.assertEquals(1, autoPartition2.calculate("3001").intValue()); - } - - @Test - public void testPartitionByDate() { - - PartitionByDate partition = new PartitionByDate(); - partition.setDateFormat("yyyy-MM-dd"); - partition.setsBeginDate("2014-01-01"); - partition.setsEndDate("2014-01-31"); - partition.setsPartionDay("10"); - partition.init(); // partition = 4 - Assert.assertEquals(4, partition.getPartitionNum()); - - RuleConfig rule = new RuleConfig("col_date", "partition-date"); - rule.setRuleAlgorithm(partition); - TableConfig tableConf = new TableConfig("test", "id", true, false, -1, "dn1,dn2,dn3", - null, rule, true, null, false, null, null, null); - int suit1 = partition.suitableFor(tableConf); - - Assert.assertEquals(-1, suit1); - - tableConf.getDataNodes().clear(); - tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3", "dn4")); - int suit2 = partition.suitableFor(tableConf); - Assert.assertEquals(0, suit2); - - tableConf.getDataNodes().clear(); - tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3", "dn4", "dn5")); - int suit3 = partition.suitableFor(tableConf); - Assert.assertEquals(1, suit3); - - PartitionByDate partition1 = new PartitionByDate(); - partition.setDateFormat("yyyy-MM-dd"); - partition.setsBeginDate("2014-01-01"); - partition.setsPartionDay("10"); - partition.init(); // partition no limit - - int suit4 = partition1.suitableFor(tableConf); - Assert.assertEquals(0, suit4); - - } - - @Test - public void testPartitionByHashMod() { - - PartitionByHashMod partition = new PartitionByHashMod(); - partition.setCount(3); // partition = 3; - Assert.assertEquals(3, partition.getPartitionNum()); - - RuleConfig rule = new RuleConfig("id", "partition-hash-mod"); - rule.setRuleAlgorithm(partition); - TableConfig tableConf = new 
TableConfig("test", "id", true, false, -1, "dn1,dn2,dn3", - null, rule, true, null, false, null, null, null); - int suit1 = partition.suitableFor(tableConf); - Assert.assertEquals(0, suit1); - - tableConf.getDataNodes().clear(); - tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3", "dn4")); - int suit2 = partition.suitableFor(tableConf); - Assert.assertEquals(1, suit2); - - tableConf.getDataNodes().clear(); - tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2")); - int suit3 = partition.suitableFor(tableConf); - Assert.assertEquals(-1, suit3); - } - - @Test - public void testPartitionByRangeMod() { - PartitionByRangeMod partition = new PartitionByRangeMod(); - partition.setMapFile("partition-range-mod.txt"); - partition.init(); - - Assert.assertEquals(20, partition.getPartitionNum()); // partition = 20 - RuleConfig rule = new RuleConfig("id", "partition-range-mod"); - rule.setRuleAlgorithm(partition); - TableConfig tableConf = new TableConfig("test", "id", true, false, -1, "dn$1-10", - null, rule, true, null, false, null, null, null); - int suit1 = partition.suitableFor(tableConf); - Assert.assertEquals(-1, suit1); - - tableConf.getDataNodes().clear(); - String[] dataNodes = SplitUtil.split("dn$1-20", ',', '$', '-'); - tableConf.getDataNodes().addAll(Arrays.asList(dataNodes)); - int suit2 = partition.suitableFor(tableConf); - Assert.assertEquals(0, suit2); - - tableConf.getDataNodes().clear(); - dataNodes = SplitUtil.split("dn$1-30", ',', '$', '-'); - tableConf.getDataNodes().addAll(Arrays.asList(dataNodes)); - int suit3 = partition.suitableFor(tableConf); - Assert.assertEquals(1, suit3); - - } - - @Test - public void testPartitionByPattern() { - PartitionByPattern partition = new PartitionByPattern(); - partition.setMapFile("partition-pattern.txt"); - partition.init(); + AutoPartitionByLong autoPartition2 = new AutoPartitionByLong(); + autoPartition2.setMapFile("autopartition-long-dupl.txt"); + autoPartition2.init(); + Assert.assertEquals(2, 
autoPartition2.getPartitionNum()); + RuleConfig rule2 = new RuleConfig("id", "auto-partition-long-dupl"); + rule2.setRuleAlgorithm(autoPartition2); + TableConfig tableConf2 = new TableConfig("test2", "id", true, false, -1, "dn1,dn2", + null, rule, true, null, false, null, null, null, null); + Assert.assertEquals(0, autoPartition2.suitableFor(tableConf2)); + + Assert.assertEquals(0, autoPartition2.calculate("500").intValue()); + Assert.assertEquals(1, autoPartition2.calculate("1500").intValue()); + Assert.assertEquals(1, autoPartition2.calculate("2000").intValue()); + Assert.assertEquals(0, autoPartition2.calculate("3000").intValue()); + Assert.assertEquals(1, autoPartition2.calculate("3001").intValue()); + } + + @Test + public void testPartitionByDate() { + + PartitionByDate partition = new PartitionByDate(); + partition.setDateFormat("yyyy-MM-dd"); + partition.setsBeginDate("2014-01-01"); + partition.setsEndDate("2014-01-31"); + partition.setsPartionDay("10"); + partition.init(); // partition = 4 + Assert.assertEquals(4, partition.getPartitionNum()); + + RuleConfig rule = new RuleConfig("col_date", "partition-date"); + rule.setRuleAlgorithm(partition); + TableConfig tableConf = new TableConfig("test", "id", true, false, -1, "dn1,dn2,dn3", + null, rule, true, null, false, null, null, null, null); + int suit1 = partition.suitableFor(tableConf); + + Assert.assertEquals(-1, suit1); + + tableConf.getDataNodes().clear(); + tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3", "dn4")); + int suit2 = partition.suitableFor(tableConf); + Assert.assertEquals(0, suit2); + + tableConf.getDataNodes().clear(); + tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3", "dn4", "dn5")); + int suit3 = partition.suitableFor(tableConf); + Assert.assertEquals(1, suit3); + + PartitionByDate partition1 = new PartitionByDate(); + partition.setDateFormat("yyyy-MM-dd"); + partition.setsBeginDate("2014-01-01"); + partition.setsPartionDay("10"); + partition.init(); // 
partition no limit + + int suit4 = partition1.suitableFor(tableConf); + Assert.assertEquals(0, suit4); + + } + + @Test + public void testPartitionByHashMod() { + + PartitionByHashMod partition = new PartitionByHashMod(); + partition.setCount(3); // partition = 3; + Assert.assertEquals(3, partition.getPartitionNum()); + + RuleConfig rule = new RuleConfig("id", "partition-hash-mod"); + rule.setRuleAlgorithm(partition); + TableConfig tableConf = new TableConfig("test", "id", true, false, -1, "dn1,dn2,dn3", + null, rule, true, null, false, null, null, null, null); + int suit1 = partition.suitableFor(tableConf); + Assert.assertEquals(0, suit1); + + tableConf.getDataNodes().clear(); + tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3", "dn4")); + int suit2 = partition.suitableFor(tableConf); + Assert.assertEquals(1, suit2); + + tableConf.getDataNodes().clear(); + tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2")); + int suit3 = partition.suitableFor(tableConf); + Assert.assertEquals(-1, suit3); + } + + @Test + public void testPartitionByRangeMod() { + PartitionByRangeMod partition = new PartitionByRangeMod(); + partition.setMapFile("partition-range-mod.txt"); + partition.init(); + + Assert.assertEquals(20, partition.getPartitionNum()); // partition = 20 + RuleConfig rule = new RuleConfig("id", "partition-range-mod"); + rule.setRuleAlgorithm(partition); + TableConfig tableConf = new TableConfig("test", "id", true, false, -1, "dn$1-10", + null, rule, true, null, false, null, null, null, null); + int suit1 = partition.suitableFor(tableConf); + Assert.assertEquals(-1, suit1); + + tableConf.getDataNodes().clear(); + String[] dataNodes = SplitUtil.split("dn$1-20", ',', '$', '-'); + tableConf.getDataNodes().addAll(Arrays.asList(dataNodes)); + int suit2 = partition.suitableFor(tableConf); + Assert.assertEquals(0, suit2); + + tableConf.getDataNodes().clear(); + dataNodes = SplitUtil.split("dn$1-30", ',', '$', '-'); + 
tableConf.getDataNodes().addAll(Arrays.asList(dataNodes)); + int suit3 = partition.suitableFor(tableConf); + Assert.assertEquals(1, suit3); + + } + + @Test + public void testPartitionByPattern() { + PartitionByPattern partition = new PartitionByPattern(); + partition.setMapFile("partition-pattern.txt"); + partition.init(); /* * partition-pattern.txt @@ -176,16 +174,16 @@ public void testPartitionByPattern() { * 225-256=7 * 0-0=7 */ - - Assert.assertEquals(8, partition.getPartitionNum()); - - } - - @Test - public void testPartitionByPrefixPattern() { - PartitionByPrefixPattern partition = new PartitionByPrefixPattern(); - partition.setMapFile("partition_prefix_pattern.txt"); - partition.init(); + + Assert.assertEquals(8, partition.getPartitionNum()); + + } + + @Test + public void testPartitionByPrefixPattern() { + PartitionByPrefixPattern partition = new PartitionByPrefixPattern(); + partition.setMapFile("partition_prefix_pattern.txt"); + partition.init(); /* @@ -200,26 +198,26 @@ public void testPartitionByPrefixPattern() { * 29-32=7 * 0-0=7 */ - Assert.assertEquals(8, partition.getPartitionNum()); - - RuleConfig rule = new RuleConfig("id", "partition-prefix-pattern"); - rule.setRuleAlgorithm(partition); - TableConfig tableConf = new TableConfig("test", "id", true, false, -1, "dn1,dn2", - null, rule, true, null, false, null, null, null); - int suit1 = partition.suitableFor(tableConf); - Assert.assertEquals(-1, suit1); - - tableConf.getDataNodes().clear(); - String[] dataNodes = SplitUtil.split("dn$1-8", ',', '$', '-'); - tableConf.getDataNodes().addAll(Arrays.asList(dataNodes)); - int suit2 = partition.suitableFor(tableConf); - Assert.assertEquals(0, suit2); - - tableConf.getDataNodes().clear(); - dataNodes = SplitUtil.split("dn$1-10", ',', '$', '-'); - tableConf.getDataNodes().addAll(Arrays.asList(dataNodes)); - int suit3 = partition.suitableFor(tableConf); - Assert.assertEquals(1, suit3); - } + Assert.assertEquals(8, partition.getPartitionNum()); + + RuleConfig 
rule = new RuleConfig("id", "partition-prefix-pattern"); + rule.setRuleAlgorithm(partition); + TableConfig tableConf = new TableConfig("test", "id", true, false, -1, "dn1,dn2", + null, rule, true, null, false, null, null, null, null); + int suit1 = partition.suitableFor(tableConf); + Assert.assertEquals(-1, suit1); + + tableConf.getDataNodes().clear(); + String[] dataNodes = SplitUtil.split("dn$1-8", ',', '$', '-'); + tableConf.getDataNodes().addAll(Arrays.asList(dataNodes)); + int suit2 = partition.suitableFor(tableConf); + Assert.assertEquals(0, suit2); + + tableConf.getDataNodes().clear(); + dataNodes = SplitUtil.split("dn$1-10", ',', '$', '-'); + tableConf.getDataNodes().addAll(Arrays.asList(dataNodes)); + int suit3 = partition.suitableFor(tableConf); + Assert.assertEquals(1, suit3); + } } diff --git a/src/test/java/io/mycat/util/ByteUtilTest.java b/src/test/java/io/mycat/util/ByteUtilTest.java new file mode 100644 index 000000000..2268575c5 --- /dev/null +++ b/src/test/java/io/mycat/util/ByteUtilTest.java @@ -0,0 +1,22 @@ +package io.mycat.util; + +import org.junit.Test; + +import static org.junit.Assert.*; + +/** + *

+ * COPYRIGHT © 2001 - 2016 VOYAGE ONE GROUP INC. ALL RIGHTS RESERVED. + * + * @author vantis 2017/9/29 + * @version 1.0.0 + */ +public class ByteUtilTest { + @Test + public void compareNumberByte() throws Exception { + byte[] b1 = {'1', '.', '2'}; + byte[] b2 = {'1', '2', '2'}; + int i = ByteUtil.compareNumberByte(b1, b2); + } + +} \ No newline at end of file diff --git a/src/test/java/io/mycat/util/SplitUtilTest.java b/src/test/java/io/mycat/util/SplitUtilTest.java index 08b3cb62a..d2fbc4179 100644 --- a/src/test/java/io/mycat/util/SplitUtilTest.java +++ b/src/test/java/io/mycat/util/SplitUtilTest.java @@ -82,4 +82,10 @@ public void test4() { Assert.assertEquals("offer[3]", dest[3]); } + @Test + public void test5() { + String src = "wms_bt_channel_stock_c$00-999"; + String [] dest = SplitUtil.split(src, ',', '$', '-'); + Assert.assertEquals(1000, dest.length); + } } \ No newline at end of file diff --git a/version.txt b/version.txt index f680a5d56..9e935aa9c 100644 --- a/version.txt +++ b/version.txt @@ -1,6 +1,6 @@ -BuildTime 2017-04-24 09:41:48 -GitVersion 36626e4d819c30da8e281594758559ec13f00679 -MavenVersion 1.6.5-BETA +BuildTime 2017-09-30 07:52:21 +GitVersion 8cf5282e639acc09a84612397f1de07459e3b5bf +MavenVersion 1.6.5-release GitUrl https://github.com/MyCATApache/Mycat-Server.git MyCatSite http://www.mycat.org.cn QQGroup 106088787