diff --git a/.github/workflows/cluster-it.yml b/.github/workflows/cluster-it.yml new file mode 100644 index 000000000000..323bb3182936 --- /dev/null +++ b/.github/workflows/cluster-it.yml @@ -0,0 +1,79 @@ +name: New Cluster IT + +on: + push: + branches: + - master + paths-ignore: + - 'docs/**' + pull_request: + branches: + - master + paths-ignore: + - 'docs/**' + # allow manually run the action: + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 + +jobs: + ClusterIT: + strategy: + fail-fast: false + max-parallel: 20 + matrix: + java: [ 8, 11, 17 ] + os: [ ubuntu-latest, macos-latest, windows-latest ] + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@v2 + - name: Set up JDK ${{ matrix.java }} + uses: actions/setup-java@v1 + with: + java-version: ${{ matrix.java }} + - name: Cache Maven packages + uses: actions/cache@v2 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2- + - name: Check Apache Rat + run: mvn -B apache-rat:check -P site -P code-coverage + - name: Adjust network dynamic TCP ports range + if: ${{ runner.os == 'Windows' }} + shell: pwsh + run: | + netsh int ipv4 set dynamicport tcp start=32768 num=32768 + netsh int ipv4 set dynamicport udp start=32768 num=32768 + netsh int ipv6 set dynamicport tcp start=32768 num=32768 + netsh int ipv6 set dynamicport udp start=32768 num=32768 + - name: Adjust Linux kernel somaxconn + if: ${{ runner.os == 'Linux' }} + shell: bash + run: sudo sysctl -w net.core.somaxconn=65535 + - name: Adjust Mac kernel somaxconn + if: ${{ runner.os == 'macOS' }} + shell: bash + run: sudo sysctl -w kern.ipc.somaxconn=65535 + - name: IT/UT Test + shell: bash + # we do not compile client-cpp for saving time, it is tested in client.yml + # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml + run: | + mvn clean verify \ + -DskipUTs \ + -pl integration-test \ + -am -PClusterIT + - name: Upload Artifact + if: failure() + uses: actions/upload-artifact@v3 + with: + name: cluster-log-java${{ matrix.java }}-${{ runner.os }} + path: integration-test/target/cluster-logs + retention-days: 1 diff --git a/.github/workflows/cluster.yml b/.github/workflows/cluster.yml deleted file mode 100644 index e6c3f48ec386..000000000000 --- a/.github/workflows/cluster.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Cluster Test - -on: - push: - branches: - - test_cluster - paths-ignore: - - 'docs/**' - pull_request: - branches: - - test_cluster - paths-ignore: - - 'docs/**' - # allow manually run the action: - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -env: - MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 - -jobs: - unix: - strategy: - fail-fast: false - max-parallel: 20 - matrix: - java: [ 8 ] - os: [ ubuntu-latest ] - runs-on: ${{ matrix.os}} - - steps: - - uses: actions/checkout@v2 - - name: Set up JDK ${{ matrix.java }} - uses: actions/setup-java@v1 - with: - java-version: ${{ matrix.java }} - - name: Cache Maven packages - uses: actions/cache@v2 - with: - path: ~/.m2 - key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} - restore-keys: ${{ runner.os }}-m2- - - name: Check 
Apache Rat - run: mvn -B apache-rat:check -P site -P code-coverage - - name: IT/UT Test - shell: bash - # we do not compile client-cpp for saving time, it is tested in client.yml - # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml - run: mvn -B clean verify -Dsession.test.skip=true -Diotdb.test.skip=true -Dcluster.test.skip=true -Dtsfile.test.skip=true -pl integration -am -PCluster diff --git a/.github/workflows/sonar-coveralls.yml b/.github/workflows/sonar-coveralls.yml index 84b30f0d0b97..8b8450789b47 100644 --- a/.github/workflows/sonar-coveralls.yml +++ b/.github/workflows/sonar-coveralls.yml @@ -70,4 +70,4 @@ jobs: -Dsonar.projectKey=apache_incubator-iotdb \ -Dsonar.host.url=https://sonarcloud.io \ -Dsonar.login=${{ secrets.SONARCLOUD_TOKEN }} \ - -DskipTests -pl '!distribution' -P '!testcontainer' -am + -DskipTests -pl '!distribution,!integration-test' -P '!testcontainer' -am diff --git a/.github/workflows/standalone-it-for-mpp.yml b/.github/workflows/standalone-it-for-mpp.yml new file mode 100644 index 000000000000..6617292221ff --- /dev/null +++ b/.github/workflows/standalone-it-for-mpp.yml @@ -0,0 +1,81 @@ +name: New Standalone IT + +on: + push: + branches: + - master + paths-ignore: + - 'docs/**' + pull_request: + branches: + - master + paths-ignore: + - 'docs/**' + # allow manually run the action: + workflow_dispatch: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 + +jobs: + StandaloneMppIT: + strategy: + fail-fast: false + max-parallel: 20 + matrix: + java: [ 8, 11, 17 ] + os: [ ubuntu-latest, macos-latest, windows-latest ] + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@v2 + - name: Set up JDK ${{ matrix.java }} + uses: actions/setup-java@v1 + with: + java-version: ${{ matrix.java }} + - name: Cache Maven packages + uses: actions/cache@v2 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2- + - name: Check Apache Rat + run: mvn -B apache-rat:check -P site -P code-coverage + - name: Adjust network dynamic TCP ports range + if: ${{ runner.os == 'Windows' }} + shell: pwsh + run: | + netsh int ipv4 set dynamicport tcp start=32768 num=32768 + netsh int ipv4 set dynamicport udp start=32768 num=32768 + netsh int ipv6 set dynamicport tcp start=32768 num=32768 + netsh int ipv6 set dynamicport udp start=32768 num=32768 + - name: Adjust Linux kernel somaxconn + if: ${{ runner.os == 'Linux' }} + shell: bash + run: sudo sysctl -w net.core.somaxconn=65535 + - name: Adjust Mac kernel somaxconn + if: ${{ runner.os == 'macOS' }} + shell: bash + run: sudo sysctl -w kern.ipc.somaxconn=65535 + - name: IT/UT Test + shell: bash + # we do not compile client-cpp for saving time, it is tested in client.yml + # we can skip influxdb-protocol because it has been tested separately in influxdb-protocol.yml + run: | + mvn clean verify \ + -DskipUTs \ + -DintegrationTest.forkCount=2 \ + -pl integration-test \ + -am -PLocalStandaloneOnMppIT + - name: Upload Artifact + if: failure() + uses: actions/upload-artifact@v3 + with: + name: standalone-log-java${{ matrix.java }}-${{ runner.os }} + path: integration-test/target/cluster-logs + retention-days: 1 + diff --git a/.gitignore b/.gitignore index 68d7afef1865..6b90b2f1f5c6 100644 --- a/.gitignore +++ b/.gitignore @@ -40,6 +40,7 @@ 
tsfile-jdbc/src/main/resources/output/queryRes.csv *.gz *.tar.gz *.tar +*.tokens #src/test/resources/logback.xml ### CSV ### diff --git a/LICENSE b/LICENSE index 2cabd6a47c68..6c45bf8f5d7c 100644 --- a/LICENSE +++ b/LICENSE @@ -237,10 +237,40 @@ License: http://www.apache.org/licenses/LICENSE-2.0 -------------------------------------------------------------------------------- +The following files include code modified from Apache HBase project. + +./confignode/src/main/java/org/apache/iotdb/procedure/Procedure.java +./confignode/src/main/java/org/apache/iotdb/procedure/ProcedureExecutor.java +./confignode/src/main/java/org/apache/iotdb/procedure/StateMachineProcedure.java +./confignode/src/main/java/org/apache/iotdb/procedure/TimeoutExecutorThread.java +./confignode/src/main/java/org/apache/iotdb/procedure/StoppableThread.java + +Copyright: 2016-2018 Michael Burman and/or other contributors +Project page: https://github.com/burmanm/gorilla-tsc +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + The following files include code modified from Eclipse Collections project. ./tsfile/src/main/java/org/apache/iotdb/tsfile/utils/ByteArrayList.java Copyright: 2021 Goldman Sachs Project page: https://www.eclipse.org/collections -License: https://github.com/eclipse/eclipse-collections/blob/master/LICENSE-EDL-1.0.txt \ No newline at end of file +License: https://github.com/eclipse/eclipse-collections/blob/master/LICENSE-EDL-1.0.txt + +-------------------------------------------------------------------------------- + +The following files include code modified from Micrometer project. + +./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmClassLoaderMetrics +./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmCompileMetrics +./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmGcMetrics +./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmMemoryMetrics +./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/jvm/JvmThreadMetrics +./metrics/interface/src/main/java/org/apache/iotdb/metrics/predefined/logback/LogbackMetrics +./metrics/interface/src/main/java/org/apache/iotdb/metrics/utils/JvmUtils + +Copyright: 2017 VMware +Project page: https://github.com/micrometer-metrics/micrometer +License: https://github.com/micrometer-metrics/micrometer/blob/main/LICENSE \ No newline at end of file diff --git a/README.md b/README.md index 0ccc8d0dc834..3b05b9d6deef 100644 --- a/README.md +++ b/README.md @@ -175,26 +175,12 @@ and "`antlr/target/generated-sources/antlr4`" need to be added to sources roots **In IDEA, you just need to right click on the root project name and choose "`Maven->Reload Project`" after you run `mvn package` successfully.** -#### Spotless problem -**NOTE**: IF you are using JDK16+, you have to create a file called `jvm.config`, -put it under `.mvn/`, before you use `spotless:apply`. The file contains the following content: -``` ---add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED -``` - -This is [an issue of Spotless](https://github.com/diffplug/spotless/issues/834), -Once the issue is fixed, we can remove this file. 
- ### Configurations configuration files are under "conf" folder - * environment config module (`iotdb-env.bat`, `iotdb-env.sh`), - * system config module (`iotdb-engine.properties`) + * environment config module (`datanode-env.bat`, `datanode-env.sh`), + * system config module (`iotdb-datanode.properties`) * log config module (`logback.xml`). For more information, please see [Config Manual](https://iotdb.apache.org/UserGuide/Master/Reference/Config-Manual.html). diff --git a/README_ZH.md b/README_ZH.md index 9b50c1d13787..de98411dd7e7 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -161,24 +161,11 @@ git checkout vx.x.x **IDEA的操作方法:在上述maven命令编译好后,右键项目名称,选择"`Maven->Reload project`",即可。** -#### Spotless问题(JDK16+) -**NOTE**: 如果你在使用 JDK16+, 并且要做`spotless:apply`或者`spotless:check`, -那么需要在`.mvn/`文件夹下创建一个文件 `jvm.config`, 内容如下: -``` ---add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED -``` -这是spotless依赖的googlecodeformat的 [问题](https://github.com/diffplug/spotless/issues/834), -近期可能会被官方解决。 - ### 配置 配置文件在"conf"文件夹下 -* 环境配置模块(`iotdb-env.bat`, `iotdb-env.sh`), -* 系统配置模块(`iotdb-engine.properties`) +* 环境配置模块(`datanode-env.bat`, `datanode-env.sh`), +* 系统配置模块(`iotdb-datanode.properties`) * 日志配置模块(`logback.xml`)。 有关详细信息,请参见[配置参数](https://iotdb.apache.org/zh/UserGuide/Master/Reference/Config-Manual.html)。 diff --git a/antlr/pom.xml b/antlr/pom.xml index 64015796385e..a66513db4c64 100644 --- a/antlr/pom.xml +++ b/antlr/pom.xml @@ -47,6 +47,7 @@ false true + src/main/antlr4/org/apache/iotdb/db/qp/sql antlr4 diff --git a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IdentifierParser.g4 b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IdentifierParser.g4 new file mode 100644 index 000000000000..4387f7c22565 --- /dev/null +++ b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IdentifierParser.g4 @@ -0,0 +1,176 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +parser grammar IdentifierParser; + +options { tokenVocab=SqlLexer; } + +identifier + : keyWords + | ID + | QUOTED_ID + ; + + +// List of keywords, new keywords that can be used as identifiers should be added into this list. For example, 'not' is an identifier but can not be used as an identifier in node name. 
+ +keyWords + : ADD + | AFTER + | ALIAS + | ALIGN + | ALIGNED + | ALL + | ALTER + | ANY + | APPEND + | AS + | ASC + | ATTRIBUTES + | AUTOREGISTER + | BEFORE + | BEGIN + | BOUNDARY + | BY + | CACHE + | CHILD + | CLEAR + | CLUSTER + | CONCAT + | CONFIGURATION + | CONTINUOUS + | COUNT + | CONTAIN + | CQ + | CQS + | CREATE + | DATA + | DEBUG + | DELETE + | DESC + | DESCRIBE + | DEVICE + | DEVICES + | DISABLE + | DROP + | END + | EVERY + | EXPLAIN + | FILL + | FLUSH + | FOR + | FROM + | FULL + | FUNCTION + | FUNCTIONS + | GLOBAL + | GRANT + | GROUP + | INDEX + | INFO + | INSERT + | INTO + | KILL + | LABEL + | LAST + | LATEST + | LEVEL + | LIKE + | LIMIT + | LINEAR + | LINK + | LIST + | LOAD + | LOCAL + | LOCK + | MERGE + | METADATA + | NODES + | NOW + | OF + | OFF + | OFFSET + | ON + | ORDER + | PARTITION + | PASSWORD + | PATHS + | PIPE + | PIPES + | PIPESERVER + | PIPESINK + | PIPESINKS + | PIPESINKTYPE + | PREVIOUS + | PREVIOUSUNTILLAST + | PRIVILEGES + | PROCESSLIST + | PROPERTY + | PRUNE + | QUERIES + | QUERY + | READONLY + | REGEXP + | REGIONS + | REMOVE + | RENAME + | RESAMPLE + | RESOURCE + | REVOKE + | ROLE + | SCHEMA + | SELECT + | SET + | SETTLE + | SGLEVEL + | SHOW + | SLIMIT + | SOFFSET + | STORAGE + | START + | STOP + | SYSTEM + | TAGS + | TASK + | TEMPLATE + | TEMPLATES + | TIMESERIES + | TO + | TOLERANCE + | TOP + | TRACING + | TRIGGER + | TRIGGERS + | TTL + | UNLINK + | UNLOAD + | UNSET + | UPDATE + | UPSERT + | USER + | USING + | VALUES + | VERIFY + | VERSION + | WHERE + | WITH + | WITHOUT + | WRITABLE + | PRIVILEGE_VALUE + ; \ No newline at end of file diff --git a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/InfluxDBSqlParser.g4 b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/InfluxDBSqlParser.g4 index 0ffdcb9f2288..381f3e297977 100644 --- a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/InfluxDBSqlParser.g4 +++ b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/InfluxDBSqlParser.g4 @@ -21,6 +21,8 @@ parser grammar InfluxDBSqlParser; options { tokenVocab=SqlLexer; } +import IdentifierParser; + singleStatement : statement SEMI? EOF ; @@ -71,20 +73,12 @@ fromClause nodeName : STAR - | ID - | QUOTED_ID + | identifier | LAST | COUNT | DEVICE ; -// Identifier - -identifier - : ID - | QUOTED_ID - ; - // Constant & Literal @@ -126,4 +120,4 @@ realLiteral datetimeLiteral : DATETIME_LITERAL | NOW LR_BRACKET RR_BRACKET - ; + ; \ No newline at end of file diff --git a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IoTDBSqlParser.g4 b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IoTDBSqlParser.g4 index 56ca7bd9b576..1dbb03f7cecf 100644 --- a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IoTDBSqlParser.g4 +++ b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/IoTDBSqlParser.g4 @@ -21,6 +21,7 @@ parser grammar IoTDBSqlParser; options { tokenVocab=SqlLexer; } +import IdentifierParser; /** * 1. 
Top Level Description @@ -42,7 +43,7 @@ ddlStatement | dropFunction | dropTrigger | dropContinuousQuery | dropSchemaTemplate | setTTL | unsetTTL | startTrigger | stopTrigger | setSchemaTemplate | unsetSchemaTemplate | showStorageGroup | showDevices | showTimeseries | showChildPaths | showChildNodes - | showFunctions | showTriggers | showContinuousQueries | showTTL | showAllTTL + | showFunctions | showTriggers | showContinuousQueries | showTTL | showAllTTL | showCluster | showRegion | showSchemaTemplates | showNodesInSchemaTemplate | showPathsUsingSchemaTemplate | showPathsSetSchemaTemplate | countStorageGroup | countDevices | countTimeseries | countNodes @@ -59,7 +60,7 @@ dclStatement ; utilityStatement - : merge | fullMerge | flush | clearCache | settle + : merge | fullMerge | flush | clearCache | settle | explain | setSystemStatus | showVersion | showFlushInfo | showLockInfo | showQueryResource | showQueryProcesslist | killQuery | grantWatermarkEmbedding | revokeWatermarkEmbedding | loadConfiguration | loadTimeseries | loadFile | removeFile | unloadFile; @@ -75,15 +76,19 @@ syncStatement // Create Storage Group setStorageGroup - : SET STORAGE GROUP TO prefixPath (WITH storageGroupAttributeClause (COMMA storageGroupAttributeClause)*)? + : SET STORAGE GROUP TO prefixPath storageGroupAttributesClause? ; -storageGroupAttributeClause - : (TTL | SCHEMA_REPLICATION_FACTOR | DATA_REPLICATION_FACTOR | TIME_PARTITION_INTERVAL) '=' INTEGER_LITERAL +createStorageGroup + : CREATE STORAGE GROUP prefixPath storageGroupAttributesClause? ; -createStorageGroup - : CREATE STORAGE GROUP prefixPath +storageGroupAttributesClause + : WITH storageGroupAttributeClause (COMMA storageGroupAttributeClause)* + ; + +storageGroupAttributeClause + : (TTL | SCHEMA_REPLICATION_FACTOR | DATA_REPLICATION_FACTOR | TIME_PARTITION_INTERVAL) '=' INTEGER_LITERAL ; // Create Timeseries @@ -114,7 +119,11 @@ createTimeseriesOfSchemaTemplate // Create Function createFunction - : CREATE FUNCTION udfName=identifier AS className=STRING_LITERAL + : CREATE FUNCTION udfName=identifier AS className=STRING_LITERAL (USING uri (COMMA uri)*)? + ; + +uri + : STRING_LITERAL ; // Create Trigger @@ -160,7 +169,7 @@ alterTimeseries alterClause : RENAME beforeName=attributeKey TO currentName=attributeKey | SET attributePair (COMMA attributePair)* - | DROP STRING_LITERAL (COMMA STRING_LITERAL)* + | DROP attributeKey (COMMA attributeKey)* | ADD TAGS attributePair (COMMA attributePair)* | ADD ATTRIBUTES attributePair (COMMA attributePair)* | UPSERT aliasClause? tagClause? attributeClause? @@ -294,6 +303,16 @@ showAllTTL : SHOW ALL TTL ; +// Show Cluster +showCluster + : SHOW CLUSTER + ; + +// Show Region +showRegion + : SHOW (SCHEMA | DATA)? REGIONS + ; + // Show Schema Template showSchemaTemplates : SHOW SCHEMA? 
TEMPLATES @@ -411,10 +430,10 @@ withoutNullClause ; oldTypeClause - : (dataType=DATATYPE_VALUE | ALL) LS_BRACKET linearClause RS_BRACKET - | (dataType=DATATYPE_VALUE | ALL) LS_BRACKET previousClause RS_BRACKET - | (dataType=DATATYPE_VALUE | ALL) LS_BRACKET specificValueClause RS_BRACKET - | (dataType=DATATYPE_VALUE | ALL) LS_BRACKET previousUntilLastClause RS_BRACKET + : (ALL | dataType=attributeValue) LS_BRACKET linearClause RS_BRACKET + | (ALL | dataType=attributeValue) LS_BRACKET previousClause RS_BRACKET + | (ALL | dataType=attributeValue) LS_BRACKET specificValueClause RS_BRACKET + | (ALL | dataType=attributeValue) LS_BRACKET previousUntilLastClause RS_BRACKET ; linearClause @@ -491,7 +510,7 @@ alterUser // Grant User Privileges grantUser - : GRANT USER userName=identifier PRIVILEGES privileges ON prefixPath + : GRANT USER userName=identifier PRIVILEGES privileges (ON prefixPath)? ; // Grant Role Privileges @@ -506,7 +525,7 @@ grantRoleToUser // Revoke User Privileges revokeUser - : REVOKE USER userName=identifier PRIVILEGES privileges ON prefixPath + : REVOKE USER userName=identifier PRIVILEGES privileges (ON prefixPath)? ; // Revoke Role Privileges @@ -600,7 +619,7 @@ fullMerge // Flush flush - : FLUSH prefixPath? (COMMA prefixPath)* BOOLEAN_LITERAL? + : FLUSH prefixPath? (COMMA prefixPath)* BOOLEAN_LITERAL? (ON (LOCAL | CLUSTER))? ; // Clear Cache @@ -613,6 +632,11 @@ settle : SETTLE (prefixPath|tsFilePath=STRING_LITERAL) ; +// Explain +explain + : EXPLAIN selectStatement + ; + // Set System To ReadOnly/Writable setSystemStatus : SET SYSTEM TO (READONLY|WRITABLE) @@ -789,14 +813,6 @@ wildcard ; -// Identifier - -identifier - : ID - | QUOTED_ID - ; - - // Constant & Literal constant @@ -845,6 +861,7 @@ expression | leftExpression=expression (PLUS | MINUS) rightExpression=expression | leftExpression=expression (OPERATOR_GT | OPERATOR_GTE | OPERATOR_LT | OPERATOR_LTE | OPERATOR_SEQ | OPERATOR_DEQ | OPERATOR_NEQ) rightExpression=expression | unaryBeforeRegularOrLikeExpression=expression (REGEXP | LIKE) STRING_LITERAL + | unaryBeforeIsNullExpression=expression OPERATOR_IS OPERATOR_NOT? NULL_LITERAL | unaryBeforeInExpression=expression OPERATOR_NOT? (OPERATOR_IN | OPERATOR_CONTAINS) LR_BRACKET constant (COMMA constant)* RR_BRACKET | leftExpression=expression OPERATOR_AND rightExpression=expression | leftExpression=expression OPERATOR_OR rightExpression=expression @@ -886,16 +903,12 @@ fromClause // Attribute Clause attributeClauses - : aliasNodeName? WITH DATATYPE operator_eq dataType=DATATYPE_VALUE - (COMMA ENCODING operator_eq encoding=ENCODING_VALUE)? - (COMMA (COMPRESSOR | COMPRESSION) operator_eq compressor=COMPRESSOR_VALUE)? + : aliasNodeName? WITH attributeKey operator_eq dataType=attributeValue (COMMA attributePair)* tagClause? attributeClause? // Simplified version (supported since v0.13) - | aliasNodeName? WITH? (DATATYPE operator_eq)? dataType=DATATYPE_VALUE - (ENCODING operator_eq encoding=ENCODING_VALUE)? - ((COMPRESSOR | COMPRESSION) operator_eq compressor=COMPRESSOR_VALUE)? + | aliasNodeName? WITH? (attributeKey operator_eq)? dataType=attributeValue attributePair* tagClause? attributeClause? 
@@ -914,7 +927,7 @@ attributeClause ; attributePair - : key=attributeKey (OPERATOR_SEQ | OPERATOR_DEQ) value=attributeValue + : key=attributeKey operator_eq value=attributeValue ; attributeKey diff --git a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/PathParser.g4 b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/PathParser.g4 new file mode 100644 index 000000000000..546be58b6eee --- /dev/null +++ b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/PathParser.g4 @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +parser grammar PathParser; + +options { tokenVocab=SqlLexer; } + +import IdentifierParser; + +/** + * PartialPath and Path used by Session API and TsFile API should be parsed by Antlr4. + */ + +path + : prefixPath EOF + | suffixPath EOF + ; + +prefixPath + : ROOT (DOT nodeName)* + ; + +suffixPath + : nodeName (DOT nodeName)* + ; + +nodeName + : wildcard + | wildcard? identifier wildcard? + | identifier + ; + +wildcard + : STAR + | DOUBLE_STAR + ; diff --git a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlLexer.g4 b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlLexer.g4 index f92fcbba5b4f..7e9b769e880e 100644 --- a/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlLexer.g4 +++ b/antlr/src/main/antlr4/org/apache/iotdb/db/qp/sql/SqlLexer.g4 @@ -32,7 +32,7 @@ WS /** - * 2. Keywords + * 2. 
Keywords, new keywords should be added into IdentifierParser.g4 */ // Common Keywords @@ -117,12 +117,8 @@ CLEAR : C L E A R ; -COMPRESSION - : C O M P R E S S I O N - ; - -COMPRESSOR - : C O M P R E S S O R +CLUSTER + : C L U S T E R ; CONCAT @@ -157,8 +153,8 @@ CREATE : C R E A T E ; -DATATYPE - : D A T A T Y P E +DATA + : D A T A ; DEBUG @@ -193,10 +189,6 @@ DROP : D R O P ; -ENCODING - : E N C O D I N G - ; - END : E N D ; @@ -310,6 +302,10 @@ LOAD : L O A D ; +LOCAL + : L O C A L + ; + LOCK : L O C K ; @@ -426,6 +422,10 @@ REGEXP : R E G E X P ; +REGIONS + : R E G I O N S + ; + REMOVE : R E M O V E ; @@ -619,103 +619,6 @@ WRITABLE ; -// Data Type Keywords - -DATATYPE_VALUE - : BOOLEAN | DOUBLE | FLOAT | INT32 | INT64 | TEXT - ; - -BOOLEAN - : B O O L E A N - ; - -DOUBLE - : D O U B L E - ; - -FLOAT - : F L O A T - ; - -INT32 - : I N T '3' '2' - ; - -INT64 - : I N T '6' '4' - ; - -TEXT - : T E X T - ; - - -// Encoding Type Keywords - -ENCODING_VALUE - : DICTIONARY | DIFF | GORILLA | PLAIN | REGULAR | RLE | TS_2DIFF | ZIGZAG | FREQ - ; - -DICTIONARY - : D I C T I O N A R Y - ; - -DIFF - : D I F F - ; - -GORILLA - : G O R I L L A - ; - -PLAIN - : P L A I N - ; - -REGULAR - : R E G U L A R - ; - -RLE - : R L E - ; - -TS_2DIFF - : T S '_' '2' D I F F - ; - -ZIGZAG - : Z I G Z A G - ; - -FREQ - : F R E Q - ; - - -// Compressor Type Keywords - -COMPRESSOR_VALUE - : GZIP | LZ4 | SNAPPY | UNCOMPRESSED - ; - -GZIP - : G Z I P - ; - -LZ4 - : L Z '4' - ; - -SNAPPY - : S N A P P Y - ; - -UNCOMPRESSED - : U N C O M P R E S S E D - ; - - // Privileges Keywords PRIVILEGE_VALUE @@ -870,6 +773,8 @@ OPERATOR_LT : '<'; OPERATOR_LTE : '<='; OPERATOR_NEQ : '!=' | '<>'; +OPERATOR_IS : I S; + OPERATOR_IN : I N; OPERATOR_AND @@ -973,9 +878,8 @@ NAN_LITERAL : N A N ; - /** - * 6. Identifier + * 6. ID */ ID @@ -1005,15 +909,15 @@ fragment CN_CHAR ; fragment DQUOTA_STRING - : '"' ( '\\'. | '""' | ~('"'| '\\') )* '"' + : '"' ( '\\'. | '""' | ~('"') )* '"' ; fragment SQUOTA_STRING - : '\'' ( '\\'. | '\'\'' |~('\''| '\\') )* '\'' + : '\'' ( '\\'. | '\'\'' |~('\'') )* '\'' ; fragment BQUOTA_STRING - : '`' ( '\\' ~('`') | '``' | ~('`'| '\\') )* '`' + : '`' ( '\\' ~('`') | '``' | ~('`') )* '`' ; diff --git a/checkstyle.xml b/checkstyle.xml index 4d4eb175a92d..f74e443f9e7c 100644 --- a/checkstyle.xml +++ b/checkstyle.xml @@ -40,7 +40,25 @@ + + + + + + + + + + + + + + + + + + @@ -52,13 +70,6 @@ - - - - - - - @@ -212,13 +223,10 @@ - - + - - diff --git a/cli/src/assembly/resources/sbin/start-cli.bat b/cli/src/assembly/resources/sbin/start-cli.bat index 21bb4000c522..cbd375e6f2b7 100644 --- a/cli/src/assembly/resources/sbin/start-cli.bat +++ b/cli/src/assembly/resources/sbin/start-cli.bat @@ -37,7 +37,7 @@ set JAVA_OPTS=-ea^ -DIOTDB_HOME="%IOTDB_HOME%" REM For each jar in the IOTDB_HOME lib directory call append to build the CLASSPATH variable. 
-set CLASSPATH="%IOTDB_HOME%\lib\*" +if EXIST %IOTDB_HOME%\lib (set CLASSPATH="%IOTDB_HOME%\lib\*") else set CLASSPATH="%IOTDB_HOME%\..\lib\*" REM ----------------------------------------------------------------------------- set PARAMETERS=%* diff --git a/cli/src/assembly/resources/sbin/start-cli.sh b/cli/src/assembly/resources/sbin/start-cli.sh index 20fb4506a027..dbeedc725059 100644 --- a/cli/src/assembly/resources/sbin/start-cli.sh +++ b/cli/src/assembly/resources/sbin/start-cli.sh @@ -30,8 +30,14 @@ IOTDB_CLI_CONF=${IOTDB_HOME}/conf MAIN_CLASS=org.apache.iotdb.cli.Cli +if [ -d ${IOTDB_HOME}/lib ]; then +LIB_PATH=${IOTDB_HOME}/lib +else +LIB_PATH=${IOTDB_HOME}/../lib +fi + CLASSPATH="" -for f in ${IOTDB_HOME}/lib/*.jar; do +for f in ${LIB_PATH}/*.jar; do CLASSPATH=${CLASSPATH}":"$f done diff --git a/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java b/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java index 14374603173a..ae0b97f31890 100644 --- a/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java +++ b/cli/src/main/java/org/apache/iotdb/tool/ImportCsv.java @@ -714,8 +714,7 @@ private static boolean queryType( try { sessionDataSet = session.executeQueryStatement(sql); } catch (StatementExecutionException e) { - System.out.println( - "Meet error when query the type of timeseries because the IoTDB v0.13 don't support that the path contains any purely digital path."); + System.out.println("Meet error when query the type of timeseries because " + e.getMessage()); return false; } List columnNames = sessionDataSet.getColumnNames(); diff --git a/client-py/README.md b/client-py/README.md index 41c0a113b8f1..beef55e3513b 100644 --- a/client-py/README.md +++ b/client-py/README.md @@ -273,6 +273,98 @@ session.execute_query_statement(sql) session.execute_non_query_statement(sql) ``` +* Execute statement + +```python +session.execute_statement(sql) +``` + +### Schema Template +#### Create Schema Template +The step for creating a metadata template is as follows +1. Create the template class +2. Adding child Node,InternalNode and MeasurementNode can be chose +3. Execute create schema template function + +```python +template = Template(name=template_name, share_time=True) + +i_node_gps = InternalNode(name="GPS", share_time=False) +i_node_v = InternalNode(name="vehicle", share_time=True) +m_node_x = MeasurementNode("x", TSDataType.FLOAT, TSEncoding.RLE, Compressor.SNAPPY) + +i_node_gps.add_child(m_node_x) +i_node_v.add_child(m_node_x) + +template.add_template(i_node_gps) +template.add_template(i_node_v) +template.add_template(m_node_x) + +session.create_schema_template(template) +``` +#### Modify Schema Template nodes +Modify nodes in a template, the template must be already created. These are functions that add or delete some measurement nodes. 
+* Add a node to the template
+```python
+session.add_measurements_in_template(template_name, measurements_path, data_types, encodings, compressors, is_aligned)
+```
+
+* Delete a node from the template
+```python
+session.delete_node_in_template(template_name, path)
+```
+
+#### Set Schema Template
+```python
+session.set_schema_template(template_name, prefix_path)
+```
+
+#### Unset Schema Template
+```python
+session.unset_schema_template(template_name, prefix_path)
+```
+
+#### Show Schema Template
+* Show all schema templates
+```python
+session.show_all_templates()
+```
+* Count the measurement nodes in a template
+```python
+session.count_measurements_in_template(template_name)
+```
+
+* Check whether a node in the template is a measurement; the path must exist in the template
+```python
+session.is_measurement_in_template(template_name, path)
+```
+
+* Check whether a path exists in the template; the path does not have to belong to the template
+```python
+session.is_path_exist_in_template(template_name, path)
+```
+
+* Show the measurement nodes in a schema template
+```python
+session.show_measurements_in_template(template_name)
+```
+
+* Show the path prefixes on which a schema template is set
+```python
+session.show_paths_template_set_on(template_name)
+```
+
+* Show the path prefixes on which a schema template is used (i.e. time series have been created under it)
+```python
+session.show_paths_template_using_on(template_name)
+```
+
+#### Drop Schema Template
+Delete an existing metadata template; dropping a template that is still set on some path is not supported
+```python
+session.drop_schema_template("template_python")
+```
+

 ### Pandas Support

@@ -322,6 +414,150 @@ class MyTestCase(unittest.TestCase):
 by default it will load the image `apache/iotdb:latest`, if you want a specific version just pass it like e.g.
 `IoTDBContainer("apache/iotdb:0.12.0")` to get version `0.12.0` running.

+### IoTDB DBAPI
+
+IoTDB DBAPI implements the Python DB API 2.0 specification (https://peps.python.org/pep-0249/), which defines a common
+interface for accessing databases in Python.
+
+#### Examples
++ Initialization
+
+The initialization parameters are consistent with those of the Session module (except for `sqlalchemy_mode`).
+```python
+from iotdb.dbapi import connect
+
+ip = "127.0.0.1"
+port_ = "6667"
+username_ = "root"
+password_ = "root"
+conn = connect(ip, port_, username_, password_,fetch_size=1024,zone_id="UTC+8",sqlalchemy_mode=False)
+cursor = conn.cursor()
+```
++ Simple SQL statement execution
+```python
+cursor.execute("SELECT * FROM root.*")
+for row in cursor.fetchall():
+    print(row)
+```
+
++ Execute SQL with parameters
+
+IoTDB DBAPI supports pyformat-style parameters:
+```python
+cursor.execute("SELECT * FROM root.* WHERE time < %(time)s",{"time":"2017-11-01T00:08:00.000"})
+for row in cursor.fetchall():
+    print(row)
+```
+
++ Execute SQL with parameter sequences
+```python
+seq_of_parameters = [
+    {"timestamp": 1, "temperature": 1},
+    {"timestamp": 2, "temperature": 2},
+    {"timestamp": 3, "temperature": 3},
+    {"timestamp": 4, "temperature": 4},
+    {"timestamp": 5, "temperature": 5},
+]
+sql = "insert into root.cursor(timestamp,temperature) values(%(timestamp)s,%(temperature)s)"
+cursor.executemany(sql,seq_of_parameters)
+```
+
++ Close the cursor and the connection
+```python
+cursor.close()
+conn.close()
+```
+
+### IoTDB SQLAlchemy Dialect (Experimental)
+The SQLAlchemy dialect of IoTDB is written to adapt IoTDB to Apache Superset.
+This part is still being improved; please do not use it in a production environment!
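Before the mapping details below, a minimal reflection sketch may help readers who know SQLAlchemy but not IoTDB. It assumes the experimental dialect wires up SQLAlchemy's standard inspection hooks and that a storage group such as `root.factory` already exists; neither is guaranteed by this section.

```python
from sqlalchemy import create_engine, inspect

# Connect through the experimental IoTDB dialect (the URL format is also used in the Example section below).
engine = create_engine("iotdb://root:root@127.0.0.1:6667")

inspector = inspect(engine)
# Schemas are expected to correspond to IoTDB storage groups, e.g. "root.factory".
print(inspector.get_schema_names())
# Tables are expected to correspond to device paths under a storage group, e.g. "room2.device1".
print(inspector.get_table_names(schema="root.factory"))
```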
+#### Mapping of the metadata +The data model used by SQLAlchemy is a relational data model, which describes the relationships between different entities through tables. +While the data model of IoTDB is a hierarchical data model, which organizes the data through a tree structure. +In order to adapt IoTDB to the dialect of SQLAlchemy, the original data model in IoTDB needs to be reorganized. +Converting the data model of IoTDB into the data model of SQLAlchemy. + +The metadata in the IoTDB are: + +1. Storage Group +2. Path +3. Entity +4. Measurement + +The metadata in the SQLAlchemy are: +1. Schema +2. Table +3. Column + +The mapping relationship between them is: + +| The metadata in the SQLAlchemy | The metadata in the IoTDB | +| -------------------- | ---------------------------------------------- | +| Schema | Storage Group | +| Table | Path ( from storage group to entity ) + Entity | +| Column | Measurement | + +The following figure shows the relationship between the two more intuitively: + +![sqlalchemy-to-iotdb](https://github.com/apache/iotdb-bin-resources/blob/main/docs/UserGuide/API/IoTDB-SQLAlchemy/sqlalchemy-to-iotdb.png?raw=true) + +#### Data type mapping +| data type in IoTDB | data type in SQLAlchemy | +|--------------------|-------------------------| +| BOOLEAN | Boolean | +| INT32 | Integer | +| INT64 | BigInteger | +| FLOAT | Float | +| DOUBLE | Float | +| TEXT | Text | +| LONG | BigInteger | +#### Example + ++ execute statement + +```python +from sqlalchemy import create_engine + +engine = create_engine("iotdb://root:root@127.0.0.1:6667") +connect = engine.connect() +result = connect.execute("SELECT ** FROM root") +for row in result.fetchall(): + print(row) +``` + ++ ORM (now only simple queries are supported) + +```python +from sqlalchemy import create_engine, Column, Float, BigInteger, MetaData +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker + +metadata = MetaData( + schema='root.factory' +) +Base = declarative_base(metadata=metadata) + + +class Device(Base): + __tablename__ = "room2.device1" + Time = Column(BigInteger, primary_key=True) + temperature = Column(Float) + status = Column(Float) + + +engine = create_engine("iotdb://root:root@127.0.0.1:6667") + +DbSession = sessionmaker(bind=engine) +session = DbSession() + +res = session.query(Device.status).filter(Device.temperature > 1) + +for row in res: + print(row) +``` + + ## Developers ### Introduction diff --git a/client-py/SessionExample.py b/client-py/SessionExample.py index 93aa839c3bd4..61e82234dbf9 100644 --- a/client-py/SessionExample.py +++ b/client-py/SessionExample.py @@ -20,6 +20,9 @@ import numpy as np from iotdb.Session import Session +from iotdb.template.InternalNode import InternalNode +from iotdb.template.MeasurementNode import MeasurementNode +from iotdb.template.Template import Template from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor from iotdb.utils.Tablet import Tablet from iotdb.utils.NumpyTablet import NumpyTablet @@ -280,6 +283,17 @@ while session_data_set.has_next(): print(session_data_set.next()) +# execute statement +with session.execute_statement( + "select * from root.sg_test_01.d_01" +) as session_data_set: + while session_data_set.has_next(): + print(session_data_set.next()) + +session.execute_statement( + "insert into root.sg_test_01.d_01(timestamp, s_02) values(16, 188)" +) + # insert string records of one device time_list = [1, 2, 3] measurements_list = [ @@ -313,6 +327,90 @@ # delete storage group 
session.delete_storage_group("root.sg_test_01") +# create measurement node template +template = Template(name="template_python", share_time=False) +m_node_1 = MeasurementNode( + name="s1", + data_type=TSDataType.INT64, + encoding=TSEncoding.RLE, + compression_type=Compressor.SNAPPY, +) +m_node_2 = MeasurementNode( + name="s2", + data_type=TSDataType.INT64, + encoding=TSEncoding.RLE, + compression_type=Compressor.SNAPPY, +) +m_node_3 = MeasurementNode( + name="s3", + data_type=TSDataType.INT64, + encoding=TSEncoding.RLE, + compression_type=Compressor.SNAPPY, +) +template.add_template(m_node_1) +template.add_template(m_node_2) +template.add_template(m_node_3) +session.create_schema_template(template) +print("create template success template_python") + +# create internal node template +template_name = "treeTemplate_python" +template = Template(name=template_name, share_time=True) +i_node_gps = InternalNode(name="GPS", share_time=False) +i_node_v = InternalNode(name="vehicle", share_time=True) +m_node_x = MeasurementNode("x", TSDataType.FLOAT, TSEncoding.RLE, Compressor.SNAPPY) + +i_node_gps.add_child(m_node_x) +i_node_v.add_child(m_node_x) +template.add_template(i_node_gps) +template.add_template(i_node_v) +template.add_template(m_node_x) + +session.create_schema_template(template) +print("create template success treeTemplate_python}") + +print(session.is_measurement_in_template(template_name, "GPS")) +print(session.is_measurement_in_template(template_name, "GPS.x")) +print(session.show_all_templates()) + +# # append schema template +data_types = [TSDataType.FLOAT, TSDataType.FLOAT, TSDataType.DOUBLE] +encoding_list = [TSEncoding.RLE, TSEncoding.RLE, TSEncoding.GORILLA] +compressor_list = [Compressor.SNAPPY, Compressor.SNAPPY, Compressor.LZ4] + +measurements_aligned_path = ["aligned.s1", "aligned.s2", "aligned.s3"] +session.add_measurements_in_template( + template_name, + measurements_aligned_path, + data_types, + encoding_list, + compressor_list, + is_aligned=True, +) +# session.drop_schema_template("add_template_python") +measurements_aligned_path = ["unaligned.s1", "unaligned.s2", "unaligned.s3"] +session.add_measurements_in_template( + template_name, + measurements_aligned_path, + data_types, + encoding_list, + compressor_list, + is_aligned=False, +) +session.delete_node_in_template(template_name, "aligned.s1") +print(session.count_measurements_in_template(template_name)) +print(session.is_path_exist_in_template(template_name, "aligned.s1")) +print(session.is_path_exist_in_template(template_name, "aligned.s2")) + +session.set_schema_template(template_name, "root.python.set") +print(session.show_paths_template_using_on(template_name)) +print(session.show_paths_template_set_on(template_name)) +session.unset_schema_template(template_name, "root.python.set") + +# drop template +session.drop_schema_template("template_python") +session.drop_schema_template(template_name) +print("drop template success, template_python and treeTemplate_python") # close session connection. 
session.close() diff --git a/client-py/iotdb/Session.py b/client-py/iotdb/Session.py index 780ecc7d7c35..c5a83d3e053f 100644 --- a/client-py/iotdb/Session.py +++ b/client-py/iotdb/Session.py @@ -18,12 +18,12 @@ import logging import struct import time - -from iotdb.utils.SessionDataSet import SessionDataSet - from thrift.protocol import TBinaryProtocol, TCompactProtocol from thrift.transport import TSocket, TTransport +from iotdb.utils.SessionDataSet import SessionDataSet +from .template.Template import Template +from .template.TemplateQueryType import TemplateQueryType from .thrift.rpc.TSIService import ( Client, TSCreateTimeseriesReq, @@ -38,6 +38,13 @@ TSInsertTabletsReq, TSInsertRecordsReq, TSInsertRecordsOfOneDeviceReq, + TSCreateSchemaTemplateReq, + TSDropSchemaTemplateReq, + TSAppendSchemaTemplateReq, + TSPruneSchemaTemplateReq, + TSSetSchemaTemplateReq, + TSUnsetSchemaTemplateReq, + TSQueryTemplateReq, ) from .thrift.rpc.ttypes import ( TSDeleteDataReq, @@ -47,7 +54,6 @@ TSLastDataQueryReq, TSInsertStringRecordsOfOneDeviceReq, ) - # for debug # from IoTDBConstants import * # from SessionDataSet import SessionDataSet @@ -1027,12 +1033,19 @@ def verify_success(status): if status.code == Session.SUCCESS_CODE: return 0 - logger.error("error status is", status) + logger.error("error status is %s", status) return -1 def execute_raw_data_query( self, paths: list, start_time: int, end_time: int ) -> SessionDataSet: + """ + execute query statement and returns SessionDataSet + :param paths: String path list + :param start_time: Query start time + :param end_time: Query end time + :return: SessionDataSet, contains query results and relevant info (see SessionDataSet.py) + """ request = TSRawDataQueryReq( self.__session_id, paths, @@ -1057,6 +1070,12 @@ def execute_raw_data_query( ) def execute_last_data_query(self, paths: list, last_time: int) -> SessionDataSet: + """ + execute query statement and returns SessionDataSet + :param paths: String path list + :param last_time: Query last time + :return: SessionDataSet, contains query results and relevant info (see SessionDataSet.py) + """ request = TSLastDataQueryReq( self.__session_id, paths, @@ -1088,6 +1107,16 @@ def insert_string_records_of_one_device( values_list: list, have_sorted: bool = False, ): + """ + insert multiple row of string record into database: + timestamp, m1, m2, m3 + 0, text1, text2, text3 + :param device_id: String, device id + :param times: Timestamp list + :param measurements_list: Measurements list + :param values_list: Value list + :param have_sorted: have these list been sorted by timestamp + """ if (len(times) != len(measurements_list)) or (len(times) != len(values_list)): raise RuntimeError( "insert records of one device error: times, measurementsList and valuesList's size should be equal!" 
@@ -1151,3 +1180,274 @@ def gen_insert_string_records_of_one_device_request( is_aligned, ) return request + + def create_schema_template(self, template: Template): + """ + create schema template, users using this method should use the template class as an argument + :param template: The template contains multiple child node(see Template.py) + """ + bytes_array = template.serialize + request = TSCreateSchemaTemplateReq( + self.__session_id, template.get_name(), bytes_array + ) + status = self.__client.createSchemaTemplate(request) + logger.debug( + "create one template {} template name: {}".format( + self.__session_id, template.get_name() + ) + ) + return Session.verify_success(status) + + def drop_schema_template(self, template_name: str): + """ + drop schema template, this method should be used to the template unset anything + :param template_name: template name + """ + request = TSDropSchemaTemplateReq(self.__session_id, template_name) + status = self.__client.dropSchemaTemplate(request) + logger.debug( + "drop one template {} template name: {}".format( + self.__session_id, template_name + ) + ) + return Session.verify_success(status) + + def execute_statement(self, sql: str, timeout=0): + request = TSExecuteStatementReq( + self.__session_id, sql, self.__statement_id, self.__fetch_size, timeout + ) + try: + resp = self.__client.executeStatement(request) + status = resp.status + logger.debug("execute statement {} message: {}".format(sql, status.message)) + if Session.verify_success(status) == 0: + if resp.columns: + return SessionDataSet( + sql, + resp.columns, + resp.dataTypeList, + resp.columnNameIndexMap, + resp.queryId, + self.__client, + self.__statement_id, + self.__session_id, + resp.queryDataSet, + resp.ignoreTimeStamp, + ) + else: + return None + else: + raise RuntimeError( + "execution of statement fails because: {}", status.message + ) + except TTransport.TException as e: + raise RuntimeError("execution of statement fails because: ", e) + + def add_measurements_in_template( + self, + template_name: str, + measurements_path: list, + data_types: list, + encodings: list, + compressors: list, + is_aligned: bool = False, + ): + """ + add measurements in the template, the template must already create. This function adds some measurements node. 
+ :param template_name: template name, string list, like ["name_x", "name_y", "name_z"] + :param measurements_path: when ths is_aligned is True, recommend the name like a.b, like [python.x, python.y, iotdb.z] + :param data_types: using TSDataType(see IoTDBConstants.py) + :param encodings: using TSEncoding(see IoTDBConstants.py) + :param compressors: using Compressor(see IoTDBConstants.py) + :param is_aligned: True is aligned, False is unaligned + """ + request = TSAppendSchemaTemplateReq( + self.__session_id, + template_name, + is_aligned, + measurements_path, + list(map(lambda x: x.value, data_types)), + list(map(lambda x: x.value, encodings)), + list(map(lambda x: x.value, compressors)), + ) + status = self.__client.appendSchemaTemplate(request) + logger.debug( + "append unaligned template {} template name: {}".format( + self.__session_id, template_name + ) + ) + return Session.verify_success(status) + + def delete_node_in_template(self, template_name: str, path: str): + """ + delete a node in the template, this node must be already in the template + :param template_name: template name + :param path: measurements path + """ + request = TSPruneSchemaTemplateReq(self.__session_id, template_name, path) + status = self.__client.pruneSchemaTemplate(request) + logger.debug( + "append unaligned template {} template name: {}".format( + self.__session_id, template_name + ) + ) + return Session.verify_success(status) + + def set_schema_template(self, template_name, prefix_path): + """ + set template in prefix path, template already exit, prefix path is not measurements + :param template_name: template name + :param prefix_path: prefix path + """ + request = TSSetSchemaTemplateReq(self.__session_id, template_name, prefix_path) + status = self.__client.setSchemaTemplate(request) + logger.debug( + "set schema template to path{} template name: {}, path:{}".format( + self.__session_id, template_name, prefix_path + ) + ) + return Session.verify_success(status) + + def unset_schema_template(self, template_name, prefix_path): + """ + unset schema template from prefix path, this method unsetting the template from entities, + which have already inserted records using the template, is not supported. 
+ :param template_name: template name + :param prefix_path: + """ + request = TSUnsetSchemaTemplateReq( + self.__session_id, prefix_path, template_name + ) + status = self.__client.unsetSchemaTemplate(request) + logger.debug( + "set schema template to path{} template name: {}, path:{}".format( + self.__session_id, template_name, prefix_path + ) + ) + return Session.verify_success(status) + + def count_measurements_in_template(self, template_name: str): + """ + drop schema template, this method should be used to the template unset anything + :param template_name: template name + """ + request = TSQueryTemplateReq( + self.__session_id, + template_name, + TemplateQueryType.COUNT_MEASUREMENTS.value, + ) + response = self.__client.querySchemaTemplate(request) + logger.debug( + "count measurements template {}, template name is {}, count is {}".format( + self.__session_id, template_name, response.measurements + ) + ) + return response.count + + def is_measurement_in_template(self, template_name: str, path: str): + """ + judge the node in the template is measurement or not, this node must in the template + :param template_name: template name + :param path: + """ + request = TSQueryTemplateReq( + self.__session_id, + template_name, + TemplateQueryType.IS_MEASUREMENT.value, + path, + ) + response = self.__client.querySchemaTemplate(request) + logger.debug( + "judge the path is measurement or not in template {}, template name is {}, result is {}".format( + self.__session_id, template_name, response.result + ) + ) + return response.result + + def is_path_exist_in_template(self, template_name: str, path: str): + """ + judge whether the node is a measurement or not in the template, this node must be in the template + :param template_name: template name + :param path: + """ + request = TSQueryTemplateReq( + self.__session_id, template_name, TemplateQueryType.PATH_EXIST.value, path + ) + response = self.__client.querySchemaTemplate(request) + logger.debug( + "judge the path is in template or not {}, template name is {}, result is {}".format( + self.__session_id, template_name, response.result + ) + ) + return response.result + + def show_measurements_in_template(self, template_name: str, pattern: str = ""): + """ + show all measurements under the pattern in template + :param template_name: template name + :param pattern: parent path, if default, show all measurements + """ + request = TSQueryTemplateReq( + self.__session_id, + template_name, + TemplateQueryType.SHOW_MEASUREMENTS.value, + pattern, + ) + response = self.__client.querySchemaTemplate(request) + logger.debug( + "show measurements in template {}, template name is {}, result is {}".format( + self.__session_id, template_name, response.measurements + ) + ) + return response.measurements + + def show_all_templates(self): + """ + show all schema templates + """ + request = TSQueryTemplateReq( + self.__session_id, + "", + TemplateQueryType.SHOW_TEMPLATES.value, + ) + response = self.__client.querySchemaTemplate(request) + logger.debug( + "show all template {}, measurements is {}".format( + self.__session_id, response.measurements + ) + ) + return response.measurements + + def show_paths_template_set_on(self, template_name): + """ + show the path prefix where a schema template is set + :param template_name: + """ + request = TSQueryTemplateReq( + self.__session_id, template_name, TemplateQueryType.SHOW_SET_TEMPLATES.value + ) + response = self.__client.querySchemaTemplate(request) + logger.debug( + "show paths template set {}, on {}".format( + 
self.__session_id, response.measurements + ) + ) + return response.measurements + + def show_paths_template_using_on(self, template_name): + """ + show the path prefix where a schema template is used + :param template_name: + """ + request = TSQueryTemplateReq( + self.__session_id, + template_name, + TemplateQueryType.SHOW_USING_TEMPLATES.value, + ) + response = self.__client.querySchemaTemplate(request) + logger.debug( + "show paths template using {}, on {}".format( + self.__session_id, response.measurements + ) + ) + return response.measurements diff --git a/client-py/iotdb/dbapi/Connection.py b/client-py/iotdb/dbapi/Connection.py new file mode 100644 index 000000000000..aee5520e9af9 --- /dev/null +++ b/client-py/iotdb/dbapi/Connection.py @@ -0,0 +1,91 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import logging + +from iotdb.Session import Session + +from .Cursor import Cursor +from .Exceptions import ConnectionError, ProgrammingError + +logger = logging.getLogger("IoTDB") + + +class Connection(object): + def __init__( + self, + host, + port, + username=Session.DEFAULT_USER, + password=Session.DEFAULT_PASSWORD, + fetch_size=Session.DEFAULT_FETCH_SIZE, + zone_id=Session.DEFAULT_ZONE_ID, + enable_rpc_compression=False, + sqlalchemy_mode=False, + ): + self.__session = Session(host, port, username, password, fetch_size, zone_id) + self.__sqlalchemy_mode = sqlalchemy_mode + self.__is_close = True + try: + self.__session.open(enable_rpc_compression) + self.__is_close = False + except Exception as e: + raise ConnectionError(e) + + def close(self): + """ + Close the connection now + """ + if self.__is_close: + return + self.__session.close() + self.__is_close = True + + def cursor(self): + """ + Return a new Cursor Object using the connection. + """ + if not self.__is_close: + return Cursor(self, self.__session, self.__sqlalchemy_mode) + else: + raise ProgrammingError("Connection closed") + + def commit(self): + """ + Not supported method. + """ + pass + + def rollback(self): + """ + Not supported method. + """ + pass + + @property + def is_close(self): + """ + This read-only attribute specified whether the object is closed + """ + return self.__is_close + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() diff --git a/client-py/iotdb/dbapi/Cursor.py b/client-py/iotdb/dbapi/Cursor.py new file mode 100644 index 000000000000..a1d6e2caabac --- /dev/null +++ b/client-py/iotdb/dbapi/Cursor.py @@ -0,0 +1,288 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import logging +import warnings + +from iotdb.Session import Session + +from .Exceptions import ProgrammingError + +logger = logging.getLogger("IoTDB") + + +class Cursor(object): + def __init__(self, connection, session: Session, sqlalchemy_mode): + self.__connection = connection + self.__session = session + self.__sqlalchemy_mode = sqlalchemy_mode + self.__arraysize = 1 + self.__is_close = False + self.__result = None + self.__rows = None + self.__rowcount = -1 + + @property + def description(self): + """ + This read-only attribute is a sequence of 7-item sequences. + """ + if self.__is_close or not self.__result["col_names"]: + return + + description = [] + + col_names = self.__result["col_names"] + col_types = self.__result["col_types"] + + for i in range(len(col_names)): + description.append( + ( + col_names[i], + None if self.__sqlalchemy_mode is True else col_types[i].value, + None, + None, + None, + None, + col_names[i] == "Time", + ) + ) + return tuple(description) + + @property + def arraysize(self): + """ + This read/write attribute specifies the number of rows to fetch at a time with .fetchmany(). + """ + return self.__arraysize + + @arraysize.setter + def arraysize(self, value): + """ + Set the arraysize. + :param value: arraysize + """ + try: + self.__arraysize = int(value) + except TypeError: + self.__arraysize = 1 + + @property + def rowcount(self): + """ + This read-only attribute specifies the number of rows that the last + .execute*() produced (for DQL statements like ``SELECT``) or affected + (for DML statements like ``DELETE`` or ``INSERT`` return 0 if successful, -1 if unsuccessful). + """ + if self.__is_close or self.__result is None or "row_count" not in self.__result: + return -1 + return self.__result.get("row_count", -1) + + def execute(self, operation, parameters=None): + """ + Prepare and execute a database operation (query or command). 
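+        Query results are buffered on this cursor and read back through fetchone(), fetchmany() or fetchall();
+        statements that return no result set leave the row buffer empty.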
+ :param operation: a database operation + :param parameters: parameters of the operation + """ + if self.__connection.is_close: + raise ProgrammingError("Connection closed!") + + if self.__is_close: + raise ProgrammingError("Cursor closed!") + + if parameters is None: + sql = operation + else: + sql = operation % parameters + + time_index = [] + time_names = [] + if self.__sqlalchemy_mode: + sql_seqs = [] + seqs = sql.split("\n") + for seq in seqs: + if seq.find("FROM Time Index") >= 0: + time_index = [ + int(index) + for index in seq.replace("FROM Time Index", "").split() + ] + elif seq.find("FROM Time Name") >= 0: + time_names = [ + name for name in seq.replace("FROM Time Name", "").split() + ] + else: + sql_seqs.append(seq) + sql = "\n".join(sql_seqs) + + try: + data_set = self.__session.execute_statement(sql) + col_names = None + col_types = None + rows = [] + + if data_set: + data = data_set.todf() + + if self.__sqlalchemy_mode and time_index: + time_column = data.columns[0] + time_column_value = data.Time + del data[time_column] + for i in range(len(time_index)): + data.insert(time_index[i], time_names[i], time_column_value) + + col_names = data.columns.tolist() + col_types = data_set.get_column_types() + rows = data.values.tolist() + data_set.close_operation_handle() + + self.__result = { + "col_names": col_names, + "col_types": col_types, + "rows": rows, + "row_count": len(rows), + } + except Exception: + logger.error("failed to execute statement:{}".format(sql)) + self.__result = { + "col_names": None, + "col_types": None, + "rows": [], + "row_count": -1, + } + self.__rows = iter(self.__result["rows"]) + + def executemany(self, operation, seq_of_parameters=None): + """ + Prepare a database operation (query or command) and then execute it + against all parameter sequences or mappings found in the sequence + ``seq_of_parameters`` + :param operation: a database operation + :param seq_of_parameters: pyformat style parameter list of the operation + """ + if self.__connection.is_close: + raise ProgrammingError("Connection closed!") + + if self.__is_close: + raise ProgrammingError("Cursor closed!") + + rows = [] + if seq_of_parameters is None: + self.execute(operation) + rows.extend(self.__result["rows"]) + else: + for parameters in seq_of_parameters: + self.execute(operation, parameters) + rows.extend(self.__result["rows"]) + + self.__result["rows"] = rows + self.__rows = iter(self.__result["rows"]) + + def fetchone(self): + """ + Fetch the next row of a query result set, returning a single sequence, + or None when no more data is available. + Alias for ``next()``. + """ + try: + return self.next() + except StopIteration: + return None + + def fetchmany(self, count=None): + """ + Fetch the next set of rows of a query result, returning a sequence of + sequences (e.g. a list of tuples). An empty sequence is returned when + no more rows are available. + """ + if count is None: + count = self.__arraysize + if count == 0: + return self.fetchall() + result = [] + for i in range(count): + try: + result.append(self.next()) + except StopIteration: + pass + return result + + def fetchall(self): + """ + Fetch all (remaining) rows of a query result, returning them as a + sequence of sequences (e.g. a list of tuples). Note that the cursor's + arraysize attribute can affect the performance of this operation. 
+ """ + result = [] + iterate = True + while iterate: + try: + result.append(self.next()) + except StopIteration: + iterate = False + return result + + def next(self): + """ + Return the next row of a query result set, respecting if cursor was + closed. + """ + if self.__result is None: + raise ProgrammingError( + "No result available. execute() or executemany() must be called first." + ) + elif not self.__is_close: + return next(self.__rows) + else: + raise ProgrammingError("Cursor closed!") + + __next__ = next + + def close(self): + """ + Close the cursor now. + """ + self.__is_close = True + self.__result = None + + def setinputsizes(self, sizes): + """ + Not supported method. + """ + pass + + def setoutputsize(self, size, column=None): + """ + Not supported method. + """ + pass + + def __iter__(self): + """ + Support iterator interface: + http://legacy.python.org/dev/peps/pep-0249/#iter + This iterator is shared. Advancing this iterator will advance other + iterators created from this cursor. + """ + warnings.warn("DB-API extension cursor.__iter__() used") + return self + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() diff --git a/client-py/iotdb/dbapi/Exceptions.py b/client-py/iotdb/dbapi/Exceptions.py new file mode 100644 index 000000000000..d58689c86930 --- /dev/null +++ b/client-py/iotdb/dbapi/Exceptions.py @@ -0,0 +1,61 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + + +class Error(Exception): + pass + + +class Warning(Exception): + pass + + +class DatabaseError(Error): + pass + + +class DataError(DatabaseError): + pass + + +class InterfaceError(Error): + pass + + +class InternalError(DatabaseError): + pass + + +class IntegrityError(DatabaseError): + pass + + +class OperationalError(DatabaseError): + pass + + +class ProgrammingError(DatabaseError): + pass + + +class NotSupportedError(DatabaseError): + pass + + +class ConnectionError(DatabaseError): + pass diff --git a/client-py/iotdb/dbapi/__init__.py b/client-py/iotdb/dbapi/__init__.py new file mode 100644 index 000000000000..9f8006175008 --- /dev/null +++ b/client-py/iotdb/dbapi/__init__.py @@ -0,0 +1,26 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from .Connection import Connection as connect +from .Exceptions import Error + +__all__ = [connect, Error] + +apilevel = "2.0" +threadsafety = 2 +paramstyle = "pyformat" diff --git a/client-py/iotdb/dbapi/tests/__init__.py b/client-py/iotdb/dbapi/tests/__init__.py new file mode 100644 index 000000000000..2a1e720805f2 --- /dev/null +++ b/client-py/iotdb/dbapi/tests/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# diff --git a/client-py/iotdb/dbapi/tests/test_connection.py b/client-py/iotdb/dbapi/tests/test_connection.py new file mode 100644 index 000000000000..cb1f6c1e65b0 --- /dev/null +++ b/client-py/iotdb/dbapi/tests/test_connection.py @@ -0,0 +1,57 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
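With apilevel, threadsafety and paramstyle declared above, the dbapi package exposes a PEP 249 style interface. A minimal usage sketch follows; host, port, credentials and the root.demo path are placeholders, and the target storage group is assumed to exist (or to be auto-created by the server):

from iotdb.dbapi import connect

conn = connect("127.0.0.1", 6667, username="root", password="root")
cursor = conn.cursor()

# paramstyle is "pyformat", so parameters are bound with %(name)s markers
cursor.execute(
    "insert into root.demo(timestamp,temperature) values(%(ts)s,%(temp)s)",
    {"ts": 1, "temp": 36.5},
)

cursor.execute("select * from root.demo")
for row in cursor.fetchall():
    print(row)

cursor.close()
conn.close()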
+# + +from iotdb.IoTDBContainer import IoTDBContainer +from iotdb.dbapi import connect + +final_flag = True +failed_count = 0 + + +def test_fail(): + global failed_count + global final_flag + final_flag = False + failed_count += 1 + + +def print_message(message): + print("*********") + print(message) + print("*********") + + +def test_connection(): + with IoTDBContainer("iotdb:dev") as db: + db: IoTDBContainer + conn = connect(db.get_container_host_ip(), db.get_exposed_port(6667)) + if conn.is_close: + print("can't create connect") + exit(1) + conn.close() + if not conn.is_close: + test_fail() + print_message("failed to close the connection!") + + +if final_flag: + print("All executions done!!") +else: + print("Some test failed, please have a check") + print("failed count: ", failed_count) + exit(1) diff --git a/client-py/iotdb/dbapi/tests/test_cursor.py b/client-py/iotdb/dbapi/tests/test_cursor.py new file mode 100644 index 000000000000..6cd42257dc03 --- /dev/null +++ b/client-py/iotdb/dbapi/tests/test_cursor.py @@ -0,0 +1,123 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
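Because Connection and Cursor both implement the context-manager protocol, and Cursor additionally supports the optional DB-API iterator extension shown above, the same flow can be written more compactly. A sketch with the same placeholder endpoint:

from iotdb.dbapi import connect

with connect("127.0.0.1", 6667) as conn:
    with conn.cursor() as cursor:
        cursor.execute("select * from root.demo")
        for row in cursor:  # emits the documented DB-API extension warning
            print(row)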
+# + +from iotdb.IoTDBContainer import IoTDBContainer +from iotdb.dbapi import connect +from iotdb.dbapi.Cursor import Cursor + +final_flag = True +failed_count = 0 + + +def test_fail(): + global failed_count + global final_flag + final_flag = False + failed_count += 1 + + +def print_message(message): + print("*********") + print(message) + print("*********") + + +def test_cursor(): + with IoTDBContainer("iotdb:dev") as db: + db: IoTDBContainer + conn = connect(db.get_container_host_ip(), db.get_exposed_port(6667)) + cursor: Cursor = conn.cursor() + + # execute test + cursor.execute("create storage group root.cursor") + cursor.execute("create storage group root.cursor_s1") + cursor.execute("delete storage group root.cursor_s1") + if cursor.rowcount < 0: + test_fail() + print_message("execute test failed!") + + # execute with args test + cursor.execute( + "create timeseries root.cursor.temperature with datatype=FLOAT,encoding=RLE" + ) + cursor.execute( + "insert into root.cursor(timestamp,temperature) values(1,%(temperature)s)", + {"temperature": 0.3}, + ) + cursor.execute( + "insert into root.cursor(timestamp,temperature) values(2,%(temperature)s)", + {"temperature": 0.4}, + ) + cursor.execute("select * from root.cursor") + count = 2 + actual_count = 0 + for row in cursor.fetchall(): + actual_count += 1 + if count != actual_count: + test_fail() + print_message("execute with args test failed!") + + # executemany with args test + args = [ + {"timestamp": 3, "temperature": 3}, + {"timestamp": 4, "temperature": 4}, + {"timestamp": 5, "temperature": 5}, + {"timestamp": 6, "temperature": 6}, + {"timestamp": 7, "temperature": 7}, + ] + cursor.executemany( + "insert into root.cursor(timestamp,temperature) values(%(timestamp)s,%(temperature)s)", + args, + ) + cursor.execute("select * from root.cursor") + count = 7 + actual_count = 0 + for row in cursor.fetchall(): + actual_count += 1 + if count != actual_count: + test_fail() + print_message("executemany with args test failed!") + + # fetchmany test + cursor.execute("select * from root.cursor") + count = 2 + actual_count = 0 + for row in cursor.fetchmany(count): + actual_count += 1 + if count != actual_count: + test_fail() + print_message("fetchmany test failed!") + + # fetchone test + cursor.execute("select * from root.cursor") + row = cursor.fetchone() + if row[0] != 1: + test_fail() + print_message("fetchone test failed") + + cursor.execute("delete storage group root.cursor") + cursor.close() + conn.close() + + +if final_flag: + print("All executions done!!") +else: + print("Some test failed, please have a check") + print("failed count: ", failed_count) + exit(1) diff --git a/client-py/iotdb/sqlalchemy/IoTDBDialect.py b/client-py/iotdb/sqlalchemy/IoTDBDialect.py new file mode 100644 index 000000000000..baf5d6525d4c --- /dev/null +++ b/client-py/iotdb/sqlalchemy/IoTDBDialect.py @@ -0,0 +1,136 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from sqlalchemy import types, util +from sqlalchemy.engine import default +from sqlalchemy.sql.sqltypes import String + +from iotdb import dbapi + +from .IoTDBSQLCompiler import IoTDBSQLCompiler +from .IoTDBTypeCompiler import IoTDBTypeCompiler +from .IoTDBIdentifierPreparer import IoTDBIdentifierPreparer + +TYPES_MAP = { + "BOOLEAN": types.Boolean, + "INT32": types.Integer, + "INT64": types.BigInteger, + "FLOAT": types.Float, + "DOUBLE": types.Float, + "TEXT": types.Text, + "LONG": types.BigInteger, +} + + +class IoTDBDialect(default.DefaultDialect): + name = "iotdb" + driver = "iotdb-python" + statement_compiler = IoTDBSQLCompiler + type_compiler = IoTDBTypeCompiler + preparer = IoTDBIdentifierPreparer + convert_unicode = True + + supports_unicode_statements = True + supports_unicode_binds = True + supports_simple_order_by_label = False + supports_schemas = True + supports_right_nested_joins = False + description_encoding = None + + if hasattr(String, "RETURNS_UNICODE"): + returns_unicode_strings = String.RETURNS_UNICODE + else: + + def _check_unicode_returns(self, connection, additional_tests=None): + return True + + _check_unicode_returns = _check_unicode_returns + + def create_connect_args(self, url): + # inherits the docstring from interfaces.Dialect.create_connect_args + opts = url.translate_connect_args() + opts.update(url.query) + opts.update({"sqlalchemy_mode": True}) + return [[], opts] + + @classmethod + def dbapi(cls): + return dbapi + + def has_schema(self, connection, schema): + return schema in self.get_schema_names(connection) + + def has_table(self, connection, table_name, schema=None, **kw): + return table_name in self.get_table_names(connection, schema=schema) + + def get_schema_names(self, connection, **kw): + cursor = connection.execute("SHOW STORAGE GROUP") + return [row[0] for row in cursor.fetchall()] + + def get_table_names(self, connection, schema=None, **kw): + cursor = connection.execute( + "SHOW DEVICES %s.**" % (schema or self.default_schema_name) + ) + return [row[0].replace(schema + ".", "", 1) for row in cursor.fetchall()] + + def get_columns(self, connection, table_name, schema=None, **kw): + cursor = connection.execute("SHOW TIMESERIES %s.%s.*" % (schema, table_name)) + columns = [self._general_time_column_info()] + for row in cursor.fetchall(): + columns.append(self._create_column_info(row, schema, table_name)) + return columns + + def get_pk_constraint(self, connection, table_name, schema=None, **kw): + pass + + def get_foreign_keys(self, connection, table_name, schema=None, **kw): + return [] + + def get_indexes(self, connection, table_name, schema=None, **kw): + return [] + + @util.memoized_property + def _dialect_specific_select_one(self): + # IoTDB does not support select 1 + # so replace the statement with "show version" + return "SHOW VERSION" + + def _general_time_column_info(self): + """ + Treat Time as a column + """ + return { + "name": "Time", + "type": self._resolve_type("LONG"), + "nullable": False, + "default": None, + } + + def _create_column_info(self, row, schema, table_name): + """ + Generate description information for 
each column + """ + return { + "name": row[0].replace(schema + "." + table_name + ".", "", 1), + "type": self._resolve_type(row[3]), + "nullable": True, + "default": None, + } + + def _resolve_type(self, type_): + return TYPES_MAP.get(type_, types.UserDefinedType) diff --git a/client-py/iotdb/sqlalchemy/IoTDBIdentifierPreparer.py b/client-py/iotdb/sqlalchemy/IoTDBIdentifierPreparer.py new file mode 100644 index 000000000000..e09dd3c2305c --- /dev/null +++ b/client-py/iotdb/sqlalchemy/IoTDBIdentifierPreparer.py @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from sqlalchemy.sql.compiler import IdentifierPreparer + + +class IoTDBIdentifierPreparer(IdentifierPreparer): + def __init__(self, dialect, **kw): + quote = "`" + super(IoTDBIdentifierPreparer, self).__init__( + dialect, initial_quote=quote, escape_quote=quote, **kw + ) diff --git a/client-py/iotdb/sqlalchemy/IoTDBSQLCompiler.py b/client-py/iotdb/sqlalchemy/IoTDBSQLCompiler.py new file mode 100644 index 000000000000..36c4ca0bf764 --- /dev/null +++ b/client-py/iotdb/sqlalchemy/IoTDBSQLCompiler.py @@ -0,0 +1,243 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from sqlalchemy.sql.compiler import SQLCompiler +from sqlalchemy.sql.compiler import OPERATORS +from sqlalchemy.sql import operators + + +class IoTDBSQLCompiler(SQLCompiler): + def order_by_clause(self, select, **kw): + """allow dialects to customize how ORDER BY is rendered.""" + + order_by = select._order_by_clause._compiler_dispatch(self, **kw) + if "Time" in order_by: + return " ORDER BY " + order_by.replace('"', "") + else: + return "" + + def group_by_clause(self, select, **kw): + """allow dialects to customize how GROUP BY is rendered.""" + return "" + + def visit_select( + self, + select, + asfrom=False, + parens=True, + fromhints=None, + compound_index=0, + nested_join_translation=False, + select_wraps_for=None, + lateral=False, + **kwargs, + ): + """ + Override this method to solve two problems + 1. IoTDB does not support querying Time as a measurement name (e.g. 
select Time from root.storagegroup.device) + 2. IoTDB does not support path.measurement format to determine a column (e.g. select root.storagegroup.device.temperature from root.storagegroup.device) + """ + needs_nested_translation = ( + select.use_labels + and not nested_join_translation + and not self.stack + and not self.dialect.supports_right_nested_joins + ) + + if needs_nested_translation: + transformed_select = self._transform_select_for_nested_joins(select) + text = self.visit_select( + transformed_select, + asfrom=asfrom, + parens=parens, + fromhints=fromhints, + compound_index=compound_index, + nested_join_translation=True, + **kwargs, + ) + + toplevel = not self.stack + entry = self._default_stack_entry if toplevel else self.stack[-1] + + populate_result_map = need_column_expressions = ( + toplevel + or entry.get("need_result_map_for_compound", False) + or entry.get("need_result_map_for_nested", False) + ) + + if compound_index > 0: + populate_result_map = False + + # this was first proposed as part of #3372; however, it is not + # reached in current tests and could possibly be an assertion + # instead. + if not populate_result_map and "add_to_result_map" in kwargs: + del kwargs["add_to_result_map"] + + if needs_nested_translation: + if populate_result_map: + self._transform_result_map_for_nested_joins(select, transformed_select) + return text + + froms = self._setup_select_stack(select, entry, asfrom, lateral) + + column_clause_args = kwargs.copy() + column_clause_args.update( + {"within_label_clause": False, "within_columns_clause": False} + ) + + text = "SELECT " # we're off to a good start ! + + if select._hints: + hint_text, byfrom = self._setup_select_hints(select) + if hint_text: + text += hint_text + " " + else: + byfrom = None + + if select._prefixes: + text += self._generate_prefixes(select, select._prefixes, **kwargs) + + text += self.get_select_precolumns(select, **kwargs) + # the actual list of columns to print in the SELECT column list. + # IoTDB does not support querying Time as a measurement name (e.g. select Time from root.storagegroup.device) + columns = [] + for name, column in select._columns_plus_names: + column.table = None + columns.append( + self._label_select_column( + select, + column, + populate_result_map, + asfrom, + column_clause_args, + name=name, + need_column_expressions=need_column_expressions, + ) + ) + inner_columns = [c for c in columns if c is not None] + + if populate_result_map and select_wraps_for is not None: + # if this select is a compiler-generated wrapper, + # rewrite the targeted columns in the result map + + translate = dict( + zip( + [name for (key, name) in select._columns_plus_names], + [name for (key, name) in select_wraps_for._columns_plus_names], + ) + ) + + self._result_columns = [ + (key, name, tuple(translate.get(o, o) for o in obj), type_) + for key, name, obj, type_ in self._result_columns + ] + # IoTDB does not allow to query Time as column, + # need to filter out Time and pass Time and Time's alias to DBAPI separately + # to achieve the query of Time by encoding. 
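# As a rough illustration (hypothetical statement, backtick quoting omitted):
#     SELECT Time, temperature FROM root.sg.d1
# has Time dropped from the projected column list, and two marker lines are
# appended to the last remaining column, roughly:
#     temperature
#      FROM Time Index 0
#      FROM Time Name Time
# Cursor.execute() in sqlalchemy_mode strips these "FROM Time ..." marker lines
# before the SQL reaches the server and, once the result DataFrame is built,
# re-inserts the time values at the recorded positions under the recorded names.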
+ time_column_index = [] + time_column_names = [] + for i in range(len(inner_columns)): + column_strs = ( + inner_columns[i].replace(self.preparer.initial_quote, "").split() + ) + if "Time" in column_strs: + time_column_index.append(str(i)) + time_column_names.append( + column_strs[2] + if OPERATORS[operators.as_] in column_strs + else column_strs[0] + ) + # delete Time column + inner_columns = list( + filter( + lambda x: "Time" + not in x.replace(self.preparer.initial_quote, "").split(), + inner_columns, + ) + ) + if inner_columns and time_column_index: + inner_columns[-1] = ( + inner_columns[-1] + + " \n FROM Time Index " + + " ".join(time_column_index) + + "\n FROM Time Name " + + " ".join(time_column_names) + ) + + text = self._compose_select_body( + text, select, inner_columns, froms, byfrom, kwargs + ) + + if select._statement_hints: + per_dialect = [ + ht + for (dialect_name, ht) in select._statement_hints + if dialect_name in ("*", self.dialect.name) + ] + if per_dialect: + text += " " + self.get_statement_hint_text(per_dialect) + + if self.ctes and toplevel: + text = self._render_cte_clause() + text + + if select._suffixes: + text += " " + self._generate_prefixes(select, select._suffixes, **kwargs) + + self.stack.pop(-1) + + if (asfrom or lateral) and parens: + return "(" + text + ")" + else: + return text + + def visit_table( + self, + table, + asfrom=False, + iscrud=False, + ashint=False, + fromhints=None, + use_schema=True, + **kwargs, + ): + """ + IoTDB's table does not support quotation marks (e.g. select ** from `root.`) + need to override this method + """ + if asfrom or ashint: + effective_schema = self.preparer.schema_for_object(table) + + if use_schema and effective_schema: + ret = effective_schema + "." + table.name + else: + ret = table.name + if fromhints and table in fromhints: + ret = self.format_from_hint_text(ret, table, fromhints[table], iscrud) + return ret + else: + return "" + + def visit_column( + self, column, add_to_result_map=None, include_table=True, **kwargs + ): + """ + IoTDB's where statement does not support "table".column format(e.g. "table".column > 1) + need to override this method to return the name of column directly + """ + return column.name diff --git a/client-py/iotdb/sqlalchemy/IoTDBTypeCompiler.py b/client-py/iotdb/sqlalchemy/IoTDBTypeCompiler.py new file mode 100644 index 000000000000..4cfd2480bd4b --- /dev/null +++ b/client-py/iotdb/sqlalchemy/IoTDBTypeCompiler.py @@ -0,0 +1,45 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from sqlalchemy.sql.compiler import GenericTypeCompiler + + +class IoTDBTypeCompiler(GenericTypeCompiler): + def visit_FLOAT(self, type_, **kw): + return "FLOAT" + + def visit_NUMERIC(self, type_, **kw): + return "INT64" + + def visit_DECIMAL(self, type_, **kw): + return "DOUBLE" + + def visit_INTEGER(self, type_, **kw): + return "INT32" + + def visit_SMALLINT(self, type_, **kw): + return "INT32" + + def visit_BIGINT(self, type_, **kw): + return "LONG" + + def visit_TIMESTAMP(self, type_, **kw): + return "LONG" + + def visit_text(self, type_, **kw): + return "TEXT" diff --git a/client-py/iotdb/sqlalchemy/__init__.py b/client-py/iotdb/sqlalchemy/__init__.py new file mode 100644 index 000000000000..2a1e720805f2 --- /dev/null +++ b/client-py/iotdb/sqlalchemy/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# diff --git a/client-py/iotdb/sqlalchemy/tests/__init__.py b/client-py/iotdb/sqlalchemy/tests/__init__.py new file mode 100644 index 000000000000..2a1e720805f2 --- /dev/null +++ b/client-py/iotdb/sqlalchemy/tests/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# diff --git a/client-py/iotdb/sqlalchemy/tests/test_dialect.py b/client-py/iotdb/sqlalchemy/tests/test_dialect.py new file mode 100644 index 000000000000..7b0e3e2ad659 --- /dev/null +++ b/client-py/iotdb/sqlalchemy/tests/test_dialect.py @@ -0,0 +1,92 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
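The visit_* overrides above translate SQLAlchemy column types into IoTDB data-type names. A quick sketch of the mapping, assuming a bare dialect instance can be created outside an engine:

from sqlalchemy import types
from iotdb.sqlalchemy.IoTDBDialect import IoTDBDialect

dialect = IoTDBDialect()
print(dialect.type_compiler.process(types.INTEGER()))    # INT32
print(dialect.type_compiler.process(types.FLOAT()))      # FLOAT
print(dialect.type_compiler.process(types.TIMESTAMP()))  # LONG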
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import operator + +from sqlalchemy import create_engine, inspect +from sqlalchemy.dialects import registry + +from iotdb.IoTDBContainer import IoTDBContainer + +final_flag = True +failed_count = 0 + + +def test_fail(): + global failed_count + global final_flag + final_flag = False + failed_count += 1 + + +def print_message(message): + print("*********") + print(message) + print("*********") + + +def test_dialect(): + with IoTDBContainer("iotdb:dev") as db: + db: IoTDBContainer + url = ( + "iotdb://root:root@" + + db.get_container_host_ip() + + ":" + + db.get_exposed_port(6667) + ) + registry.register("iotdb", "iotdb.sqlalchemy.IoTDBDialect", "IoTDBDialect") + eng = create_engine(url) + eng.execute("create storage group root.cursor") + eng.execute("create storage group root.cursor_s1") + eng.execute( + "create timeseries root.cursor.device1.temperature with datatype=FLOAT,encoding=RLE" + ) + eng.execute( + "create timeseries root.cursor.device1.status with datatype=FLOAT,encoding=RLE" + ) + eng.execute( + "create timeseries root.cursor.device2.temperature with datatype=FLOAT,encoding=RLE" + ) + insp = inspect(eng) + # test get_schema_names + schema_names = insp.get_schema_names() + if not operator.eq(schema_names, ["root.cursor", "root.cursor_s1"]): + test_fail() + print_message("test get_schema_names failed!") + # test get_table_names + table_names = insp.get_table_names("root.cursor") + if not operator.eq(table_names, ["device1", "device2"]): + test_fail() + print_message("test get_table_names failed!") + # test get_columns + columns = insp.get_columns(table_name="device1", schema="root.cursor") + if len(columns) != 3: + test_fail() + print_message("test get_columns failed!") + eng.execute("delete storage group root.cursor") + eng.execute("delete storage group root.cursor_s1") + # close engine + eng.dispose() + + +if final_flag: + print("All executions done!!") +else: + print("Some test failed, please have a check") + print("failed count: ", failed_count) + exit(1) diff --git a/client-py/iotdb/template/InternalNode.py b/client-py/iotdb/template/InternalNode.py new file mode 100644 index 000000000000..bac17ca90ab1 --- /dev/null +++ b/client-py/iotdb/template/InternalNode.py @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
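Once the dialect is registered (or picked up through the sqlalchemy.dialects entry point added later in setup.py), the engine can be handed to anything that speaks SQLAlchemy. A sketch with pandas, reusing the iotdb://user:password@host:port URL form from the test above; host, port and the root.cursor path are placeholders, and the storage group is assumed to already hold data:

import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.dialects import registry

registry.register("iotdb", "iotdb.sqlalchemy.IoTDBDialect", "IoTDBDialect")
engine = create_engine("iotdb://root:root@127.0.0.1:6667")

# read a whole device into a DataFrame through the dialect
df = pd.read_sql("select ** from root.cursor", engine)
print(df.head())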
+#
+
+from .TemplateNode import TemplateNode
+
+
+class InternalNode(TemplateNode):
+    def __init__(self, name, share_time):
+        super().__init__(name)
+        self.children = {}
+        self.share_time = share_time
+
+    def add_child(self, node: TemplateNode):
+        if node.get_name() in self.children.keys():
+            raise Exception("Duplicated child of node in template.")
+
+        self.children.update({node.get_name(): node})
+
+    def delete_child(self, node):
+        self.children.pop(node.get_name(), None)
+
+    def get_children(self):
+        return self.children
+
+    def is_share_time(self):
+        return self.share_time
diff --git a/client-py/iotdb/template/MeasurementNode.py b/client-py/iotdb/template/MeasurementNode.py
new file mode 100644
index 000000000000..7d96d4bc0d3a
--- /dev/null
+++ b/client-py/iotdb/template/MeasurementNode.py
@@ -0,0 +1,56 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor
+from .TemplateNode import TemplateNode
+from ..tsfile.utils.ReadWriteIOUtils import ReadWriteUtils
+
+
+class MeasurementNode(TemplateNode):
+    def __init__(
+        self,
+        name: str,
+        data_type: TSDataType,
+        encoding: TSEncoding,
+        compression_type: Compressor,
+    ):
+        self.name = name
+        self.data_type = data_type
+        self.encoding = encoding
+        self.compression_type = compression_type
+
+    def is_measurement(self):
+        return True
+
+    def get_data_type(self):
+        return self.data_type
+
+    def get_encoding(self):
+        return self.encoding
+
+    def get_compression_type(self):
+        return self.compression_type
+
+    def serialize(self, *args, **kwargs):
+        format_str_list, values_tobe_packed = args
+        ReadWriteUtils.write(self.get_name(), format_str_list, values_tobe_packed)
+        ReadWriteUtils.write(self.get_data_type(), format_str_list, values_tobe_packed)
+        ReadWriteUtils.write(self.get_encoding(), format_str_list, values_tobe_packed)
+        ReadWriteUtils.write(
+            self.get_compression_type(), format_str_list, values_tobe_packed
+        )
diff --git a/client-py/iotdb/template/Template.py b/client-py/iotdb/template/Template.py
new file mode 100644
index 000000000000..38f883d03fb5
--- /dev/null
+++ b/client-py/iotdb/template/Template.py
@@ -0,0 +1,87 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import struct + +from .TemplateNode import TemplateNode +from ..tsfile.common.constant.TsFileConstant import TsFileConstant +from ..tsfile.utils.Pair import Pair +from ..tsfile.utils.ReadWriteIOUtils import ReadWriteUtils + + +class Template: + def __init__(self, name, share_time: bool = False): + self.name = name + self.children = dict() + self.share_time = share_time + + def get_name(self) -> object: + return self.name + + def is_share_time(self) -> object: + return self.share_time + + def set_share_time(self, share_time: bool): + self.share_time = share_time + + def add_template(self, child: TemplateNode): + if self.children.get(child.get_name()): + raise Exception("Duplicated child of node in template.") + self.children.update({child.get_name(): child}) + + def delete_from_template(self, name: str): + if not self.children.pop(name, []): + raise Exception("It is not a direct child of the template: " + name) + + @property + def serialize(self): + format_str_list = [">"] + values_tobe_packed = [] + stack = [] + aligned_prefix = set() + ReadWriteUtils.write(self.get_name(), format_str_list, values_tobe_packed) + ReadWriteUtils.write(self.is_share_time(), format_str_list, values_tobe_packed) + if self.is_share_time(): + aligned_prefix.add("") + + for child in self.children: + stack.append(Pair("", self.children[child])) + + while stack: + pair = stack.pop() + prefix = pair.left + cur_node = pair.right + full_path = [prefix] + if not cur_node.is_measurement(): + if prefix != "": + full_path.append(TsFileConstant.PATH_SEPARATOR) + full_path.append(cur_node.get_name()) + if cur_node.is_share_time(): + aligned_prefix.add("".join(full_path)) + for child in cur_node.children: + stack.append(Pair("".join(full_path), cur_node.children[child])) + else: + ReadWriteUtils.write(prefix, format_str_list, values_tobe_packed) + if prefix in aligned_prefix: + ReadWriteUtils.write(True, format_str_list, values_tobe_packed) + else: + ReadWriteUtils.write(False, format_str_list, values_tobe_packed) + cur_node.serialize(format_str_list, values_tobe_packed) + + format_str = "".join(format_str_list) + return struct.pack(format_str, *values_tobe_packed) diff --git a/client-py/iotdb/template/TemplateNode.py b/client-py/iotdb/template/TemplateNode.py new file mode 100644 index 000000000000..6bab445394da --- /dev/null +++ b/client-py/iotdb/template/TemplateNode.py @@ -0,0 +1,47 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
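Putting the template classes together, a schema template is assembled from nodes and then handed to the session. A rough sketch; the names are placeholders, and Session.create_schema_template is the call exercised in test_template.py later in this patch:

from iotdb.template.Template import Template
from iotdb.template.MeasurementNode import MeasurementNode
from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor

template = Template(name="env_template", share_time=False)
template.add_template(
    MeasurementNode("temperature", TSDataType.FLOAT, TSEncoding.RLE, Compressor.SNAPPY)
)
template.add_template(
    MeasurementNode("humidity", TSDataType.FLOAT, TSEncoding.RLE, Compressor.SNAPPY)
)

# serialize is a property: reading it walks the node tree and returns the
# struct-packed payload that the session sends to the server
payload = template.serialize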
See the License for the +# specific language governing permissions and limitations +# under the License. +# + + +class TemplateNode(object): + """ + Template class, this class should be used to schema template node + """ + + def __init__(self, name): + self.name = name + + def get_name(self): + return self.name + + def get_children(self): + return None + + def add_child(self, node): + ... + + def delete_child(self, node): + ... + + def is_measurement(self): + return False + + def is_share_time(self): + return False + + def serialize(self, *args, **kwargs): + ... diff --git a/client-py/iotdb/template/TemplateQueryType.py b/client-py/iotdb/template/TemplateQueryType.py new file mode 100644 index 000000000000..370d1c0a3671 --- /dev/null +++ b/client-py/iotdb/template/TemplateQueryType.py @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + + +from enum import Enum, unique + + +@unique +class TemplateQueryType(Enum): + COUNT_MEASUREMENTS = 0 + IS_MEASUREMENT = 1 + PATH_EXIST = 2 + SHOW_MEASUREMENTS = 3 + SHOW_TEMPLATES = 4 + SHOW_SET_TEMPLATES = 5 + SHOW_USING_TEMPLATES = 6 + + # this method is implemented to avoid the issue reported by: + # https://bugs.python.org/issue30545 + def __eq__(self, other) -> bool: + return self.value == other.value + + def __hash__(self): + return self.value diff --git a/client-py/iotdb/template/__init__.py b/client-py/iotdb/template/__init__.py new file mode 100644 index 000000000000..2a1e720805f2 --- /dev/null +++ b/client-py/iotdb/template/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# diff --git a/client-py/iotdb/tsfile/__init__.py b/client-py/iotdb/tsfile/__init__.py new file mode 100644 index 000000000000..2a1e720805f2 --- /dev/null +++ b/client-py/iotdb/tsfile/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# diff --git a/client-py/iotdb/tsfile/common/__init__.py b/client-py/iotdb/tsfile/common/__init__.py new file mode 100644 index 000000000000..2a1e720805f2 --- /dev/null +++ b/client-py/iotdb/tsfile/common/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# diff --git a/client-py/iotdb/tsfile/common/constant/TsFileConstant.py b/client-py/iotdb/tsfile/common/constant/TsFileConstant.py new file mode 100644 index 000000000000..0baad6a5fb87 --- /dev/null +++ b/client-py/iotdb/tsfile/common/constant/TsFileConstant.py @@ -0,0 +1,36 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + + +class TsFileConstant: + TSFILE_SUFFIX = ".tsfile" + TSFILE_HOME = "TSFILE_HOME" + TSFILE_CONF = "TSFILE_CONF" + PATH_ROOT = "root" + TMP_SUFFIX = "tmp" + PATH_SEPARATOR = "." + PATH_SEPARATOR_CHAR = "." + PATH_SEPARATER_NO_REGEX = "\\." + DOUBLE_QUOTE = '"' + + TIME_COLUMN_MASK = 0x80 + + VALUE_COLUMN_MASK = 0x40 + + def __ts_file_constant(self): + ... diff --git a/client-py/iotdb/tsfile/common/constant/__init__.py b/client-py/iotdb/tsfile/common/constant/__init__.py new file mode 100644 index 000000000000..2a1e720805f2 --- /dev/null +++ b/client-py/iotdb/tsfile/common/constant/__init__.py @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# diff --git a/client-py/iotdb/tsfile/utils/Pair.py b/client-py/iotdb/tsfile/utils/Pair.py new file mode 100644 index 000000000000..f3603fa91257 --- /dev/null +++ b/client-py/iotdb/tsfile/utils/Pair.py @@ -0,0 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + + +class Pair: + def __init__(self, left: str, right): + self.__serialVersionUID = -1398609631703707002 + self.left = left + self.right = right diff --git a/client-py/iotdb/tsfile/utils/ReadWriteIOUtils.py b/client-py/iotdb/tsfile/utils/ReadWriteIOUtils.py new file mode 100644 index 000000000000..6101906ced93 --- /dev/null +++ b/client-py/iotdb/tsfile/utils/ReadWriteIOUtils.py @@ -0,0 +1,77 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+#
+
+from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor
+
+
+class ReadWriteUtils:
+    BOOLEAN_LEN = 1
+    SHORT_LEN = 2
+    INT_LEN = 4
+    LONG_LEN = 8
+    DOUBLE_LEN = 8
+    FLOAT_LEN = 4
+    BIT_LEN = 0.125
+    NO_BYTE_TO_READ = -1
+    magicStringBytes = []
+    RETURN_ERROR = "Intend to read %d bytes but %d are actually returned"
+    URN_ERROR = "Intend to read %d bytes but %d are actually returned"
+
+    @classmethod
+    def write(cls, *args, **kwargs):
+        value, format_str_list, values_tobe_packed = args
+        if isinstance(value, bool):
+            cls.write_bool(value, format_str_list, values_tobe_packed)
+        elif isinstance(value, str):
+            cls.write_str(value, format_str_list, values_tobe_packed)
+        elif isinstance(value, int):
+            cls.write_int(value, format_str_list, values_tobe_packed)
+        elif isinstance(value, TSDataType):
+            cls.write_byte(value.value, format_str_list, values_tobe_packed)
+        elif isinstance(value, TSEncoding):
+            cls.write_byte(value.value, format_str_list, values_tobe_packed)
+        elif isinstance(value, Compressor):
+            cls.write_byte(value.value, format_str_list, values_tobe_packed)
+
+    @classmethod
+    def write_str(cls, s: str, format_str_list, values_tobe_packed):
+        if s is None:
+            cls.write_int(cls.NO_BYTE_TO_READ, format_str_list, values_tobe_packed)
+            # a None string is fully encoded by the length marker above
+            return
+
+        value_bytes = bytes(s, "utf-8")
+        format_str_list.append("i")
+        format_str_list.append(str(len(value_bytes)))
+        format_str_list.append("s")
+
+        values_tobe_packed.append(len(value_bytes))
+        values_tobe_packed.append(value_bytes)
+
+    @classmethod
+    def write_int(cls, i: int, format_str_list, values_tobe_packed):
+        format_str_list.append("i")
+        values_tobe_packed.append(i)
+
+    @classmethod
+    def write_bool(cls, flag: bool, format_str_list, values_tobe_packed):
+        format_str_list.append("?")
+        values_tobe_packed.append(flag)
+
+    @classmethod
+    def write_byte(cls, b, format_str_list, values_tobe_packed):
+        format_str_list.append("b")
+        values_tobe_packed.append(b)
diff --git a/client-py/iotdb/tsfile/utils/__init__.py b/client-py/iotdb/tsfile/utils/__init__.py
new file mode 100644
index 000000000000..2a1e720805f2
--- /dev/null
+++ b/client-py/iotdb/tsfile/utils/__init__.py
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
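For a concrete feel of the packing helpers above, a small sketch of what a few write() calls accumulate; the values are arbitrary:

import struct

from iotdb.tsfile.utils.ReadWriteIOUtils import ReadWriteUtils

format_str_list, values = [">"], []
ReadWriteUtils.write("s1", format_str_list, values)   # str  -> "i", "2", "s"
ReadWriteUtils.write(True, format_str_list, values)   # bool -> "?"
ReadWriteUtils.write(7, format_str_list, values)      # int  -> "i"

# equivalent to struct.pack(">i2s?i", 2, b"s1", True, 7)
payload = struct.pack("".join(format_str_list), *values)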
+# diff --git a/client-py/requirements.txt b/client-py/requirements.txt index 566c75f79c72..8715d50595b3 100644 --- a/client-py/requirements.txt +++ b/client-py/requirements.txt @@ -20,4 +20,7 @@ pandas~=1.3.5 # Testcontainer testcontainers==3.3.0 -numpy~=1.21.4 \ No newline at end of file +numpy~=1.21.4 +# SQLAlchemy Dialect +sqlalchemy == 1.3.20 +sqlalchemy-utils == 0.36.8 \ No newline at end of file diff --git a/client-py/setup.py b/client-py/setup.py index 11e30cf40f49..a3f147af3cff 100644 --- a/client-py/setup.py +++ b/client-py/setup.py @@ -44,6 +44,8 @@ "pandas>=1.0.0,<1.99.99", "numpy>=1.0.0", "testcontainers>=2.0.0", + "sqlalchemy>=1.3.16, <1.4, !=1.3.21", + "sqlalchemy-utils>=0.37.8, <0.38", ], classifiers=[ "Programming Language :: Python :: 3", @@ -55,4 +57,9 @@ python_requires=">=3.7", license="Apache License, Version 2.0", website="https://iotdb.apache.org", + entry_points={ + "sqlalchemy.dialects": [ + "iotdb = iotdb.sqlalchemy.IoTDBDialect:IoTDBDialect", + ], + }, ) diff --git a/client-py/tests/test_dataframe.py b/client-py/tests/test_dataframe.py index f95ade6e0895..c7cce58ea5b1 100644 --- a/client-py/tests/test_dataframe.py +++ b/client-py/tests/test_dataframe.py @@ -27,9 +27,10 @@ def test_simple_query(): db: IoTDBContainer session = Session(db.get_container_host_ip(), db.get_exposed_port(6667)) session.open(False) + session.execute_non_query_statement("set storage group to root.device0") # Write data - session.insert_str_record("root.device", 123, "pressure", "15.0") + session.insert_str_record("root.device0", 123, "pressure", "15.0") # Read session_data_set = session.execute_query_statement("SELECT ** FROM root") @@ -37,7 +38,7 @@ def test_simple_query(): session.close() - assert list(df.columns) == ["Time", "root.device.pressure"] + assert list(df.columns) == ["Time", "root.device0.pressure"] assert_array_equal(df.values, [[123.0, 15.0]]) @@ -46,9 +47,10 @@ def test_non_time_query(): db: IoTDBContainer session = Session(db.get_container_host_ip(), db.get_exposed_port(6667)) session.open(False) + session.execute_non_query_statement("set storage group to root.device0") # Write data - session.insert_str_record("root.device", 123, "pressure", "15.0") + session.insert_str_record("root.device0", 123, "pressure", "15.0") # Read session_data_set = session.execute_query_statement("SHOW TIMESERIES") @@ -70,9 +72,9 @@ def test_non_time_query(): df.values, [ [ - "root.device.pressure", + "root.device0.pressure", None, - "root.device", + "root.device0", "FLOAT", "GORILLA", "SNAPPY", diff --git a/client-py/tests/test_one_device.py b/client-py/tests/test_one_device.py index c364cd1105f8..d428947e9b8d 100644 --- a/client-py/tests/test_one_device.py +++ b/client-py/tests/test_one_device.py @@ -44,6 +44,7 @@ def test_one_device(): db: IoTDBContainer session = Session(db.get_container_host_ip(), db.get_exposed_port(6667)) session.open(False) + session.execute_non_query_statement("set storage group to root.str_test_01") if not session.is_open(): print("can't open session") diff --git a/client-py/tests/test_tablet.py b/client-py/tests/test_tablet.py index fcb55133679a..1e80277d771b 100644 --- a/client-py/tests/test_tablet.py +++ b/client-py/tests/test_tablet.py @@ -30,6 +30,7 @@ def test_tablet_insertion(): db: IoTDBContainer session = Session(db.get_container_host_ip(), db.get_exposed_port(6667)) session.open(False) + session.execute_non_query_statement("set storage group to root.sg_test_01") measurements_ = ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"] data_types_ = [ @@ -72,6 +73,7 
@@ def test_nullable_tablet_insertion(): db: IoTDBContainer session = Session(db.get_container_host_ip(), db.get_exposed_port(6667)) session.open(False) + session.execute_non_query_statement("set storage group to root.sg_test_01") measurements_ = ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"] data_types_ = [ diff --git a/client-py/tests/test_template.py b/client-py/tests/test_template.py new file mode 100644 index 000000000000..1dd328fdb24c --- /dev/null +++ b/client-py/tests/test_template.py @@ -0,0 +1,194 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +from iotdb.IoTDBContainer import IoTDBContainer +from iotdb.Session import Session +from iotdb.template.InternalNode import InternalNode +from iotdb.template.MeasurementNode import MeasurementNode +from iotdb.template.Template import Template +from iotdb.utils.IoTDBConstants import TSDataType, Compressor, TSEncoding + + +def test_template_create(): + with IoTDBContainer("iotdb:dev") as db: + db: IoTDBContainer + session = Session(db.get_container_host_ip(), db.get_exposed_port(6667)) + session.open(False) + + measurement_template_name = "template_python" + template = Template(name=measurement_template_name, share_time=False) + m_node_1 = MeasurementNode( + name="s1", + data_type=TSDataType.INT64, + encoding=TSEncoding.RLE, + compression_type=Compressor.SNAPPY, + ) + m_node_2 = MeasurementNode( + name="s2", + data_type=TSDataType.INT64, + encoding=TSEncoding.RLE, + compression_type=Compressor.SNAPPY, + ) + m_node_3 = MeasurementNode( + name="s3", + data_type=TSDataType.INT64, + encoding=TSEncoding.RLE, + compression_type=Compressor.SNAPPY, + ) + template.add_template(m_node_1) + template.add_template(m_node_2) + template.add_template(m_node_3) + session.create_schema_template(template) + + assert session.show_measurements_in_template(measurement_template_name) == [ + "s3", + "s1", + "s2", + ] + assert session.count_measurements_in_template(measurement_template_name) == 3 + assert ( + session.is_measurement_in_template(measurement_template_name, "s1") is True + ) + assert ( + session.is_path_exist_in_template(measurement_template_name, "s1") is True + ) + assert ( + session.is_path_exist_in_template(measurement_template_name, "s4") is False + ) + + session.delete_node_in_template(measurement_template_name, "s1") + assert session.show_measurements_in_template(measurement_template_name) == [ + "s3", + "s2", + ] + assert session.count_measurements_in_template(measurement_template_name) == 2 + assert ( + session.is_path_exist_in_template(measurement_template_name, "s1") is False + ) + + tree_template_name = "treeTemplate_python" + template = Template(name=tree_template_name, share_time=True) + i_node_gps = InternalNode(name="GPS", share_time=False) + i_node_v = InternalNode(name="vehicle", 
share_time=True) + m_node_x = MeasurementNode( + "x", TSDataType.FLOAT, TSEncoding.RLE, Compressor.SNAPPY + ) + + i_node_gps.add_child(m_node_x) + i_node_v.add_child(m_node_x) + template.add_template(i_node_gps) + template.add_template(i_node_v) + template.add_template(m_node_x) + session.create_schema_template(template) + assert session.show_measurements_in_template(tree_template_name) == [ + "x", + "GPS.x", + "vehicle.x", + ] + assert session.count_measurements_in_template(tree_template_name) == 3 + + assert session.show_all_templates() == [ + measurement_template_name, + tree_template_name, + ] + assert session.is_measurement_in_template(tree_template_name, "GPS") is False + assert session.is_measurement_in_template(tree_template_name, "GPS.x") is True + + session.drop_schema_template(measurement_template_name) + session.drop_schema_template(tree_template_name) + + session.close() + + +def test_add_measurements_template(): + with IoTDBContainer("iotdb:dev") as db: + db: IoTDBContainer + session = Session(db.get_container_host_ip(), db.get_exposed_port(6667)) + session.open(False) + + template_name = "add_template_python" + template = Template(name=template_name, share_time=False) + i_node_v = InternalNode(name="GPS", share_time=False) + i_node_gps_x = MeasurementNode( + "x", TSDataType.FLOAT, TSEncoding.RLE, Compressor.SNAPPY + ) + + i_node_v.add_child(i_node_gps_x) + template.add_template(i_node_v) + session.create_schema_template(template) + + # # append schema template + data_types = [TSDataType.FLOAT, TSDataType.FLOAT, TSDataType.DOUBLE] + encoding_list = [TSEncoding.RLE, TSEncoding.RLE, TSEncoding.GORILLA] + compressor_list = [Compressor.SNAPPY, Compressor.SNAPPY, Compressor.LZ4] + + measurements_aligned_path = ["aligned.s1", "aligned.s2", "aligned.s3"] + session.add_measurements_in_template( + template_name, + measurements_aligned_path, + data_types, + encoding_list, + compressor_list, + is_aligned=True, + ) + # session.drop_schema_template("add_template_python") + measurements_aligned_path = ["unaligned.s1", "unaligned.s2", "unaligned.s3"] + session.add_measurements_in_template( + template_name, + measurements_aligned_path, + data_types, + encoding_list, + compressor_list, + is_aligned=False, + ) + measurements_aligned_path = ["s1", "s2", "s3"] + session.add_measurements_in_template( + template_name, + measurements_aligned_path, + data_types, + encoding_list, + compressor_list, + is_aligned=False, + ) + + assert session.count_measurements_in_template(template_name) == 10 + assert session.is_measurement_in_template(template_name, "GPS") is False + assert session.is_path_exist_in_template(template_name, "GPS.x") is True + assert session.is_path_exist_in_template(template_name, "x") is False + + session.drop_schema_template(template_name) + session.close() + + +def test_set_template(): + with IoTDBContainer("iotdb:dev") as db: + db: IoTDBContainer + session = Session(db.get_container_host_ip(), db.get_exposed_port(6667)) + session.open(False) + + template_name = "set_template_python" + template = Template(name=template_name, share_time=False) + session.create_schema_template(template) + + session.set_schema_template(template_name, "root.python.GPS") + + assert session.show_paths_template_set_on(template_name) == ["root.python.GPS"] + assert session.show_paths_template_using_on(template_name) == [] + + session.unset_schema_template(template_name, "root.python.GPS") + session.drop_schema_template(template_name) + session.close() diff --git a/client-py/tests/test_todf.py 
b/client-py/tests/test_todf.py index feedcb3228ac..07953446cffa 100644 --- a/client-py/tests/test_todf.py +++ b/client-py/tests/test_todf.py @@ -69,6 +69,7 @@ def test_simple_query(): db: IoTDBContainer session = Session(db.get_container_host_ip(), db.get_exposed_port(6667)) session.open(False) + session.execute_non_query_statement("set storage group to root.wt1") create_ts(session) @@ -105,6 +106,7 @@ def test_with_null_query(): db: IoTDBContainer session = Session(db.get_container_host_ip(), db.get_exposed_port(6667)) session.open(False) + session.execute_non_query_statement("set storage group to root.wt1") create_ts(session) @@ -184,6 +186,7 @@ def test_multi_fetch(): db: IoTDBContainer session = Session(db.get_container_host_ip(), db.get_exposed_port(6667)) session.open(False) + session.execute_non_query_statement("set storage group to root.wt1") create_ts(session) diff --git a/cluster/pom.xml b/cluster/pom.xml deleted file mode 100644 index 61f5b652fbb3..000000000000 --- a/cluster/pom.xml +++ /dev/null @@ -1,228 +0,0 @@ - - - - 4.0.0 - - org.apache.iotdb - iotdb-parent - 0.14.0-SNAPSHOT - ../pom.xml - - iotdb-cluster - cluster - - false - ${cluster.test.skip} - ${cluster.test.skip} - - - - - org.apache.thrift - libthrift - ${thrift.version} - - - org.apache.iotdb - service-rpc - ${project.version} - - - org.apache.thrift - libthrift - - - - - org.apache.iotdb - iotdb-server - ${project.version} - - - org.apache.thrift - libthrift - - - - - commons-io - commons-io - - - org.apache.iotdb - iotdb-thrift-cluster - ${project.version} - - - org.apache.thrift - libthrift - - - compile - - - org.apache.iotdb - iotdb-server - ${project.version} - test-jar - test - - - org.apache.iotdb - iotdb-session - ${project.version} - compile - - - org.apache.iotdb - iotdb-jdbc - ${project.version} - compile - - - commons-cli - commons-cli - - - org.awaitility - awaitility - ${awaitility.version} - test - - - - org.powermock - powermock-core - test - - - org.powermock - powermock-module-junit4 - test - - - org.powermock - powermock-api-mockito2 - test - - - org.apache.commons - commons-pool2 - - - - - skipClusterTests - - - skipTests - true - - - - true - true - true - - - - skipUT_Cluster_Tests - - - skipUTs - true - - - - true - - - - only_test_Cluster - - - cluster.test.only - true - - - - false - false - false - - - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${cluster.ut.skip} - pertest - - - - org.apache.maven.plugins - maven-failsafe-plugin - - - run-integration-tests - integration-test - - integration-test - verify - - - - - ${cluster.test.skip} - ${cluster.it.skip} - - - - org.apache.maven.plugins - maven-assembly-plugin - ${maven.assembly.version} - - - - cluster-assembly - package - - single - - - - src/assembly/cluster.xml - - false - - - - - - - diff --git a/cluster/src/assembly/cluster.xml b/cluster/src/assembly/cluster.xml deleted file mode 100644 index 7025a3ed882f..000000000000 --- a/cluster/src/assembly/cluster.xml +++ /dev/null @@ -1,35 +0,0 @@ - - - - cluster - - dir - zip - - false - - - lib - - - - - src/assembly/resources - ${file.separator} - - - ${maven.multiModuleProjectDirectory}/server/src/assembly/resources/conf - ${file.separator}conf - - - ${maven.multiModuleProjectDirectory}/server/src/assembly/resources/tools - ${file.separator}tools - - - ${maven.multiModuleProjectDirectory}/metrics/interface/src/main/assembly/resources/conf - conf - - - diff --git a/cluster/src/assembly/resources/conf/iotdb-cluster.properties 
b/cluster/src/assembly/resources/conf/iotdb-cluster.properties deleted file mode 100644 index 45de5102be67..000000000000 --- a/cluster/src/assembly/resources/conf/iotdb-cluster.properties +++ /dev/null @@ -1,200 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -#-------------------------------------------IMPORTANT---------------------------------------------# -# 1. Note that the system will automatically create a heartbeat port for each metadata service # -# and data service. The default metadata heartbeat port is internal_meta_port + 1, # -# The default data heartbeat port is internal_data_port + 1. # -# So when you configure these two items and seed_nodes, pay attention to reserve a port for # -# heartbeat service. # -# 2. If open_server_rpc_port is set to true, the server module's RPC port will be turned on, # -# and the server module's RPC port will be set to rpc_port (in iotdb-engines.properties) + 1, # -# so this port should also be reserved. # -#-------------------------------------------IMPORTANT---------------------------------------------# - -# used for communication between cluster nodes, eg heartbeat、raft logs and snapshots etc. -# if this parameter is commented, then the IP that binded by the hostname will be used. -internal_ip=127.0.0.1 - -# port for metadata service -internal_meta_port=9003 - -# port for data service -internal_data_port=40010 - -# port for cluster info API, 6567 by default -#cluster_info_public_port=6567 - -# whether open port for server module (for debug purpose) -# if true, the rpc_port of the single server will be changed to rpc_port (in iotdb-engines.properties) + 1 -# open_server_rpc_port=false - -# comma-separated {IP/DOMAIN}:internal_meta_port pairs, when used by start-datanode.sh(.bat), -# this configuration means the nodes that will form the initial cluster, -# every node that use start-datanode.sh(.bat) should have the same SEED_NODES, or the -# building of the initial cluster will fail. WARNING: if the initial cluster is built, this -# should not be changed before the environment is cleaned. -# when used by add-node.sh(.bat), this means the nodes to which that the application of joining -# the cluster will be sent, as all nodes can respond to a request, this configuration can be any -# nodes that already in the cluster, unnecessary to be the nodes that were used to build the -# initial cluster by start-datanode.sh(.bat). Several nodes will be picked randomly to send the -# request, the number of nodes picked depends on the number of retries. -#seed_nodes=127.0.0.1:9003,127.0.0.1:9005,127.0.0.1:9007 -seed_nodes=127.0.0.1:9003 - -# whether to use thrift compressed protocol for internal communications. 
If you want to change
-# compression settings for external clients, please modify 'rpc_thrift_compression_enable' in
-# 'iotdb-engine.properties'.
-# WARNING: this must be consistent across all nodes in the cluster
-# rpc_thrift_compression_enable=false
-
-# number of replications for one partition
-default_replica_num=1
-
-# sub raft num for multi-raft
-multi_raft_factor=1
-
-# cluster name to identify different clusters
-# all node's cluster_name in one cluster are the same
-# cluster_name=default
-
-# Thrift socket and connection timeout between raft nodes, in milliseconds.
-# NOTE: the timeout of connection used for sending heartbeats and requesting votes
-# will be adjusted to min(heartbeat_interval_ms, connection_timeout_ms).
-# connection_timeout_ms=20000
-
-# write operation timeout threshold (ms), this is only for internal communications,
-# not for the whole operation.
-# write_operation_timeout_ms=30000
-
-# read operation timeout threshold (ms), this is only for internal communications,
-# not for the whole operation.
-# read_operation_timeout_ms=30000
-
-# the time interval (ms) between two rounds of heartbeat broadcast of one raft group leader.
-# Recommend to set it as 1/10 of election_timeout_ms, but larger than 1 RTT between each two nodes.
-# heartbeat_interval_ms=1000
-
-# The election timeout in follower, or the time waiting for requesting votes in elector, in milliseconds.
-# election_timeout_ms=20000
-
-# catch up timeout threshold (ms), this is used for a follower behind the leader too much,
-# so the leader will send logs(snapshot) to the follower,
-# NOTICE, it may cost minutes of time to send a snapshot,
-# so this parameter should be larger than the snapshot cost time.
-# catch_up_timeout_ms=300000
-
-# whether to use batch append entries in log catch up
-# use_batch_in_catch_up=true
-
-# the minimum number of committed logs in memory, after each log deletion, at most such number of logs
-# will remain in memory. Increasing the number will reduce the chance to use snapshot in catch-ups,
-# but will also increase the memory footprint
-# min_num_of_logs_in_mem=1000
-
-# maximum number of committed logs in memory, when reached, a log deletion will be triggered.
-# Increasing the number will reduce the chance to use snapshot in catch-ups, but will also increase
-# memory footprint
-# max_num_of_logs_in_mem=2000
-
-# Ratio of write memory allocated for raft log, 0.2 by default
-# Increasing the number will reduce the memory allocated for write process in iotdb, but will also
-# increase the memory footprint for raft log, which reduces the chance to use snapshot in catch-ups
-# raft_log_memory_proportion=0.2
-
-# deletion check period of the submitted log
-# log_deletion_check_interval_second=-1
-
-# Whether creating schema automatically is enabled, this will replace the one in iotdb-engine.properties
-# enable_auto_create_schema=true
-
-# consistency level, now three consistency levels are supported: strong, mid, and weak.
-# Strong consistency means the server will first try to synchronize with the leader to get the
-# newest data, if failed(timeout), directly report an error to the user;
-# While mid consistency means the server will first try to synchronize with the leader,
-# but if failed(timeout), it will give up and just use current data it has cached before;
-# Weak consistency does not synchronize with the leader and simply use the local data
-# consistency_level=mid
-
-# Whether to use asynchronous server
-# is_use_async_server=false
-
-# Whether to use asynchronous applier
-# is_use_async_applier=true
-
-# is raft log persistence enabled
-# is_enable_raft_log_persistence=true
-
-# When a certain amount of raft log is reached, it will be flushed to disk
-# It is possible to lose at most flush_raft_log_threshold operations
-# flush_raft_log_threshold=10000
-
-# Size of log buffer in each RaftMember's LogManager(in byte).
-# raft_log_buffer_size=16777216
-
-# The maximum value of the raft log index stored in the memory per raft group,
-# These indexes are used to index the location of the log on the disk
-# max_raft_log_index_size_in_memory=10000
-
-# If leader finds too many uncommitted raft logs, raft group leader will wait for a short period of
-# time, and then append the raft log
-# uncommitted_raft_log_num_for_reject_threshold=500
-
-# If followers find too many committed raft logs have not been applied, followers will reject the raft
-# log sent by leader
-# unapplied_raft_log_num_for_reject_threshold=500
-
-# The maximum size of the raft log saved on disk for each file (in bytes) of each raft group.
-# The default size is 1GB
-# max_raft_log_persist_data_size_per_file=1073741824
-
-# The maximum number of persistent raft log files on disk per raft group,
-# So each raft group's log takes up disk space approximately equals
-# max_raft_log_persist_data_size_per_file*max_number_of_persist_raft_log_files
-# max_number_of_persist_raft_log_files=5
-
-# The maximum number of logs saved on the disk
-# max_persist_raft_log_number_on_disk=1000000
-
-# whether enable use persist log on disk to catch up when no logs found in memory, if set false,
-# will use snapshot to catch up when no logs found in memory.
-# enable_use_persist_log_on_disk_to_catch_up=false
-
-# The number of logs read on the disk at one time, which is mainly used to control the memory usage.
-# This value multiplied by the log size is about the amount of memory used to read logs from the disk at one time.
-# max_number_of_logs_per_fetch_on_disk=1000
-
-# When consistency level is set to mid, query will fail if the log lag exceeds max_read_log_lag
-# This default value is 1000
-# max_read_log_lag=1000
-
-# When a follower tries to sync log with the leader, sync will fail if the log Lag exceeds max_sync_log_lag.
-# This default value is 100000
-# max_sync_log_lag=100000
-
-# Max number of clients in a ClientPool of a member for one node. When the num of clients in the ClientPool exceeds this parameter, the ClientPool blocks the thread that obtains the client for waitClientTimeoutMS.
-# max_client_pernode_permember_number=1000
-
-# Max number of idle clients in a ClientPool of a member for one node. When the num of clients in the ClientPool exceeds this parameter, the ClientPool destroys the client when it returns.
-# max_idle_client_pernode_permember_number=500 - -# If the number of connections created for a node exceeds `max_client_pernode_permember_number`, -# we need to wait so much time for other connections to be released until timeout, -# or a new connection will be created. -# wait_client_timeout_ms=5000 diff --git a/cluster/src/assembly/resources/sbin/add-node.bat b/cluster/src/assembly/resources/sbin/add-node.bat deleted file mode 100755 index d272cf90b9cf..000000000000 --- a/cluster/src/assembly/resources/sbin/add-node.bat +++ /dev/null @@ -1,133 +0,0 @@ -@REM -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM http://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. -@REM - -@echo off -echo ```````````````````````` -echo Starting IoTDB (Cluster Mode) -echo ```````````````````````` - -PATH %PATH%;%JAVA_HOME%\bin\ -set "FULL_VERSION=" -set "MAJOR_VERSION=" -set "MINOR_VERSION=" - - -for /f tokens^=2-5^ delims^=.-_+^" %%j in ('java -fullversion 2^>^&1') do ( - set "FULL_VERSION=%%j-%%k-%%l-%%m" - IF "%%j" == "1" ( - set "MAJOR_VERSION=%%k" - set "MINOR_VERSION=%%l" - ) else ( - set "MAJOR_VERSION=%%j" - set "MINOR_VERSION=%%k" - ) -) - -set JAVA_VERSION=%MAJOR_VERSION% - -IF NOT %JAVA_VERSION% == 8 ( - IF NOT %JAVA_VERSION% == 11 ( - echo IoTDB only supports jdk8 or jdk11, please check your java version. - goto finally - ) -) - -if "%OS%" == "Windows_NT" setlocal - -pushd %~dp0.. 
-if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%cd% -popd - -SET enable_printgc=false -IF "%1" == "printgc" ( - SET enable_printgc=true - SHIFT -) - -SET IOTDB_CONF=%1 -IF "%IOTDB_CONF%" == "" ( - SET IOTDB_CONF=%IOTDB_HOME%\conf -) ELSE ( - SET IOTDB_CONF="%IOTDB_CONF%" -) - -SET IOTDB_LOGS=%IOTDB_HOME%\logs - -IF EXIST "%IOTDB_CONF%\iotdb-env.bat" ( - IF "%enable_printgc%" == "true" ( - CALL "%IOTDB_CONF%\iotdb-env.bat" printgc - ) ELSE ( - CALL "%IOTDB_CONF%\iotdb-env.bat" - ) -) ELSE IF EXIST "%IOTDB_HOME%/conf/iotdb-env.bat" ( - IF "%enable_printgc%" == "true" ( - CALL "%IOTDB_HOME%/conf/iotdb-env.bat" printgc - ) ELSE ( - CALL "%IOTDB_HOME%/conf/iotdb-env.bat" - ) -) ELSE ( - echo "can't find iotdb-env.bat" -) - -@setlocal ENABLEDELAYEDEXPANSION ENABLEEXTENSIONS -set CONF_PARAMS=-a -if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.cluster.ClusterIoTDB -if NOT DEFINED JAVA_HOME goto :err - -@REM ----------------------------------------------------------------------------- -@REM JVM Opts we'll use in legacy run or installation -set JAVA_OPTS=-ea^ - -Dlogback.configurationFile="%IOTDB_CONF%\logback.xml"^ - -DIOTDB_HOME="%IOTDB_HOME%"^ - -DTSFILE_HOME="%IOTDB_HOME%"^ - -DTSFILE_CONF="%IOTDB_CONF%"^ - -DIOTDB_CONF="%IOTDB_CONF%" - -@REM ***** CLASSPATH library setting ***** -@REM Ensure that any user defined CLASSPATH variables are not used on startup -set CLASSPATH="%IOTDB_HOME%\lib" - -@REM For each jar in the IOTDB_HOME lib directory call append to build the CLASSPATH variable. -set CLASSPATH=%CLASSPATH%;"%IOTDB_HOME%\lib\*" -set CLASSPATH=%CLASSPATH%;iotdb.IoTDB -goto okClasspath - -:append -set CLASSPATH=%CLASSPATH%;%1 -goto :eof - -@REM ----------------------------------------------------------------------------- -:okClasspath - -rem echo CLASSPATH: %CLASSPATH% - -"%JAVA_HOME%\bin\java" %ILLEGAL_ACCESS_PARAMS% %JAVA_OPTS% %IOTDB_HEAP_OPTS% -cp %CLASSPATH% %IOTDB_JMX_OPTS% %MAIN_CLASS% %CONF_PARAMS% -goto finally - -:err -echo JAVA_HOME environment variable must be set! -pause - - -@REM ----------------------------------------------------------------------------- -:finally - -pause - -ENDLOCAL \ No newline at end of file diff --git a/cluster/src/assembly/resources/sbin/add-node.sh b/cluster/src/assembly/resources/sbin/add-node.sh deleted file mode 100755 index 321299902b7c..000000000000 --- a/cluster/src/assembly/resources/sbin/add-node.sh +++ /dev/null @@ -1,95 +0,0 @@ -#!/bin/bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - - -echo --------------------- -echo "Starting IoTDB (Cluster Mode)" -echo --------------------- - -if [ -z "${IOTDB_HOME}" ]; then - export IOTDB_HOME="`dirname "$0"`/.." 
-fi - -enable_printgc=false -if [ "$#" -ge "1" -a "$1" == "printgc" ]; then - enable_printgc=true; - shift -fi - -IOTDB_CONF=$1 -if [ -z "${IOTDB_CONF}" ]; then - export IOTDB_CONF=${IOTDB_HOME}/conf -fi - -if [ -f "$IOTDB_CONF/iotdb-env.sh" ]; then - if [ $enable_printgc == "true" ]; then - . "$IOTDB_CONF/iotdb-env.sh" "printgc" - else - . "$IOTDB_CONF/iotdb-env.sh" - fi -elif [ -f "${IOTDB_HOME}/conf/iotdb-env.sh" ]; then - if [ $enable_printgc == "true" ]; then - . "${IOTDB_HOME}/conf/iotdb-env.sh" "printgc" - else - . "${IOTDB_HOME}/conf/iotdb-env.sh" - fi -else - echo "can't find $IOTDB_CONF/iotdb-env.sh" -fi - - -if [ -n "$JAVA_HOME" ]; then - for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do - if [ -x "$java" ]; then - JAVA="$java" - break - fi - done -else - JAVA=java -fi - -if [ -z $JAVA ] ; then - echo Unable to find java executable. Check JAVA_HOME and PATH environment variables. > /dev/stderr - exit 1; -fi - -CLASSPATH="" -for f in ${IOTDB_HOME}/lib/*.jar; do - CLASSPATH=${CLASSPATH}":"$f -done -classname=org.apache.iotdb.cluster.ClusterIoTDB - -launch_service() -{ - class="$1" - iotdb_parms="-Dlogback.configurationFile=${IOTDB_CONF}/logback.xml" - iotdb_parms="$iotdb_parms -DIOTDB_HOME=${IOTDB_HOME}" - iotdb_parms="$iotdb_parms -DTSFILE_HOME=${IOTDB_HOME}" - iotdb_parms="$iotdb_parms -DIOTDB_CONF=${IOTDB_CONF}" - iotdb_parms="$iotdb_parms -Dname=iotdb\.IoTDB" - exec "$JAVA" $illegal_access_params $iotdb_parms $IOTDB_JMX_OPTS -cp "$CLASSPATH" "$class" -a - return $? -} - -# Start up the service -launch_service "$classname" - -exit $? diff --git a/cluster/src/assembly/resources/sbin/nodetool.bat b/cluster/src/assembly/resources/sbin/nodetool.bat deleted file mode 100755 index 7dd9111dbc8c..000000000000 --- a/cluster/src/assembly/resources/sbin/nodetool.bat +++ /dev/null @@ -1,58 +0,0 @@ -@REM -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM http://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. -@REM - -if "%OS%" == "Windows_NT" setlocal - -pushd %~dp0.. -if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%CD% -popd - -if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.cluster.utils.nodetool.NodeTool -if NOT DEFINED JAVA_HOME goto :err - -@REM ----------------------------------------------------------------------------- -@REM JVM Opts we'll use in legacy run or installation -set JAVA_OPTS=-ea^ - -DIOTDB_HOME=%IOTDB_HOME% - -REM For each jar in the IOTDB_HOME lib directory call append to build the CLASSPATH variable. 
-for %%i in ("%IOTDB_HOME%\lib\*.jar") do call :append "%%i" -goto okClasspath - -:append -set CLASSPATH=%CLASSPATH%;%1 -goto :eof - -REM ----------------------------------------------------------------------------- -:okClasspath - -"%JAVA_HOME%\bin\java" %JAVA_OPTS% -cp "%CLASSPATH%" %MAIN_CLASS% %* - -goto finally - - -:err -echo JAVA_HOME environment variable must be set! -pause - - -@REM ----------------------------------------------------------------------------- -:finally - -ENDLOCAL \ No newline at end of file diff --git a/cluster/src/assembly/resources/sbin/nodetool.sh b/cluster/src/assembly/resources/sbin/nodetool.sh deleted file mode 100755 index ab005a722a00..000000000000 --- a/cluster/src/assembly/resources/sbin/nodetool.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/sh -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -if [ -z "${IOTDB_HOME}" ]; then - export IOTDB_HOME="$(cd "`dirname "$0"`"/..; pwd)" -fi - - -MAIN_CLASS=org.apache.iotdb.cluster.utils.nodetool.NodeTool - - -CLASSPATH="" -for f in ${IOTDB_HOME}/lib/*.jar; do - CLASSPATH=${CLASSPATH}":"$f -done - - -if [ -n "$JAVA_HOME" ]; then - for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do - if [ -x "$java" ]; then - JAVA="$java" - break - fi - done -else - JAVA=java -fi - -exec "$JAVA" -cp "$CLASSPATH" "$MAIN_CLASS" "$@" - -exit $? diff --git a/cluster/src/assembly/resources/sbin/remove-node.bat b/cluster/src/assembly/resources/sbin/remove-node.bat deleted file mode 100755 index fc3202b711ce..000000000000 --- a/cluster/src/assembly/resources/sbin/remove-node.bat +++ /dev/null @@ -1,110 +0,0 @@ -@REM -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM http://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. 
-@REM - -@echo off -echo ```````````````````````` -echo Starting to remove a node (Cluster Mode) -echo ```````````````````````` - -PATH %PATH%;%JAVA_HOME%\bin\ -set "FULL_VERSION=" -set "MAJOR_VERSION=" -set "MINOR_VERSION=" - - -for /f tokens^=2-5^ delims^=.-_+^" %%j in ('java -fullversion 2^>^&1') do ( - set "FULL_VERSION=%%j-%%k-%%l-%%m" - IF "%%j" == "1" ( - set "MAJOR_VERSION=%%k" - set "MINOR_VERSION=%%l" - ) else ( - set "MAJOR_VERSION=%%j" - set "MINOR_VERSION=%%k" - ) -) - -set JAVA_VERSION=%MAJOR_VERSION% - -IF NOT %JAVA_VERSION% == 8 ( - IF NOT %JAVA_VERSION% == 11 ( - echo IoTDB only supports jdk8 or jdk11, please check your java version. - goto finally - ) -) - -if "%OS%" == "Windows_NT" setlocal - -pushd %~dp0.. -if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%cd% -popd - -set IOTDB_CONF=%IOTDB_HOME%\conf -set IOTDB_LOGS=%IOTDB_HOME%\logs - -@setlocal ENABLEDELAYEDEXPANSION ENABLEEXTENSIONS -set CONF_PARAMS=-r -set is_conf_path=false -for %%i in (%*) do ( - set CONF_PARAMS=!CONF_PARAMS! %%i -) - -if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.cluster.ClusterIoTDB -if NOT DEFINED JAVA_HOME goto :err - -@REM ----------------------------------------------------------------------------- -@REM JVM Opts we'll use in legacy run or installation -set JAVA_OPTS=-ea^ - -Dlogback.configurationFile="%IOTDB_CONF%\logback.xml"^ - -DIOTDB_HOME="%IOTDB_HOME%"^ - -DTSFILE_HOME="%IOTDB_HOME%"^ - -DCLUSTER_CONF="%IOTDB_CONF%"^ - -DIOTDB_CONF="%IOTDB_CONF%" - -@REM ***** CLASSPATH library setting ***** -@REM Ensure that any user defined CLASSPATH variables are not used on startup -set CLASSPATH="%IOTDB_HOME%\lib" - -@REM For each jar in the IOTDB_HOME lib directory call append to build the CLASSPATH variable. -set CLASSPATH=%CLASSPATH%;"%IOTDB_HOME%\lib\*" -set CLASSPATH=%CLASSPATH%;iotdb.ClusterIoTDB -goto okClasspath - -:append -set CLASSPATH=%CLASSPATH%;%1 -goto :eof - -@REM ----------------------------------------------------------------------------- -:okClasspath - -rem echo CLASSPATH: %CLASSPATH% - -"%JAVA_HOME%\bin\java" %JAVA_OPTS% %IOTDB_HEAP_OPTS% -cp %CLASSPATH% %IOTDB_JMX_OPTS% %MAIN_CLASS% %CONF_PARAMS% -goto finally - -:err -echo JAVA_HOME environment variable must be set! -pause - - -@REM ----------------------------------------------------------------------------- -:finally - -pause - -ENDLOCAL \ No newline at end of file diff --git a/cluster/src/assembly/resources/sbin/remove-node.sh b/cluster/src/assembly/resources/sbin/remove-node.sh deleted file mode 100755 index 0cafad153564..000000000000 --- a/cluster/src/assembly/resources/sbin/remove-node.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - - -echo --------------------- -echo "Starting to remove a node(Cluster Mode)" -echo --------------------- - -if [ -z "${IOTDB_HOME}" ]; then - export IOTDB_HOME="`dirname "$0"`/.." -fi - -IOTDB_CONF=${IOTDB_HOME}/conf - -CONF_PARAMS="-r "$* - -if [ -n "$JAVA_HOME" ]; then - for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do - if [ -x "$java" ]; then - JAVA="$java" - break - fi - done -else - JAVA=java -fi - -if [ -z $JAVA ] ; then - echo Unable to find java executable. Check JAVA_HOME and PATH environment variables. > /dev/stderr - exit 1; -fi - -CLASSPATH="" -for f in ${IOTDB_HOME}/lib/*.jar; do - CLASSPATH=${CLASSPATH}":"$f -done -classname=org.apache.iotdb.cluster.ClusterIoTDB - -launch_service() -{ - class="$1" - iotdb_parms="-Dlogback.configurationFile=${IOTDB_CONF}/logback.xml" - iotdb_parms="$iotdb_parms -DIOTDB_HOME=${IOTDB_HOME}" - iotdb_parms="$iotdb_parms -DTSFILE_HOME=${IOTDB_HOME}" - iotdb_parms="$iotdb_parms -DIOTDB_CONF=${IOTDB_CONF}" - iotdb_parms="$iotdb_parms -DCLUSTER_CONF=${IOTDB_CONF}" - iotdb_parms="$iotdb_parms -Dname=iotdb\.IoTDB" - exec "$JAVA" $iotdb_parms $IOTDB_JMX_OPTS -cp "$CLASSPATH" "$class" $CONF_PARAMS - return $? -} - -# Start up the service -launch_service "$classname" - -exit $? diff --git a/cluster/src/assembly/resources/sbin/start-node.bat b/cluster/src/assembly/resources/sbin/start-node.bat deleted file mode 100755 index a16e79b016a4..000000000000 --- a/cluster/src/assembly/resources/sbin/start-node.bat +++ /dev/null @@ -1,136 +0,0 @@ -@REM -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM http://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. -@REM - -@echo off -echo ```````````````````````` -echo Starting IoTDB (Cluster Mode) -echo ```````````````````````` - -PATH %PATH%;%JAVA_HOME%\bin\ -set "FULL_VERSION=" -set "MAJOR_VERSION=" -set "MINOR_VERSION=" - - -for /f tokens^=2-5^ delims^=.-_+^" %%j in ('java -fullversion 2^>^&1') do ( - set "FULL_VERSION=%%j-%%k-%%l-%%m" - IF "%%j" == "1" ( - set "MAJOR_VERSION=%%k" - set "MINOR_VERSION=%%l" - ) else ( - set "MAJOR_VERSION=%%j" - set "MINOR_VERSION=%%k" - ) -) - -set JAVA_VERSION=%MAJOR_VERSION% - -@REM we do not check jdk that version less than 1.8 because they are too stale... -IF "%JAVA_VERSION%" == "6" ( - echo IoTDB only supports jdk >= 8, please check your java version. - goto finally -) -IF "%JAVA_VERSION%" == "7" ( - echo IoTDB only supports jdk >= 8, please check your java version. - goto finally -) - - -if "%OS%" == "Windows_NT" setlocal - -pushd %~dp0.. 
-if NOT DEFINED IOTDB_HOME set IOTDB_HOME=%cd% -popd - -SET enable_printgc=false -IF "%1" == "printgc" ( - SET enable_printgc=true - SHIFT -) - -SET IOTDB_CONF=%1 -IF "%IOTDB_CONF%" == "" ( - SET IOTDB_CONF=%IOTDB_HOME%\conf -) ELSE ( - SET IOTDB_CONF="%IOTDB_CONF%" -) - -SET IOTDB_LOGS=%IOTDB_HOME%\logs - -IF EXIST "%IOTDB_CONF%\iotdb-env.bat" ( - IF "%enable_printgc%" == "true" ( - CALL "%IOTDB_CONF%\iotdb-env.bat" printgc - ) ELSE ( - CALL "%IOTDB_CONF%\iotdb-env.bat" - ) -) ELSE IF EXIST "%IOTDB_HOME%/conf/iotdb-env.bat" ( - IF "%enable_printgc%" == "true" ( - CALL "%IOTDB_HOME%/conf/iotdb-env.bat" printgc - ) ELSE ( - CALL "%IOTDB_HOME%/conf/iotdb-env.bat" - ) -) ELSE ( - echo "can't find iotdb-env.bat" -) - -@setlocal ENABLEDELAYEDEXPANSION ENABLEEXTENSIONS -set CONF_PARAMS=-s -if NOT DEFINED MAIN_CLASS set MAIN_CLASS=org.apache.iotdb.cluster.ClusterIoTDB -if NOT DEFINED JAVA_HOME goto :err - -@REM ----------------------------------------------------------------------------- -@REM JVM Opts we'll use in legacy run or installation -set JAVA_OPTS=-ea^ - -Dlogback.configurationFile="%IOTDB_CONF%\logback.xml"^ - -DIOTDB_HOME="%IOTDB_HOME%"^ - -DTSFILE_HOME="%IOTDB_HOME%"^ - -DTSFILE_CONF="%IOTDB_CONF%"^ - -DIOTDB_CONF="%IOTDB_CONF%" - -@REM ***** CLASSPATH library setting ***** -@REM Ensure that any user defined CLASSPATH variables are not used on startup -set CLASSPATH="%IOTDB_HOME%\lib\*" - -@REM this special suffix 'iotdb.ClusterIoTDB' is mandatory as stop-node.bat uses it to filter the process id. -set CLASSPATH=%CLASSPATH%;iotdb.ClusterIoTDB -goto okClasspath - -:append -set CLASSPATH=%CLASSPATH%;%1 -goto :eof - -@REM ----------------------------------------------------------------------------- -:okClasspath - -rem echo CLASSPATH: %CLASSPATH% - -"%JAVA_HOME%\bin\java" %ILLEGAL_ACCESS_PARAMS% %JAVA_OPTS% %IOTDB_HEAP_OPTS% -cp %CLASSPATH% %IOTDB_JMX_OPTS% %MAIN_CLASS% %CONF_PARAMS% -goto finally - -:err -echo JAVA_HOME environment variable must be set! -pause - - -@REM ----------------------------------------------------------------------------- -:finally - -pause - -ENDLOCAL diff --git a/cluster/src/assembly/resources/sbin/start-node.sh b/cluster/src/assembly/resources/sbin/start-node.sh deleted file mode 100755 index 31a84f58a135..000000000000 --- a/cluster/src/assembly/resources/sbin/start-node.sh +++ /dev/null @@ -1,97 +0,0 @@ -#!/bin/bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - - -echo --------------------- -echo "Starting IoTDB (Cluster Mode)" -echo --------------------- - -if [ -z "${IOTDB_HOME}" ]; then - export IOTDB_HOME="`dirname "$0"`/.." 
-fi - -enable_printgc=false -if [ "$#" -ge "1" -a "$1" == "printgc" ]; then - enable_printgc=true; - shift -fi - -IOTDB_CONF=$1 -if [ -z "${IOTDB_CONF}" ]; then - export IOTDB_CONF=${IOTDB_HOME}/conf -fi - -if [ -f "$IOTDB_CONF/iotdb-env.sh" ]; then - if [ $enable_printgc == "true" ]; then - . "$IOTDB_CONF/iotdb-env.sh" "printgc" - else - . "$IOTDB_CONF/iotdb-env.sh" - fi -elif [ -f "${IOTDB_HOME}/conf/iotdb-env.sh" ]; then - if [ $enable_printgc == "true" ]; then - . "${IOTDB_HOME}/conf/iotdb-env.sh" "printgc" - else - . "${IOTDB_HOME}/conf/iotdb-env.sh" - fi -else - echo "can't find $IOTDB_CONF/iotdb-env.sh" -fi - -CONF_PARAMS="-s "$* - -if [ -n "$JAVA_HOME" ]; then - for java in "$JAVA_HOME"/bin/amd64/java "$JAVA_HOME"/bin/java; do - if [ -x "$java" ]; then - JAVA="$java" - break - fi - done -else - JAVA=java -fi - -if [ -z $JAVA ] ; then - echo Unable to find java executable. Check JAVA_HOME and PATH environment variables. > /dev/stderr - exit 1; -fi - -CLASSPATH="" -for f in ${IOTDB_HOME}/lib/*.jar; do - CLASSPATH=${CLASSPATH}":"$f -done -classname=org.apache.iotdb.cluster.ClusterIoTDB - -launch_service() -{ - class="$1" - iotdb_parms="-Dlogback.configurationFile=${IOTDB_CONF}/logback.xml" - iotdb_parms="$iotdb_parms -DIOTDB_HOME=${IOTDB_HOME}" - iotdb_parms="$iotdb_parms -DTSFILE_HOME=${IOTDB_HOME}" - iotdb_parms="$iotdb_parms -DIOTDB_CONF=${IOTDB_CONF}" - iotdb_parms="$iotdb_parms -DTSFILE_CONF=${IOTDB_CONF}" - iotdb_parms="$iotdb_parms -Dname=iotdb\.IoTDB" - exec "$JAVA" $illegal_access_params $iotdb_parms $IOTDB_JMX_OPTS -cp "$CLASSPATH" "$class" $CONF_PARAMS - return $? -} - -# Start up the service -launch_service "$classname" - -exit $? diff --git a/cluster/src/assembly/resources/sbin/stop-node.bat b/cluster/src/assembly/resources/sbin/stop-node.bat deleted file mode 100644 index 29c54c9e944c..000000000000 --- a/cluster/src/assembly/resources/sbin/stop-node.bat +++ /dev/null @@ -1,27 +0,0 @@ -@REM -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM http://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. -@REM - -@echo off - -pushd.. -set exec_dir=%cd% -popd -set exec_dir=%exec_dir:\=\\% -wmic process where (commandline like "%%iotdb.ClusterIoTDB%%" and not name="wmic.exe" and commandline like "%%%exec_dir%%%") delete - diff --git a/cluster/src/assembly/resources/sbin/stop-node.sh b/cluster/src/assembly/resources/sbin/stop-node.sh deleted file mode 100644 index 4d670c495432..000000000000 --- a/cluster/src/assembly/resources/sbin/stop-node.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/sh -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - - -PIDS=$(ps ax | grep -i 'ClusterIoTDB' | grep java | grep -v grep | awk '{print $1}') -sig=0 -for every_pid in ${PIDS} -do - cwd_path=$(ls -l /proc/$every_pid | grep "cwd ->" | grep -v grep | awk '{print $NF}') - pwd_path=$(/bin/pwd) - if [[ $pwd_path =~ $cwd_path ]]; then - kill -s TERM $every_pid - echo "close IoTDB" - sig=1 - fi -done - -if [ $sig -eq 0 ]; then - echo "No IoTDB server to stop" - exit 1 -fi - diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/ClientMain.java b/cluster/src/main/java/org/apache/iotdb/cluster/ClientMain.java deleted file mode 100644 index a8003e2a4c50..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/ClientMain.java +++ /dev/null @@ -1,469 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster; - -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.jdbc.Config; -import org.apache.iotdb.rpc.IoTDBConnectionException; -import org.apache.iotdb.rpc.RpcTransportFactory; -import org.apache.iotdb.rpc.StatementExecutionException; -import org.apache.iotdb.rpc.TSStatusCode; -import org.apache.iotdb.service.rpc.thrift.TSCloseOperationReq; -import org.apache.iotdb.service.rpc.thrift.TSCloseSessionReq; -import org.apache.iotdb.service.rpc.thrift.TSCreateTimeseriesReq; -import org.apache.iotdb.service.rpc.thrift.TSExecuteStatementReq; -import org.apache.iotdb.service.rpc.thrift.TSExecuteStatementResp; -import org.apache.iotdb.service.rpc.thrift.TSIService; -import org.apache.iotdb.service.rpc.thrift.TSIService.Client; -import org.apache.iotdb.service.rpc.thrift.TSIService.Client.Factory; -import org.apache.iotdb.service.rpc.thrift.TSInsertStringRecordReq; -import org.apache.iotdb.service.rpc.thrift.TSOpenSessionReq; -import org.apache.iotdb.service.rpc.thrift.TSOpenSessionResp; -import org.apache.iotdb.service.rpc.thrift.TSProtocolVersion; -import org.apache.iotdb.session.SessionDataSet; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.MeasurementSchema; - -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.CommandLineParser; -import org.apache.commons.cli.DefaultParser; -import org.apache.commons.cli.Option; -import org.apache.commons.cli.Options; -import org.apache.commons.cli.ParseException; -import org.apache.thrift.TException; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TCompactProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TTransportException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.time.ZoneId; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -@SuppressWarnings("java:S106") -public class ClientMain { - - private static final Logger logger = LoggerFactory.getLogger(ClientMain.class); - - private static final String PARAM_INSERTION = "i"; - private static final String PARAM_QUERY = "q"; - private static final String PARAM_DELETE_STORAGE_GROUP = "dsg"; - private static final String PARAM_DELETE_SERIES = "ds"; - private static final String PARAM_QUERY_PORTS = "qp"; - private static final String PARAM_INSERT_PORT = "ip"; - private static final String PARAM_BATCH = "b"; - private static Options options = new Options(); - - private static String ip = "127.0.0.1"; - private static int port = 6667; - - static { - options.addOption(new Option(PARAM_INSERTION, "Perform insertion")); - options.addOption(new Option(PARAM_QUERY, "Perform query")); - options.addOption(new Option(PARAM_DELETE_SERIES, "Perform deleting timeseries")); - options.addOption(new Option(PARAM_DELETE_STORAGE_GROUP, "Perform deleting storage group")); - options.addOption( - new Option(PARAM_QUERY_PORTS, true, "Ports to query (ip is currently " + "localhost)")); - options.addOption(new Option(PARAM_INSERT_PORT, true, "Port to 
perform insertion")); - options.addOption(new Option(PARAM_BATCH, "Test batch statement")); - } - - private static Map failedQueries; - - private static final String[] STORAGE_GROUPS = - new String[] { - "root.beijing", "root.shanghai", "root.guangzhou", "root.shenzhen", - }; - - private static final String[] DEVICES = - new String[] { - "root.beijing.d1", "root.shanghai.d1", "root.guangzhou.d1", "root.shenzhen.d1", - }; - - private static final String[] MEASUREMENTS = new String[] {"s1"}; - - private static final TSDataType[] DATA_TYPES = new TSDataType[] {TSDataType.DOUBLE}; - - private static List schemas; - - private static final String[] DATA_QUERIES = - new String[] { - // raw data multi series - "SELECT * FROM root", - "SELECT * FROM root WHERE time <= 691200000", - "SELECT * FROM root WHERE time >= 391200000 and time <= 691200000", - "SELECT * FROM root.*.* WHERE s1 <= 0.7", - // raw data single series - "SELECT s1 FROM root.beijing.d1", - "SELECT s1 FROM root.shanghai.d1", - "SELECT s1 FROM root.guangzhou.d1", - "SELECT s1 FROM root.shenzhen.d1", - // aggregation - "SELECT count(s1) FROM root.*.*", - "SELECT avg(s1) FROM root.*.*", - "SELECT sum(s1) FROM root.*.*", - "SELECT max_value(s1) FROM root.*.*", - "SELECT count(s1) FROM root.*.* where time <= 691200000", - "SELECT count(s1) FROM root.*.* where s1 <= 0.7", - // group by device - "SELECT * FROM root GROUP BY DEVICE", - // fill - "SELECT s1 FROM root.beijing.d1 WHERE time = 86400000 FILL (DOUBLE[PREVIOUS,1d])", - "SELECT s1 FROM root.shanghai.d1 WHERE time = 86400000 FILL (DOUBLE[LINEAR,1d,1d])", - "SELECT s1 FROM root.guangzhou.d1 WHERE time = 126400000 FILL (DOUBLE[PREVIOUS,1d])", - "SELECT s1 FROM root.shenzhen.d1 WHERE time = 126400000 FILL (DOUBLE[LINEAR,1d,1d])", - // group by - "SELECT COUNT(*) FROM root.*.* GROUP BY ([0, 864000000), 3d, 3d)", - "SELECT AVG(*) FROM root.*.* WHERE s1 <= 0.7 GROUP BY ([0, 864000000), 3d, 3d)", - // last - "SELECT LAST s1 FROM root.*.*", - }; - - private static final String[] META_QUERY = - new String[] { - "SHOW STORAGE GROUP", - "SHOW TIMESERIES root", - "COUNT TIMESERIES root", - "COUNT TIMESERIES root GROUP BY LEVEL=2", - "SHOW DEVICES", - "SHOW TIMESERIES root limit 1 offset 1", - }; - - public static void main(String[] args) - throws TException, StatementExecutionException, IoTDBConnectionException, ParseException, - SQLException, ClassNotFoundException { - CommandLineParser parser = new DefaultParser(); - CommandLine commandLine = parser.parse(options, args); - boolean noOption = args.length == 0; - - failedQueries = new HashMap<>(); - prepareSchema(); - - if (commandLine.hasOption(PARAM_INSERT_PORT)) { - port = Integer.parseInt(commandLine.getOptionValue(PARAM_INSERT_PORT)); - } - - doInsertion(noOption, commandLine); - - doQuery(noOption, commandLine); - - doDeleteSeries(noOption, commandLine); - - doDeleteSG(noOption, commandLine); - - doBatchStmt(noOption, commandLine); - } - - private static void doInsertion(boolean noOption, CommandLine commandLine) throws TException { - if (noOption || commandLine.hasOption(PARAM_INSERTION)) { - System.out.println("Test insertion"); - Client client = getClient(ip, port); - long sessionId = connectClient(client); - testInsertion(client, sessionId); - client.closeSession(new TSCloseSessionReq(sessionId)); - } - } - - private static void doQuery(boolean noOption, CommandLine commandLine) - throws StatementExecutionException, TException, IoTDBConnectionException { - if (noOption || commandLine.hasOption(PARAM_QUERY)) { - int[] queryPorts = 
null; - if (commandLine.hasOption(PARAM_QUERY_PORTS)) { - queryPorts = parseIntArray(commandLine.getOptionValue(PARAM_QUERY_PORTS)); - } - if (queryPorts == null) { - queryPorts = new int[] {port, port + 1, port + 2}; - } - for (int queryPort : queryPorts) { - System.out.println("Test port: " + queryPort); - - Client client = getClient(ip, queryPort); - long sessionId = connectClient(client); - System.out.println("Test data queries"); - testQuery(client, sessionId, DATA_QUERIES); - - System.out.println("Test metadata queries"); - testQuery(client, sessionId, META_QUERY); - - logger.info("Failed queries: {}", failedQueries); - client.closeSession(new TSCloseSessionReq(sessionId)); - } - } - } - - private static void doDeleteSeries(boolean noOption, CommandLine commandLine) throws TException { - if (noOption || commandLine.hasOption(PARAM_DELETE_SERIES)) { - System.out.println("Test delete timeseries"); - Client client = getClient(ip, port); - long sessionId = connectClient(client); - testDeleteTimeseries(client, sessionId); - client.closeSession(new TSCloseSessionReq(sessionId)); - } - } - - private static void doDeleteSG(boolean noOption, CommandLine commandLine) - throws StatementExecutionException, TException, IoTDBConnectionException { - if (noOption || commandLine.hasOption(PARAM_DELETE_STORAGE_GROUP)) { - System.out.println("Test delete storage group"); - Client client = getClient(ip, port); - long sessionId = connectClient(client); - testDeleteStorageGroup(client, sessionId); - client.closeSession(new TSCloseSessionReq(sessionId)); - } - } - - private static void doBatchStmt(boolean noOption, CommandLine commandLine) - throws SQLException, ClassNotFoundException { - if (noOption || commandLine.hasOption(PARAM_BATCH)) { - System.out.println("Test batch create sgs"); - testBatch(ip, port); - } - } - - private static int[] parseIntArray(String str) { - if (str == null) { - return new int[0]; - } - String[] split = str.split(","); - int[] ret = new int[split.length]; - for (int i = 0; i < split.length; i++) { - ret[i] = Integer.parseInt(split[i]); - } - return ret; - } - - private static long connectClient(Client client) throws TException { - TSOpenSessionReq openReq = - new TSOpenSessionReq( - TSProtocolVersion.IOTDB_SERVICE_PROTOCOL_V3, ZoneId.systemDefault().getId()); - openReq.setUsername("root"); - openReq.setPassword("root"); - TSOpenSessionResp openResp = client.openSession(openReq); - return openResp.getSessionId(); - } - - @SuppressWarnings({"java:S2095"}) // the transport is used later - private static Client getClient(String ip, int port) throws TTransportException { - TSIService.Client.Factory factory = new Factory(); - TTransport transport = RpcTransportFactory.INSTANCE.getTransportWithNoTimeout(ip, port); - transport.open(); - TProtocol protocol = - IoTDBDescriptor.getInstance().getConfig().isRpcThriftCompressionEnable() - ? 
new TCompactProtocol(transport) - : new TBinaryProtocol(transport); - return factory.getClient(protocol); - } - - private static void prepareSchema() { - schemas = new ArrayList<>(); - for (String device : DEVICES) { - for (int i = 0; i < MEASUREMENTS.length; i++) { - String measurement = MEASUREMENTS[i]; - schemas.add( - new MeasurementSchema( - device + IoTDBConstant.PATH_SEPARATOR + measurement, DATA_TYPES[i])); - } - } - } - - private static void testQuery(Client client, long sessionId, String[] queries) - throws TException, StatementExecutionException, IoTDBConnectionException { - long statementId = client.requestStatementId(sessionId); - for (String dataQuery : queries) { - executeQuery(client, sessionId, dataQuery, statementId); - } - - TSCloseOperationReq tsCloseOperationReq = new TSCloseOperationReq(sessionId); - tsCloseOperationReq.setStatementId(statementId); - client.closeOperation(tsCloseOperationReq); - } - - private static void executeQuery(Client client, long sessionId, String query, long statementId) - throws TException, StatementExecutionException, IoTDBConnectionException { - if (logger.isInfoEnabled()) { - logger.info("{ {} }", query); - } - TSExecuteStatementResp resp = - client.executeQueryStatement( - new TSExecuteStatementReq(sessionId, query, statementId).setFetchSize(1000)); - if (resp.status.code != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - failedQueries.put(query, resp.status); - return; - } - - long queryId = resp.getQueryId(); - if (logger.isInfoEnabled()) { - logger.info(resp.columns.toString()); - } - - SessionDataSet dataSet = - new SessionDataSet( - query, - resp.getColumns(), - resp.getDataTypeList(), - resp.columnNameIndexMap, - queryId, - statementId, - client, - sessionId, - resp.queryDataSet, - false); - - while (dataSet.hasNext()) { - if (logger.isInfoEnabled()) { - logger.info(dataSet.next().toString()); - } - } - System.out.println(); - - TSCloseOperationReq tsCloseOperationReq = new TSCloseOperationReq(sessionId); - tsCloseOperationReq.setQueryId(queryId); - client.closeOperation(tsCloseOperationReq); - } - - private static void testDeleteStorageGroup(Client client, long sessionId) - throws TException, StatementExecutionException, IoTDBConnectionException { - if (logger.isInfoEnabled()) { - logger.info(client.deleteStorageGroups(sessionId, Arrays.asList(STORAGE_GROUPS)).toString()); - } - - testQuery(client, sessionId, new String[] {"SELECT * FROM root"}); - } - - private static void registerTimeseries(long sessionId, Client client) throws TException { - TSCreateTimeseriesReq req = new TSCreateTimeseriesReq(); - req.setSessionId(sessionId); - for (IMeasurementSchema schema : schemas) { - req.setDataType(schema.getType().ordinal()); - req.setEncoding(schema.getEncodingType().ordinal()); - req.setCompressor(schema.getCompressor().ordinal()); - req.setPath(schema.getMeasurementId()); - if (logger.isInfoEnabled()) { - logger.info(client.createTimeseries(req).toString()); - } - } - } - - @SuppressWarnings("ConstantConditions") - private static void testInsertion(Client client, long sessionId) throws TException { - for (String storageGroup : STORAGE_GROUPS) { - if (logger.isInfoEnabled()) { - logger.info(client.setStorageGroup(sessionId, storageGroup).toString()); - } - } - - registerTimeseries(sessionId, client); - - TSInsertStringRecordReq insertReq = new TSInsertStringRecordReq(); - insertReq.setMeasurements(Arrays.asList(MEASUREMENTS)); - insertReq.setSessionId(sessionId); - - for (int i = 0; i < 10; i++) { - List values = new 
ArrayList<>(MEASUREMENTS.length); - insertReq.setTimestamp(i * 24 * 3600 * 1000L); - for (int i1 = 0; i1 < MEASUREMENTS.length; i1++) { - switch (DATA_TYPES[i1]) { - case DOUBLE: - values.add(Double.toString(i * 0.1)); - break; - case BOOLEAN: - values.add(Boolean.toString(i % 2 == 0)); - break; - case INT64: - values.add(Long.toString(i)); - break; - case INT32: - values.add(Integer.toString(i)); - break; - case FLOAT: - values.add(Float.toString(i * 0.1f)); - break; - case TEXT: - values.add("S" + i); - break; - } - } - - insertReq.setValues(values); - - for (String device : DEVICES) { - insertReq.setPrefixPath(device); - if (logger.isInfoEnabled()) { - logger.info(insertReq.toString()); - logger.info(client.insertStringRecord(insertReq).toString()); - } - } - } - } - - private static void testDeleteTimeseries(Client client, long sessionId) throws TException { - List paths = new ArrayList<>(); - for (String measurement : MEASUREMENTS) { - for (String device : DEVICES) { - paths.add(device + "." + measurement); - } - } - if (logger.isInfoEnabled()) { - logger.info(client.deleteTimeseries(sessionId, paths).toString()); - } - } - - private static void testBatch(String ip, int port) throws ClassNotFoundException, SQLException { - Class.forName(Config.JDBC_DRIVER_NAME); - try (Connection connection = - DriverManager.getConnection( - Config.IOTDB_URL_PREFIX + String.format("%s:%d/", ip, port), "root", "root"); - Statement statement = connection.createStatement()) { - - statement.addBatch("SET STORAGE GROUP TO root.batch1"); - statement.addBatch("SET STORAGE GROUP TO root.batch2"); - statement.addBatch("SET STORAGE GROUP TO root.batch3"); - statement.addBatch("SET STORAGE GROUP TO root.batch4"); - - statement.executeBatch(); - statement.clearBatch(); - - try (ResultSet set = statement.executeQuery("SHOW STORAGE GROUP")) { - int colNum = set.getMetaData().getColumnCount(); - while (set.next()) { - StringBuilder stringBuilder = new StringBuilder(); - for (int i = 0; i < colNum; i++) { - stringBuilder.append(set.getString(i + 1)).append(","); - } - System.out.println(stringBuilder); - } - } - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterFileFlushPolicy.java b/cluster/src/main/java/org/apache/iotdb/cluster/ClusterFileFlushPolicy.java deleted file mode 100644 index c642632206c0..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterFileFlushPolicy.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster; - -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.db.engine.flush.TsFileFlushPolicy; -import org.apache.iotdb.db.engine.storagegroup.DataRegion; -import org.apache.iotdb.db.engine.storagegroup.TsFileProcessor; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingDeque; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -public class ClusterFileFlushPolicy implements TsFileFlushPolicy { - - private static final Logger logger = LoggerFactory.getLogger(ClusterFileFlushPolicy.class); - - private ExecutorService closePartitionExecutor; - private MetaGroupMember metaGroupMember; - - public ClusterFileFlushPolicy(MetaGroupMember metaGroupMember) { - this.metaGroupMember = metaGroupMember; - this.closePartitionExecutor = - new ThreadPoolExecutor( - 16, - 1024, - 0, - TimeUnit.SECONDS, - new LinkedBlockingDeque<>(), - r -> { - Thread thread = new Thread(r); - thread.setName("ClusterFileFlushPolicy-" + thread.getId()); - return thread; - }); - } - - @Override - public void apply(DataRegion dataRegion, TsFileProcessor processor, boolean isSeq) { - logger.info( - "The memtable size reaches the threshold, async flush it to tsfile: {}", - processor.getTsFileResource().getTsFile().getAbsolutePath()); - - if (processor.shouldClose()) { - // find the related DataGroupMember and close the processor through it - // we execute it in another thread to avoid deadlocks - closePartitionExecutor.submit( - () -> - metaGroupMember.closePartition( - dataRegion.getDataRegionId(), processor.getTimeRangeId(), isSeq)); - } - // flush the memtable anyway to avoid the insertion trigger the policy again - processor.asyncFlush(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDB.java b/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDB.java deleted file mode 100644 index 2910a3fd473f..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDB.java +++ /dev/null @@ -1,672 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster; - -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.ClientManager; -import org.apache.iotdb.cluster.client.IClientManager; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.async.AsyncMetaClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConfig; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.coordinator.Coordinator; -import org.apache.iotdb.cluster.exception.ConfigInconsistentException; -import org.apache.iotdb.cluster.exception.StartUpCheckFailureException; -import org.apache.iotdb.cluster.metadata.CSchemaProcessor; -import org.apache.iotdb.cluster.metadata.MetaPuller; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.partition.slot.SlotStrategy; -import org.apache.iotdb.cluster.query.manage.ClusterSessionManager; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.ClusterRPCService; -import org.apache.iotdb.cluster.server.ClusterTSServiceImpl; -import org.apache.iotdb.cluster.server.HardLinkCleaner; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.basic.ClusterServiceProvider; -import org.apache.iotdb.cluster.server.clusterinfo.ClusterInfoServer; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.server.monitor.NodeReport; -import org.apache.iotdb.cluster.server.raft.DataRaftHeartBeatService; -import org.apache.iotdb.cluster.server.raft.DataRaftService; -import org.apache.iotdb.cluster.server.raft.MetaRaftHeartBeatService; -import org.apache.iotdb.cluster.server.raft.MetaRaftService; -import org.apache.iotdb.cluster.server.service.DataGroupEngine; -import org.apache.iotdb.cluster.server.service.DataGroupServiceImpls; -import org.apache.iotdb.cluster.server.service.MetaAsyncService; -import org.apache.iotdb.cluster.server.service.MetaSyncService; -import org.apache.iotdb.cluster.utils.ClusterUtils; -import org.apache.iotdb.cluster.utils.nodetool.ClusterMonitor; -import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.exception.ConfigurationException; -import org.apache.iotdb.commons.exception.StartupException; -import org.apache.iotdb.commons.service.JMXService; -import org.apache.iotdb.commons.service.RegisterManager; -import org.apache.iotdb.commons.service.ThriftServiceThread; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.conf.IoTDBConfig; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.conf.IoTDBStartCheck; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.db.service.basic.ServiceProvider; - -import org.apache.thrift.TException; -import org.apache.thrift.async.TAsyncClientManager; -import org.apache.thrift.protocol.TBinaryProtocol.Factory; -import org.apache.thrift.protocol.TCompactProtocol; -import org.apache.thrift.protocol.TProtocolFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.HashSet; -import java.util.Set; -import 
java.util.concurrent.ExecutorService; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import static org.apache.iotdb.cluster.config.ClusterConstant.THREAD_POLL_WAIT_TERMINATION_TIME_S; -import static org.apache.iotdb.cluster.utils.ClusterUtils.UNKNOWN_CLIENT_IP; - -/** we do not inherent IoTDB instance, as it may break the singleton mode of IoTDB. */ -public class ClusterIoTDB implements ClusterIoTDBMBean { - - private static final Logger logger = LoggerFactory.getLogger(ClusterIoTDB.class); - private final String mbeanName = - String.format( - "%s:%s=%s", "org.apache.iotdb.cluster.service", IoTDBConstant.JMX_TYPE, "ClusterIoTDB"); - - // TODO: better to throw exception if the client can not be get. Then we can remove this field. - private boolean printClientConnectionErrorStack = false; - - private MetaGroupMember metaGroupMember; - - private DataGroupEngine dataGroupEngine; - - private Node thisNode; - private Coordinator coordinator; - - private final IoTDB iotdb = IoTDB.getInstance(); - - // Cluster IoTDB uses a individual registerManager with its parent. - private final RegisterManager registerManager = new RegisterManager(); - - /** - * a single thread pool, every "REPORT_INTERVAL_SEC" seconds, "reportThread" will print the status - * of all raft members in this node. - */ - private ScheduledExecutorService reportThread; - - private boolean allowReport = true; - - /** hardLinkCleaner will periodically clean expired hardlinks created during snapshots. */ - private ScheduledExecutorService hardLinkCleanerThread; - - /** - * The clientManager is only used by those instances who do not belong to any DataGroup or - * MetaGroup. - */ - private IClientManager clientManager; - - private ClusterIoTDB() { - // we do not init anything here, so that we can re-initialize the instance in IT. - } - - /** initialize the current node and its services */ - public boolean initLocalEngines() { - ClusterConfig config = ClusterDescriptor.getInstance().getConfig(); - thisNode = new Node(); - // set internal rpc ip and ports - thisNode.setInternalIp(config.getInternalIp()); - thisNode.setMetaPort(config.getInternalMetaPort()); - thisNode.setDataPort(config.getInternalDataPort()); - // set client rpc ip and ports - thisNode.setClientPort(config.getClusterRpcPort()); - thisNode.setClientIp(IoTDBDescriptor.getInstance().getConfig().getRpcAddress()); - coordinator = new Coordinator(); - // local engine - TProtocolFactory protocolFactory = - ThriftServiceThread.getProtocolFactory( - IoTDBDescriptor.getInstance().getConfig().isRpcThriftCompressionEnable()); - metaGroupMember = new MetaGroupMember(protocolFactory, thisNode, coordinator); - IoTDB.setClusterMode(); - IoTDB.setSchemaProcessor(CSchemaProcessor.getInstance()); - ((CSchemaProcessor) IoTDB.schemaProcessor).setMetaGroupMember(metaGroupMember); - ((CSchemaProcessor) IoTDB.schemaProcessor).setCoordinator(coordinator); - MetaPuller.getInstance().init(metaGroupMember); - // set coordinator for serviceProvider construction - try { - IoTDB.setServiceProvider(new ClusterServiceProvider(coordinator, metaGroupMember)); - } catch (QueryProcessException e) { - logger.error("Failed to set clusterServiceProvider.", e); - stop(); - return false; - } - - // from the scope of the DataGroupEngine,it should be singleton pattern - // the way of setting MetaGroupMember in DataGroupEngine may need a better modification in - // future commit. 
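// Illustrative borrow/return sketch for the request-forward clientManager that is created a few
// lines below; the node variable and the work done with the borrowed client are assumptions, not
// code taken from this class:
//   RaftService.Client client = clientManager.borrowSyncClient(node, ClientCategory.DATA);
//   if (client != null) {
//     try {
//       // ... forward the request through the borrowed client ...
//     } finally {
//       clientManager.returnSyncClient(client, node, ClientCategory.DATA);
//     }
//   }
// A borrowed client must always be returned, otherwise the per-node pool capacity is exhausted.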
- DataGroupEngine.setProtocolFactory(protocolFactory); - DataGroupEngine.setMetaGroupMember(metaGroupMember); - dataGroupEngine = DataGroupEngine.getInstance(); - clientManager = - new ClientManager( - ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(), - ClientManager.Type.RequestForwardClient); - initTasks(); - try { - // we need to check config after initLocalEngines. - startServerCheck(); - JMXService.registerMBean(metaGroupMember, metaGroupMember.getMBeanName()); - } catch (StartupException e) { - logger.error("Failed to check cluster config.", e); - stop(); - return false; - } - return true; - } - - private void initTasks() { - reportThread = IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor("NodeReportThread"); - reportThread.scheduleAtFixedRate( - this::generateNodeReport, - ClusterConstant.REPORT_INTERVAL_SEC, - ClusterConstant.REPORT_INTERVAL_SEC, - TimeUnit.SECONDS); - hardLinkCleanerThread = - IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor("HardLinkCleaner"); - hardLinkCleanerThread.scheduleAtFixedRate( - new HardLinkCleaner(), - ClusterConstant.CLEAN_HARDLINK_INTERVAL_SEC, - ClusterConstant.CLEAN_HARDLINK_INTERVAL_SEC, - TimeUnit.SECONDS); - } - - /** - * Generate a report containing the status of both MetaGroupMember and DataGroupMembers of this - * node. This will help to see if the node is in a consistent and right state during debugging. - */ - private void generateNodeReport() { - if (logger.isDebugEnabled() && allowReport) { - try { - NodeReport report = new NodeReport(thisNode); - report.setMetaMemberReport(metaGroupMember.genMemberReport()); - report.setDataMemberReportList(dataGroupEngine.genMemberReports()); - logger.debug(report.toString()); - } catch (Exception e) { - logger.error("exception occurred when generating node report", e); - } - } - } - - public static void main(String[] args) { - new ClusterIoTDBServerCommandLine().doMain(args); - } - - protected boolean serverCheckAndInit() throws ConfigurationException, IOException { - IoTDBStartCheck.getInstance().checkConfig(); - IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig(); - // init server's configuration first, because the cluster configuration may read settings from - // the server's configuration. - // auto create schema is took over by cluster module, so we disable it in the server module. 
- config.setAutoCreateSchemaEnabled(false); - // check cluster config - String checkResult = clusterConfigCheck(); - if (checkResult != null) { - logger.error(checkResult); - return false; - } - ClusterConfig clusterConfig = ClusterDescriptor.getInstance().getConfig(); - // if client ip is the default address, set it same with internal ip - if (config.getRpcAddress().equals("0.0.0.0")) { - config.setRpcAddress(clusterConfig.getInternalIp()); - } - // set the memory allocated for raft log of each raft log manager - if (clusterConfig.getReplicationNum() > 1) { - clusterConfig.setMaxMemorySizeForRaftLog( - (long) - (config.getAllocateMemoryForWrite() - * clusterConfig.getRaftLogMemoryProportion() - / clusterConfig.getReplicationNum())); - // calculate remaining memory allocated for write process - config.setAllocateMemoryForWrite( - (long) - (config.getAllocateMemoryForWrite() - * (1 - clusterConfig.getRaftLogMemoryProportion()))); - } - return true; - } - - private String clusterConfigCheck() { - try { - ClusterDescriptor.getInstance().replaceHostnameWithIp(); - } catch (Exception e) { - return String.format("replace hostname with ip failed, %s", e.getMessage()); - } - ClusterConfig config = ClusterDescriptor.getInstance().getConfig(); - // check the initial replicateNum and refuse to start when the replicateNum <= 0 - if (config.getReplicationNum() <= 0) { - return String.format( - "ReplicateNum should be greater than 0 instead of %d.", config.getReplicationNum()); - } - // check the initial cluster size and refuse to start when the size < quorum - int quorum = config.getReplicationNum() / 2 + 1; - if (config.getSeedNodeUrls().size() < quorum) { - return String.format( - "Seed number less than quorum, seed number: %s, quorum: " + "%s.", - config.getSeedNodeUrls().size(), quorum); - } - // TODO: duplicate code - Set seedNodes = new HashSet<>(); - for (String url : config.getSeedNodeUrls()) { - Node node = ClusterUtils.parseNode(url); - if (seedNodes.contains(node)) { - return String.format( - "SeedNodes must not repeat each other. SeedNodes: %s", config.getSeedNodeUrls()); - } - seedNodes.add(node); - } - return null; - } - - /** Start as a seed node */ - public void activeStartNodeMode() { - try { - // start iotdb server first - IoTDB.getInstance().active(); - // some work about cluster - preInitCluster(); - // try to build cluster - metaGroupMember.buildCluster(); - // register service after cluster build - postInitCluster(); - // init ServiceImpl to handle request of client - startClientRPC(); - } catch (StartupException - | StartUpCheckFailureException - | ConfigInconsistentException - | QueryProcessException e) { - logger.error("Fail to start server", e); - stop(); - } - } - - private void preInitCluster() throws StartupException { - stopRaftInfoReport(); - JMXService.registerMBean(this, mbeanName); - // register MetaGroupMember. MetaGroupMember has the same position with "StorageEngine" in the - // cluster module. 
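// Illustrative arithmetic for the raft-log memory split computed in serverCheckAndInit above;
// the concrete values are assumptions, not taken from any configuration file:
//   allocateMemoryForWrite = 4 GB, raftLogMemoryProportion = 0.2, replicationNum = 3
//   maxMemorySizeForRaftLog = 4 GB * 0.2 / 3  (about 273 MB per raft log manager)
//   remaining write memory  = 4 GB * (1 - 0.2) = 3.2 GB
// The seed-node check in clusterConfigCheck uses the usual majority quorum,
// quorum = replicationNum / 2 + 1, so replicationNum = 3 needs at least 2 seed node URLs.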
- // TODO: it is better to remove coordinator out of metaGroupEngine - - registerManager.register(metaGroupMember); - registerManager.register(dataGroupEngine); - - // rpc service initialize - DataGroupServiceImpls dataGroupServiceImpls = new DataGroupServiceImpls(); - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - MetaAsyncService metaAsyncService = new MetaAsyncService(metaGroupMember); - MetaRaftHeartBeatService.getInstance().initAsyncedServiceImpl(metaAsyncService); - MetaRaftService.getInstance().initAsyncedServiceImpl(metaAsyncService); - DataRaftService.getInstance().initAsyncedServiceImpl(dataGroupServiceImpls); - DataRaftHeartBeatService.getInstance().initAsyncedServiceImpl(dataGroupServiceImpls); - } else { - MetaSyncService syncService = new MetaSyncService(metaGroupMember); - MetaRaftHeartBeatService.getInstance().initSyncedServiceImpl(syncService); - MetaRaftService.getInstance().initSyncedServiceImpl(syncService); - DataRaftService.getInstance().initSyncedServiceImpl(dataGroupServiceImpls); - DataRaftHeartBeatService.getInstance().initSyncedServiceImpl(dataGroupServiceImpls); - } - // start RPC service - logger.info("start Meta Heartbeat RPC service... "); - registerManager.register(MetaRaftHeartBeatService.getInstance()); - /* TODO: better to start the Meta RPC service until the heartbeatService has elected the leader - and quorum of followers have caught up. */ - logger.info("start Meta RPC service... "); - registerManager.register(MetaRaftService.getInstance()); - } - - private void postInitCluster() throws StartupException { - logger.info("start Data Heartbeat RPC service... "); - registerManager.register(DataRaftHeartBeatService.getInstance()); - logger.info("start Data RPC service... "); - registerManager.register(DataRaftService.getInstance()); - // RPC based DBA API - registerManager.register(ClusterInfoServer.getInstance()); - // JMX based DBA API - registerManager.register(ClusterMonitor.INSTANCE); - } - - private void startClientRPC() throws QueryProcessException, StartupException { - // we must wait until the metaGroup established. - // So that the ClusterRPCService can work. 
- ClusterTSServiceImpl clusterServiceImpl = new ClusterTSServiceImpl(); - ServiceProvider.SESSION_MANAGER = ClusterSessionManager.getInstance(); - ClusterSessionManager.getInstance().setCoordinator(coordinator); - ClusterRPCService.getInstance().initSyncedServiceImpl(clusterServiceImpl); - registerManager.register(ClusterRPCService.getInstance()); - // init influxDB MManager - if (IoTDBDescriptor.getInstance().getConfig().isEnableInfluxDBRpcService()) { - IoTDB.initInfluxDBMManager(); - } - } - - /** Be added to the cluster by seed nodes */ - public void activeAddNodeMode() { - try { - final long startTime = System.currentTimeMillis(); - preInitCluster(); - metaGroupMember.joinCluster(); - postInitCluster(); - dataGroupEngine.pullSnapshots(); - startClientRPC(); - logger.info( - "Adding this node {} to cluster costs {} ms", - thisNode, - (System.currentTimeMillis() - startTime)); - } catch (StartupException - | QueryProcessException - | StartUpCheckFailureException - | ConfigInconsistentException e) { - stop(); - logger.error("Fail to join cluster", e); - } - } - - private void startServerCheck() throws StartupException { - ClusterConfig config = ClusterDescriptor.getInstance().getConfig(); - // assert not duplicated nodes - Set seedNodes = new HashSet<>(); - for (String url : config.getSeedNodeUrls()) { - Node node = ClusterUtils.parseNode(url); - if (seedNodes.contains(node)) { - String message = - String.format( - "SeedNodes must not repeat each other. SeedNodes: %s", config.getSeedNodeUrls()); - throw new StartupException(metaGroupMember.getName(), message); - } - seedNodes.add(node); - } - - // assert this node is in all nodes when restart - if (!metaGroupMember.getAllNodes().isEmpty()) { - if (!metaGroupMember.getAllNodes().contains(metaGroupMember.getThisNode())) { - String message = - String.format( - "All nodes in partitionTables must contains local node in start-server mode. " - + "LocalNode: %s, AllNodes: %s", - metaGroupMember.getThisNode(), metaGroupMember.getAllNodes()); - throw new StartupException(metaGroupMember.getName(), message); - } else { - return; - } - } - - // assert this node is in seed nodes list - if (!seedNodes.contains(thisNode)) { - String message = - String.format( - "SeedNodes must contains local node in start-server mode. LocalNode: %s ,SeedNodes: " - + "%s", - thisNode.toString(), config.getSeedNodeUrls()); - throw new StartupException(metaGroupMember.getName(), message); - } - } - - protected void doRemoveNode(String[] args) throws IOException { - if (args.length != 3) { - logger.error("Usage: "); - return; - } - String ip = args[1]; - int metaPort = Integer.parseInt(args[2]); - ClusterConfig config = ClusterDescriptor.getInstance().getConfig(); - TProtocolFactory factory = - config.isRpcThriftCompressionEnabled() ? 
new TCompactProtocol.Factory() : new Factory(); - Node nodeToRemove = new Node(); - nodeToRemove.setInternalIp(ip).setMetaPort(metaPort).setClientIp(UNKNOWN_CLIENT_IP); - // try sending the request to each seed node - for (String url : config.getSeedNodeUrls()) { - Node node = ClusterUtils.parseNode(url); - if (node == null) { - continue; - } - AsyncMetaClient client = - new AsyncMetaClient(factory, new TAsyncClientManager(), node, ClientCategory.META); - Long response = null; - long startTime = System.currentTimeMillis(); - try { - logger.info("Start removing node {} with the help of node {}", nodeToRemove, node); - response = SyncClientAdaptor.removeNode(client, nodeToRemove); - } catch (TException e) { - logger.warn("Cannot send remove node request through {}, try next node", node); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("Cannot send remove node request through {}, try next node", node); - } - if (response != null) { - handleNodeRemovalResp(response, nodeToRemove, startTime); - return; - } - } - } - - private void handleNodeRemovalResp(Long response, Node nodeToRemove, long startTime) { - if (response == Response.RESPONSE_AGREE) { - logger.info( - "Node {} is successfully removed, cost {}ms", - nodeToRemove, - (System.currentTimeMillis() - startTime)); - } else if (response == Response.RESPONSE_CLUSTER_TOO_SMALL) { - logger.error("Cluster size is too small, cannot remove any node"); - } else if (response == Response.RESPONSE_REJECT) { - logger.error("Node {} is not found in the cluster, please check", nodeToRemove); - } else if (response == Response.RESPONSE_DATA_MIGRATION_NOT_FINISH) { - logger.warn( - "The data migration of the previous membership change operation is not finished. Please " - + "try again later"); - } else { - logger.error("Unexpected response {}", response); - } - } - - /** Developers may perform pre-start customizations here for debugging or experiments. */ - @SuppressWarnings("java:S125") // leaving examples - private void preStartCustomize() { - // customize data distribution - // The given example tries to divide storage groups like "root.sg_1", "root.sg_2"... 
into k - // nodes evenly, and use default strategy for other groups - SlotPartitionTable.setSlotStrategy( - new SlotStrategy() { - final SlotStrategy defaultStrategy = new SlotStrategy.DefaultStrategy(); - final int clusterSize = - ClusterDescriptor.getInstance().getConfig().getSeedNodeUrls().size(); - - @Override - public int calculateSlotByTime(String storageGroupName, long timestamp, int maxSlotNum) { - int sgSerialNum = extractSerialNumInSGName(storageGroupName) % clusterSize; - if (sgSerialNum >= 0) { - return maxSlotNum / clusterSize * sgSerialNum; - } else { - return defaultStrategy.calculateSlotByTime(storageGroupName, timestamp, maxSlotNum); - } - } - - @Override - public int calculateSlotByPartitionNum( - String storageGroupName, long partitionId, int maxSlotNum) { - int sgSerialNum = extractSerialNumInSGName(storageGroupName) % clusterSize; - if (sgSerialNum >= 0) { - return maxSlotNum / clusterSize * sgSerialNum; - } else { - return defaultStrategy.calculateSlotByPartitionNum( - storageGroupName, partitionId, maxSlotNum); - } - } - - private int extractSerialNumInSGName(String storageGroupName) { - String[] s = storageGroupName.split("_"); - if (s.length != 2) { - return -1; - } - try { - return Integer.parseInt(s[1]); - } catch (NumberFormatException e) { - return -1; - } - } - }); - } - - public void stop() { - deactivate(); - } - - private void deactivate() { - logger.info("Deactivating Cluster IoTDB..."); - stopThreadPools(); - registerManager.deregisterAll(); - JMXService.deregisterMBean(mbeanName); - logger.info("ClusterIoTDB is deactivated."); - // stop the iotdb kernel - iotdb.stop(); - } - - private void stopThreadPools() { - stopThreadPool(reportThread, "reportThread"); - stopThreadPool(hardLinkCleanerThread, "hardLinkCleanerThread"); - } - - private void stopThreadPool(ExecutorService pool, String name) { - if (pool != null) { - pool.shutdownNow(); - try { - pool.awaitTermination(THREAD_POLL_WAIT_TERMINATION_TIME_S, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Unexpected interruption when waiting for {} to end", name, e); - } - } - } - - @TestOnly - public void setClientManager(IClientManager clientManager) { - this.clientManager = clientManager; - } - - @TestOnly - public IClientManager getClientManager() { - return this.clientManager; - } - - @TestOnly - public void setDataGroupEngine(DataGroupEngine dataGroupEngine) { - this.dataGroupEngine = dataGroupEngine; - } - - public MetaGroupMember getMetaGroupMember() { - return metaGroupMember; - } - - public Node getThisNode() { - return thisNode; - } - - public Coordinator getCoordinator() { - return coordinator; - } - - public IoTDB getIotdb() { - return iotdb; - } - - public RegisterManager getRegisterManager() { - return registerManager; - } - - public DataGroupEngine getDataGroupEngine() { - return dataGroupEngine; - } - - public void setMetaGroupMember(MetaGroupMember metaGroupMember) { - this.metaGroupMember = metaGroupMember; - } - - public static ClusterIoTDB getInstance() { - return ClusterIoTDBHolder.INSTANCE; - } - - @Override - public boolean startRaftInfoReport() { - logger.info("Raft status report is enabled."); - allowReport = true; - return logger.isDebugEnabled(); - } - - @Override - public void stopRaftInfoReport() { - logger.info("Raft status report is disabled."); - allowReport = false; - } - - @Override - public void enablePrintClientConnectionErrorStack() { - printClientConnectionErrorStack = true; - } - - @Override - public void 
disablePrintClientConnectionErrorStack() { - printClientConnectionErrorStack = false; - } - - public boolean shouldPrintClientConnectionErrorStack() { - return printClientConnectionErrorStack; - } - - public SyncDataClient getSyncDataClient(Node node, int readOperationTimeoutMS) - throws IOException { - SyncDataClient dataClient = - (SyncDataClient) clientManager.borrowSyncClient(node, ClientCategory.DATA); - if (dataClient != null) { - dataClient.setTimeout(readOperationTimeoutMS); - } - return dataClient; - } - - public AsyncDataClient getAsyncDataClient(Node node, int readOperationTimeoutMS) - throws IOException { - AsyncDataClient dataClient = - (AsyncDataClient) clientManager.borrowAsyncClient(node, ClientCategory.DATA); - if (dataClient != null) { - dataClient.setTimeout(readOperationTimeoutMS); - } - return dataClient; - } - - private static class ClusterIoTDBHolder { - - private static final ClusterIoTDB INSTANCE = new ClusterIoTDB(); - - private ClusterIoTDBHolder() {} - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDBMBean.java b/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDBMBean.java deleted file mode 100644 index 7fedf0f3d90a..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDBMBean.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster; - -// we do not inherent IoTDB instance, as it may break the singleton mode of IoTDB. -public interface ClusterIoTDBMBean { - /** - * try to enable the raft info report. - * - * @return true only if the log level is lower than debug and the report is enabled. - */ - boolean startRaftInfoReport(); - - void stopRaftInfoReport(); - - void enablePrintClientConnectionErrorStack(); - - void disablePrintClientConnectionErrorStack(); -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDBServerCommandLine.java b/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDBServerCommandLine.java deleted file mode 100644 index 220fbb643d2b..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/ClusterIoTDBServerCommandLine.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster; - -import org.apache.iotdb.commons.ServerCommandLine; -import org.apache.iotdb.commons.exception.ConfigurationException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -public class ClusterIoTDBServerCommandLine extends ServerCommandLine { - private static final Logger logger = LoggerFactory.getLogger(ClusterIoTDBServerCommandLine.class); - - // establish the cluster as a seed - private static final String MODE_START = "-s"; - // join an established cluster - private static final String MODE_ADD = "-a"; - // send a request to remove a node, more arguments: ip-of-removed-node - // metaport-of-removed-node - private static final String MODE_REMOVE = "-r"; - - private static final String USAGE = - "Usage: <-s|-a|-r> " - + "[-D{} ] \n" - + "-s: start the node as a seed\n" - + "-a: start the node as a new node\n" - + "-r: remove the node out of the cluster\n"; - - @Override - protected String getUsage() { - return USAGE; - } - - @Override - protected int run(String[] args) { - if (args.length < 1) { - usage(null); - return -1; - } - - ClusterIoTDB cluster = ClusterIoTDB.getInstance(); - // check config of iotdb,and set some configs in cluster mode - try { - if (!cluster.serverCheckAndInit()) { - return -1; - } - } catch (ConfigurationException | IOException e) { - logger.error("meet error when doing start checking", e); - return -1; - } - String mode = args[0]; - logger.info("Running mode {}", mode); - - // initialize the current node and its services - if (!cluster.initLocalEngines()) { - logger.error("initLocalEngines error, stop process!"); - return -1; - } - - // we start IoTDB kernel first. then we start the cluster module. - if (MODE_START.equals(mode)) { - cluster.activeStartNodeMode(); - } else if (MODE_ADD.equals(mode)) { - cluster.activeAddNodeMode(); - } else if (MODE_REMOVE.equals(mode)) { - try { - cluster.doRemoveNode(args); - } catch (IOException e) { - logger.error("Fail to remove node in cluster", e); - } - } else { - logger.error("Unrecognized mode {}", mode); - } - return 0; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/RemoteTsFileResource.java b/cluster/src/main/java/org/apache/iotdb/cluster/RemoteTsFileResource.java deleted file mode 100644 index e8878880d08e..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/RemoteTsFileResource.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.NodeSerializeUtils; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.engine.storagegroup.TsFileResource; -import org.apache.iotdb.db.engine.storagegroup.TsFileResourceStatus; - -import java.io.DataOutputStream; -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Objects; - -public class RemoteTsFileResource extends TsFileResource { - - private Node source; - private boolean isRemote = false; - private boolean withModification = false; - - /** - * Whether the plan range ([minPlanIndex, maxPlanIndex]) overlaps with another TsFile in the same - * time partition. If not (unique = true), we shall have confidence that the file has all data - * whose plan indexes are within [minPlanIndex, maxPlanIndex], so we can remove other local files - * that overlaps with it. - */ - private boolean isPlanRangeUnique = false; - - public RemoteTsFileResource() { - setStatus(TsFileResourceStatus.CLOSED); - this.timeIndex = IoTDBDescriptor.getInstance().getConfig().getTimeIndexLevel().getTimeIndex(); - } - - private RemoteTsFileResource(TsFileResource other) throws IOException { - super(other); - withModification = new File(getModFile().getFilePath()).exists(); - setStatus(TsFileResourceStatus.CLOSED); - } - - public RemoteTsFileResource(TsFileResource other, Node source) throws IOException { - this(other); - this.source = source; - this.isRemote = true; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - if (!super.equals(o)) { - return false; - } - RemoteTsFileResource that = (RemoteTsFileResource) o; - return Objects.equals(source, that.source); - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), source); - } - - public void serialize(DataOutputStream dataOutputStream) { - NodeSerializeUtils.serialize(source, dataOutputStream); - try { - // the path here is only for the remote node to get a download link, so it does not matter - // if it is absolute - SerializeUtils.serialize(getTsFile().getPath(), dataOutputStream); - - timeIndex.serialize(dataOutputStream); - dataOutputStream.writeBoolean(withModification); - - dataOutputStream.writeLong(maxPlanIndex); - dataOutputStream.writeLong(minPlanIndex); - - dataOutputStream.writeByte(isPlanRangeUnique ? 
1 : 0); - } catch (IOException ignored) { - // unreachable - } - } - - public void deserialize(ByteBuffer buffer) { - source = new Node(); - NodeSerializeUtils.deserialize(source, buffer); - setFile(new File(SerializeUtils.deserializeString(buffer))); - - timeIndex = - IoTDBDescriptor.getInstance() - .getConfig() - .getTimeIndexLevel() - .getTimeIndex() - .deserialize(buffer); - - withModification = buffer.get() == 1; - - maxPlanIndex = buffer.getLong(); - minPlanIndex = buffer.getLong(); - - isPlanRangeUnique = buffer.get() == 1; - - isRemote = true; - } - - public Node getSource() { - return source; - } - - public boolean isRemote() { - return isRemote; - } - - public void setRemote(boolean remote) { - isRemote = remote; - } - - public boolean isWithModification() { - return withModification; - } - - public boolean isPlanRangeUnique() { - return isPlanRangeUnique; - } - - public void setPlanRangeUnique(boolean planRangeUnique) { - isPlanRangeUnique = planRangeUnique; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/BaseFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/BaseFactory.java deleted file mode 100644 index 1c60df6b8748..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/client/BaseFactory.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.client; - -import org.apache.commons.pool2.KeyedPooledObjectFactory; -import org.apache.commons.pool2.PooledObject; -import org.apache.thrift.async.TAsyncClientManager; -import org.apache.thrift.protocol.TProtocolFactory; - -import java.util.concurrent.atomic.AtomicInteger; - -public abstract class BaseFactory implements KeyedPooledObjectFactory { - - protected TAsyncClientManager[] managers; - protected TProtocolFactory protocolFactory; - protected AtomicInteger clientCnt = new AtomicInteger(); - protected ClientCategory category; - protected IClientManager clientPoolManager; - - protected BaseFactory(TProtocolFactory protocolFactory, ClientCategory category) { - this.protocolFactory = protocolFactory; - this.category = category; - } - - protected BaseFactory( - TProtocolFactory protocolFactory, ClientCategory category, IClientManager clientManager) { - this.protocolFactory = protocolFactory; - this.category = category; - this.clientPoolManager = clientManager; - } - - @Override - public void activateObject(K node, PooledObject pooledObject) throws Exception {} - - @Override - public void passivateObject(K node, PooledObject pooledObject) throws Exception {} -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientCategory.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientCategory.java deleted file mode 100644 index fdd6669845a9..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientCategory.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.client; - -public enum ClientCategory { - META("MetaClient"), - META_HEARTBEAT("MetaHeartbeatClient"), - DATA("DataClient"), - DATA_HEARTBEAT("DataHeartbeatClient"), - DATA_ASYNC_APPEND_CLIENT("DataAsyncAppendClient"); - - private final String name; - - ClientCategory(String name) { - this.name = name; - } - - public String getName() { - return name; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientManager.java deleted file mode 100644 index ae24a6dea9e7..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientManager.java +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.client; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; - -import com.google.common.collect.Maps; -import org.apache.commons.pool2.KeyedObjectPool; -import org.apache.thrift.transport.TTransportException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Map; - -/** - * One should borrow the reusable client from this manager and return the client after use. The - * underlying client pool is powered by Apache Commons Pool. The class provided 3 default pool group - * according to current usage: RequestForwardClient, DataGroupClients, MetaGroupClients. - * - *
<p>
TODO: We can refine the client structure by reorg the interfaces defined in cluster-thrift. - */ -public class ClientManager implements IClientManager { - - private static final Logger logger = LoggerFactory.getLogger(ClientManager.class); - - private Map> asyncClientPoolMap; - private Map> syncClientPoolMap; - private final ClientPoolFactory clientPoolFactory; - - /** - * {@link ClientManager.Type#RequestForwardClient} represents the clients used to forward external - * client requests to proper node to handle such as query, insert request. - * - *
<p>
{@link ClientManager.Type#DataGroupClient} represents the clients used to appendEntry, - * appendEntries, sendHeartbeat, etc for data raft group. - * - *
<p>
{@link ClientManager.Type#MetaGroupClient} represents the clients used to appendEntry, - * appendEntries, sendHeartbeat, etc for meta raft group. * - */ - public enum Type { - RequestForwardClient, - DataGroupClient, - MetaGroupClient - } - - public ClientManager(boolean isAsyncMode, Type type) { - clientPoolFactory = new ClientPoolFactory(); - clientPoolFactory.setClientManager(this); - if (isAsyncMode) { - asyncClientPoolMap = Maps.newHashMap(); - constructAsyncClientMap(type); - } else { - syncClientPoolMap = Maps.newHashMap(); - constructSyncClientMap(type); - } - } - - private void constructAsyncClientMap(Type type) { - switch (type) { - case RequestForwardClient: - asyncClientPoolMap.put( - ClientCategory.DATA, clientPoolFactory.createAsyncDataPool(ClientCategory.DATA)); - break; - case MetaGroupClient: - asyncClientPoolMap.put( - ClientCategory.META, clientPoolFactory.createAsyncMetaPool(ClientCategory.META)); - asyncClientPoolMap.put( - ClientCategory.META_HEARTBEAT, - clientPoolFactory.createAsyncMetaPool(ClientCategory.META_HEARTBEAT)); - break; - case DataGroupClient: - asyncClientPoolMap.put( - ClientCategory.DATA, clientPoolFactory.createAsyncDataPool(ClientCategory.DATA)); - asyncClientPoolMap.put( - ClientCategory.DATA_HEARTBEAT, - clientPoolFactory.createAsyncDataPool(ClientCategory.DATA_HEARTBEAT)); - asyncClientPoolMap.put( - ClientCategory.DATA_ASYNC_APPEND_CLIENT, - clientPoolFactory.createSingleManagerAsyncDataPool()); - break; - default: - logger.warn("unsupported ClientManager type: {}", type); - break; - } - } - - private void constructSyncClientMap(Type type) { - switch (type) { - case RequestForwardClient: - syncClientPoolMap.put( - ClientCategory.DATA, clientPoolFactory.createSyncDataPool(ClientCategory.DATA)); - break; - case MetaGroupClient: - syncClientPoolMap.put( - ClientCategory.META, clientPoolFactory.createSyncMetaPool(ClientCategory.META)); - syncClientPoolMap.put( - ClientCategory.META_HEARTBEAT, - clientPoolFactory.createSyncMetaPool(ClientCategory.META_HEARTBEAT)); - break; - case DataGroupClient: - syncClientPoolMap.put( - ClientCategory.DATA, clientPoolFactory.createSyncDataPool(ClientCategory.DATA)); - syncClientPoolMap.put( - ClientCategory.DATA_HEARTBEAT, - clientPoolFactory.createSyncDataPool(ClientCategory.DATA_HEARTBEAT)); - break; - default: - logger.warn("unsupported ClientManager type: {}", type); - break; - } - } - - /** - * It's safe to convert: 1. RaftService.AsyncClient to TSDataService.AsyncClient when category is - * DATA or DATA_HEARTBEAT; 2. RaftService.AsyncClient to TSMetaService.AsyncClient when category - * is META or META_HEARTBEAT. - * - * @return RaftService.AsyncClient - */ - @Override - public RaftService.AsyncClient borrowAsyncClient(Node node, ClientCategory category) - throws IOException { - KeyedObjectPool pool; - RaftService.AsyncClient client = null; - if (asyncClientPoolMap != null && (pool = asyncClientPoolMap.get(category)) != null) { - try { - client = pool.borrowObject(node); - } catch (IOException e) { - // external needs the IOException to check connection - throw e; - } catch (Exception e) { - // external doesn't care of other exceptions - logger.error("BorrowAsyncClient fail.", e); - } - } else { - logger.warn( - "BorrowSyncClient invoke on unsupported mode or category: Node:{}, ClientCategory:{}, " - + "isSyncMode:{}", - node, - clientPoolFactory, - syncClientPoolMap != null); - } - return client; - } - - /** - * It's safe to convert: 1. 
RaftService.Client to TSDataService.Client when category is DATA or - * DATA_HEARTBEAT; 2. RaftService.Client to TSMetaService.Client when category is META or - * META_HEARTBEAT. - * - * @return RaftService.Client - */ - @Override - public RaftService.Client borrowSyncClient(Node node, ClientCategory category) - throws IOException { - KeyedObjectPool pool; - RaftService.Client client = null; - if (syncClientPoolMap != null && (pool = syncClientPoolMap.get(category)) != null) { - try { - client = pool.borrowObject(node); - } catch (TTransportException e) { - // external needs to check transport related exception - throw new IOException(e); - } catch (IOException e) { - // external needs the IOException to check connection - throw e; - } catch (Exception e) { - // external doesn't care of other exceptions - logger.error("BorrowSyncClient fail.", e); - } - } else { - logger.warn( - "BorrowSyncClient invoke on unsupported mode or category: Node:{}, ClientCategory:{}, " - + "isSyncMode:{}", - node, - clientPoolFactory, - syncClientPoolMap != null); - } - return client; - } - - @Override - public void returnAsyncClient( - RaftService.AsyncClient client, Node node, ClientCategory category) { - if (client != null && node != null) { - try { - asyncClientPoolMap.get(category).returnObject(node, client); - } catch (Exception e) { - logger.error("AsyncClient return error: {}", client, e); - } - } - } - - @Override - public void returnSyncClient(RaftService.Client client, Node node, ClientCategory category) { - if (client != null && node != null) { - try { - syncClientPoolMap.get(category).returnObject(node, client); - } catch (Exception e) { - logger.error("SyncClient return error: {}", client, e); - } - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientPoolFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientPoolFactory.java deleted file mode 100644 index 00db59adcbca..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/client/ClientPoolFactory.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.client; - -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.async.AsyncMetaClient; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncMetaClient; -import org.apache.iotdb.cluster.config.ClusterConfig; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; - -import org.apache.commons.pool2.impl.GenericKeyedObjectPool; -import org.apache.commons.pool2.impl.GenericKeyedObjectPoolConfig; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TCompactProtocol; -import org.apache.thrift.protocol.TProtocolFactory; - -import java.time.Duration; - -public class ClientPoolFactory { - - protected long waitClientTimeoutMS; - protected int maxConnectionForEachNode; - protected int maxIdleConnectionForEachNode; - private final TProtocolFactory protocolFactory; - private GenericKeyedObjectPoolConfig poolConfig; - private IClientManager clientManager; - - public ClientPoolFactory() { - ClusterConfig config = ClusterDescriptor.getInstance().getConfig(); - this.waitClientTimeoutMS = config.getWaitClientTimeoutMS(); - this.maxConnectionForEachNode = config.getMaxClientPerNodePerMember(); - this.maxIdleConnectionForEachNode = config.getMaxIdleClientPerNodePerMember(); - protocolFactory = - config.isRpcThriftCompressionEnabled() - ? new TCompactProtocol.Factory() - : new TBinaryProtocol.Factory(); - poolConfig = new GenericKeyedObjectPoolConfig(); - poolConfig.setMaxTotalPerKey(maxConnectionForEachNode); - poolConfig.setMaxIdlePerKey(maxIdleConnectionForEachNode); - poolConfig.setMaxWait(Duration.ofMillis(waitClientTimeoutMS)); - poolConfig.setTestOnReturn(true); - poolConfig.setTestOnBorrow(true); - } - - public void setClientManager(IClientManager clientManager) { - this.clientManager = clientManager; - } - - public GenericKeyedObjectPool createSyncDataPool( - ClientCategory category) { - return new GenericKeyedObjectPool<>( - new SyncDataClient.SyncDataClientFactory(protocolFactory, category, clientManager), - poolConfig); - } - - public GenericKeyedObjectPool createSyncMetaPool( - ClientCategory category) { - return new GenericKeyedObjectPool<>( - new SyncMetaClient.SyncMetaClientFactory(protocolFactory, category, clientManager), - poolConfig); - } - - public GenericKeyedObjectPool createAsyncDataPool( - ClientCategory category) { - return new GenericKeyedObjectPool<>( - new AsyncDataClient.AsyncDataClientFactory(protocolFactory, category, clientManager), - poolConfig); - } - - public GenericKeyedObjectPool createAsyncMetaPool( - ClientCategory category) { - return new GenericKeyedObjectPool<>( - new AsyncMetaClient.AsyncMetaClientFactory(protocolFactory, category, clientManager), - poolConfig); - } - - public GenericKeyedObjectPool createSingleManagerAsyncDataPool() { - return new GenericKeyedObjectPool<>( - new AsyncDataClient.SingleManagerFactory(protocolFactory, clientManager), poolConfig); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/IClientManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/IClientManager.java deleted file mode 100644 index 6652e0c6e7c0..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/client/IClientManager.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.client; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; - -import java.io.IOException; - -public interface IClientManager { - RaftService.AsyncClient borrowAsyncClient(Node node, ClientCategory category) throws IOException; - - RaftService.Client borrowSyncClient(Node node, ClientCategory category) throws IOException; - - void returnAsyncClient(RaftService.AsyncClient client, Node node, ClientCategory category); - - void returnSyncClient(RaftService.Client client, Node node, ClientCategory category); -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncBaseFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncBaseFactory.java deleted file mode 100644 index b2b84153a69e..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncBaseFactory.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.client.async; - -import org.apache.iotdb.cluster.client.BaseFactory; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.IClientManager; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; - -import org.apache.thrift.async.TAsyncClientManager; -import org.apache.thrift.protocol.TProtocolFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -public abstract class AsyncBaseFactory - extends BaseFactory { - - private static final Logger logger = LoggerFactory.getLogger(AsyncBaseFactory.class); - - protected AsyncBaseFactory(TProtocolFactory protocolFactory, ClientCategory category) { - super(protocolFactory, category); - managers = - new TAsyncClientManager - [ClusterDescriptor.getInstance().getConfig().getSelectorNumOfClientPool()]; - for (int i = 0; i < managers.length; i++) { - try { - managers[i] = new TAsyncClientManager(); - } catch (IOException e) { - logger.error("Cannot create data heartbeat client manager for factory", e); - } - } - } - - protected AsyncBaseFactory( - TProtocolFactory protocolFactory, ClientCategory category, IClientManager clientManager) { - super(protocolFactory, category, clientManager); - managers = - new TAsyncClientManager - [ClusterDescriptor.getInstance().getConfig().getSelectorNumOfClientPool()]; - for (int i = 0; i < managers.length; i++) { - try { - managers[i] = new TAsyncClientManager(); - } catch (IOException e) { - logger.error("Cannot create data heartbeat client manager for factory", e); - } - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataClient.java deleted file mode 100644 index ebfb7ded5a4d..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncDataClient.java +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.client.async; - -import org.apache.iotdb.cluster.client.BaseFactory; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.IClientManager; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.TSDataService; -import org.apache.iotdb.cluster.utils.ClientUtils; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.rpc.TNonblockingSocketWrapper; - -import org.apache.commons.pool2.PooledObject; -import org.apache.commons.pool2.impl.DefaultPooledObject; -import org.apache.thrift.async.TAsyncClientManager; -import org.apache.thrift.async.TAsyncMethodCall; -import org.apache.thrift.protocol.TProtocolFactory; -import org.apache.thrift.transport.TNonblockingTransport; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Notice: Because a client will be returned to a pool immediately after a successful request, you - * should not cache it anywhere else. - */ -public class AsyncDataClient extends TSDataService.AsyncClient { - - private static final Logger logger = LoggerFactory.getLogger(AsyncDataClient.class); - - private Node node; - private ClientCategory category; - private IClientManager clientManager; - - @TestOnly - public AsyncDataClient( - TProtocolFactory protocolFactory, - TAsyncClientManager clientManager, - TNonblockingTransport transport) { - super(protocolFactory, clientManager, transport); - } - - public AsyncDataClient( - TProtocolFactory protocolFactory, - TAsyncClientManager tClientManager, - Node node, - ClientCategory category) - throws IOException { - // the difference of the two clients lies in the port - super( - protocolFactory, - tClientManager, - TNonblockingSocketWrapper.wrap( - node.getInternalIp(), - ClientUtils.getPort(node, category), - ClusterConstant.getConnectionTimeoutInMS())); - this.node = node; - this.category = category; - } - - public AsyncDataClient( - TProtocolFactory protocolFactory, - TAsyncClientManager tClientManager, - Node node, - ClientCategory category, - IClientManager manager) - throws IOException { - this(protocolFactory, tClientManager, node, category); - this.clientManager = manager; - } - - public void close() { - ___transport.close(); - ___currentMethod = null; - } - - public boolean isValid() { - return ___transport != null; - } - - /** - * return self if clientPool is not null, the method doesn't need to call by user, it will trigger - * once client transport complete. 
- */ - private void returnSelf() { - if (clientManager != null) { - clientManager.returnAsyncClient(this, node, category); - } - } - - @Override - public void onComplete() { - super.onComplete(); - returnSelf(); - } - - @Override - public String toString() { - return "Async" - + category.getName() - + "{" - + "node=" - + node - + "," - + "port=" - + ClientUtils.getPort(node, category) - + '}'; - } - - public Node getNode() { - return node; - } - - public boolean isReady() { - try { - checkReady(); - return true; - } catch (Exception e) { - return false; - } - } - - @TestOnly - TAsyncMethodCall getCurrMethod() { - return ___currentMethod; - } - - public static class AsyncDataClientFactory extends AsyncBaseFactory { - - public AsyncDataClientFactory(TProtocolFactory protocolFactory, ClientCategory category) { - super(protocolFactory, category); - } - - public AsyncDataClientFactory( - TProtocolFactory protocolFactory, ClientCategory category, IClientManager clientManager) { - super(protocolFactory, category, clientManager); - } - - @Override - public void destroyObject(Node node, PooledObject pooledObject) { - pooledObject.getObject().close(); - } - - @Override - public PooledObject makeObject(Node node) throws Exception { - TAsyncClientManager manager = managers[clientCnt.incrementAndGet() % managers.length]; - manager = manager == null ? new TAsyncClientManager() : manager; - return new DefaultPooledObject<>( - new AsyncDataClient(protocolFactory, manager, node, category, clientPoolManager)); - } - - @Override - public boolean validateObject(Node node, PooledObject pooledObject) { - return pooledObject.getObject() != null && pooledObject.getObject().isValid(); - } - } - - public static class SingleManagerFactory extends BaseFactory { - - public SingleManagerFactory(TProtocolFactory protocolFactory) { - super(protocolFactory, ClientCategory.DATA); - managers = new TAsyncClientManager[1]; - try { - managers[0] = new TAsyncClientManager(); - } catch (IOException e) { - logger.error("Cannot create data heartbeat client manager for factory", e); - } - } - - public SingleManagerFactory(TProtocolFactory protocolFactory, IClientManager clientManager) { - this(protocolFactory); - this.clientPoolManager = clientManager; - } - - @Override - public void activateObject(Node node, PooledObject pooledObject) {} - - @Override - public void destroyObject(Node node, PooledObject pooledObject) { - pooledObject.getObject().close(); - } - - @Override - public PooledObject makeObject(Node node) throws Exception { - return new DefaultPooledObject<>( - new AsyncDataClient( - protocolFactory, managers[0], node, ClientCategory.DATA, clientPoolManager)); - } - - @Override - public void passivateObject(Node node, PooledObject pooledObject) {} - - @Override - public boolean validateObject(Node node, PooledObject pooledObject) { - return pooledObject.getObject() != null && pooledObject.getObject().isValid(); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaClient.java deleted file mode 100644 index 6d4c03998cd9..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/client/async/AsyncMetaClient.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.client.async; - -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.IClientManager; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.TSMetaService; -import org.apache.iotdb.cluster.utils.ClientUtils; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.rpc.TNonblockingSocketWrapper; - -import org.apache.commons.pool2.PooledObject; -import org.apache.commons.pool2.impl.DefaultPooledObject; -import org.apache.thrift.async.TAsyncClientManager; -import org.apache.thrift.async.TAsyncMethodCall; -import org.apache.thrift.protocol.TProtocolFactory; -import org.apache.thrift.transport.TNonblockingTransport; - -import java.io.IOException; - -/** - * Notice: Because a client will be returned to a pool immediately after a successful request, you - * should not cache it anywhere else. - */ -public class AsyncMetaClient extends TSMetaService.AsyncClient { - - private Node node; - private ClientCategory category; - private IClientManager clientManager; - - public AsyncMetaClient( - TProtocolFactory protocolFactory, - TAsyncClientManager clientManager, - TNonblockingTransport transport) { - super(protocolFactory, clientManager, transport); - } - - public AsyncMetaClient( - TProtocolFactory protocolFactory, - TAsyncClientManager clientManager, - Node node, - ClientCategory category) - throws IOException { - // the difference of the two clients lies in the port - super( - protocolFactory, - clientManager, - TNonblockingSocketWrapper.wrap( - node.getInternalIp(), - ClientUtils.getPort(node, category), - ClusterConstant.getConnectionTimeoutInMS())); - this.node = node; - this.category = category; - } - - public AsyncMetaClient( - TProtocolFactory protocolFactory, - TAsyncClientManager clientManager, - Node node, - ClientCategory category, - IClientManager manager) - throws IOException { - this(protocolFactory, clientManager, node, category); - this.clientManager = manager; - } - - /** - * return self if clientManager is not null, the method doesn't need to call by user, it will - * trigger once client transport complete. 
- */ - public void returnSelf() { - if (clientManager != null) { - clientManager.returnAsyncClient(this, node, category); - } - } - - @Override - public void onComplete() { - super.onComplete(); - returnSelf(); - } - - @Override - public String toString() { - return "Async" - + category.getName() - + "{" - + "node=" - + node - + "," - + "port=" - + ClientUtils.getPort(node, category) - + '}'; - } - - public void close() { - ___transport.close(); - ___currentMethod = null; - } - - public Node getNode() { - return node; - } - - @TestOnly - public boolean isReady() { - try { - checkReady(); - return true; - } catch (Exception e) { - return false; - } - } - - public boolean isValid() { - return ___transport != null; - } - - @TestOnly - TAsyncMethodCall getCurrMethod() { - return ___currentMethod; - } - - public static class AsyncMetaClientFactory extends AsyncBaseFactory { - - public AsyncMetaClientFactory(TProtocolFactory protocolFactory, ClientCategory category) { - super(protocolFactory, category); - } - - public AsyncMetaClientFactory( - TProtocolFactory protocolFactory, ClientCategory category, IClientManager clientManager) { - super(protocolFactory, category, clientManager); - } - - @Override - public void activateObject(Node node, PooledObject pooledObject) {} - - @Override - public void destroyObject(Node node, PooledObject pooledObject) { - pooledObject.getObject().close(); - } - - @Override - public PooledObject makeObject(Node node) throws Exception { - TAsyncClientManager manager = managers[clientCnt.incrementAndGet() % managers.length]; - manager = manager == null ? new TAsyncClientManager() : manager; - return new DefaultPooledObject<>( - new AsyncMetaClient(protocolFactory, manager, node, category, clientPoolManager)); - } - - @Override - public void passivateObject(Node node, PooledObject pooledObject) {} - - @Override - public boolean validateObject(Node node, PooledObject pooledObject) { - return pooledObject != null && pooledObject.getObject().isValid(); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientAdaptor.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientAdaptor.java deleted file mode 100644 index b066bc86a03c..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncClientAdaptor.java +++ /dev/null @@ -1,527 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.client.sync; - -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.async.AsyncMetaClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.snapshot.SnapshotFactory; -import org.apache.iotdb.cluster.rpc.thrift.AddNodeResponse; -import org.apache.iotdb.cluster.rpc.thrift.CheckStatusResponse; -import org.apache.iotdb.cluster.rpc.thrift.ExecutNonQueryReq; -import org.apache.iotdb.cluster.rpc.thrift.GetAggrResultRequest; -import org.apache.iotdb.cluster.rpc.thrift.GetAllPathsResult; -import org.apache.iotdb.cluster.rpc.thrift.GroupByRequest; -import org.apache.iotdb.cluster.rpc.thrift.LastQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.MeasurementSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PreviousFillRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.SingleSeriesQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.StartUpStatus; -import org.apache.iotdb.cluster.rpc.thrift.TNodeStatus; -import org.apache.iotdb.cluster.server.handlers.caller.GenericHandler; -import org.apache.iotdb.cluster.server.handlers.caller.GetChildNodeNextLevelHandler; -import org.apache.iotdb.cluster.server.handlers.caller.GetChildNodeNextLevelPathHandler; -import org.apache.iotdb.cluster.server.handlers.caller.GetDevicesHandler; -import org.apache.iotdb.cluster.server.handlers.caller.GetNodesListHandler; -import org.apache.iotdb.cluster.server.handlers.caller.GetTimeseriesSchemaHandler; -import org.apache.iotdb.cluster.server.handlers.caller.JoinClusterHandler; -import org.apache.iotdb.cluster.server.handlers.caller.PullMeasurementSchemaHandler; -import org.apache.iotdb.cluster.server.handlers.caller.PullSnapshotHandler; -import org.apache.iotdb.cluster.server.handlers.caller.PullTimeseriesSchemaHandler; -import org.apache.iotdb.cluster.server.handlers.forwarder.ForwardPlanHandler; -import org.apache.iotdb.cluster.utils.PlanSerializer; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.sys.ShowDevicesPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.tsfile.read.filter.TimeFilter; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; -import org.apache.iotdb.tsfile.read.filter.factory.FilterFactory; -import org.apache.iotdb.tsfile.read.filter.operator.AndFilter; -import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicReference; - -/** - * SyncClientAdaptor convert the async of AsyncClient method call to a sync one by synchronizing on - * an AtomicReference of the 
return value of an RPC, and wait for at most connectionTimeoutInMS - * until the reference is set by the handler or the request timeouts. - */ -@SuppressWarnings("java:S2274") // enable timeout -public class SyncClientAdaptor { - - private static final Logger logger = LoggerFactory.getLogger(SyncClientAdaptor.class); - - private SyncClientAdaptor() { - // static class - } - - public static Long removeNode(AsyncMetaClient asyncMetaClient, Node nodeToRemove) - throws TException, InterruptedException { - AtomicReference responseRef = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(asyncMetaClient.getNode(), responseRef); - asyncMetaClient.removeNode(nodeToRemove, handler); - synchronized (responseRef) { - if (responseRef.get() == null) { - responseRef.wait(ClusterConstant.getConnectionTimeoutInMS()); - } - } - return responseRef.get(); - } - - public static Boolean matchTerm( - AsyncClient client, Node target, long prevLogIndex, long prevLogTerm, RaftNode header) - throws TException, InterruptedException { - try { - AtomicReference resultRef = new AtomicReference<>(null); - GenericHandler matchTermHandler = new GenericHandler<>(target, resultRef); - - client.matchTerm(prevLogIndex, prevLogTerm, header, matchTermHandler); - synchronized (resultRef) { - if (resultRef.get() == null) { - resultRef.wait(ClusterConstant.getConnectionTimeoutInMS()); - } - } - return resultRef.get(); - } catch (NullPointerException e) { - logger.error("match term null exception", e); - return false; - } - } - - public static Long querySingleSeriesByTimestamp( - AsyncDataClient client, SingleSeriesQueryRequest request) - throws TException, InterruptedException { - AtomicReference result = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(client.getNode(), result); - - client.querySingleSeriesByTimestamp(request, handler); - synchronized (result) { - if (result.get() == null && handler.getException() == null) { - result.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - } - return result.get(); - } - - public static Long querySingleSeries( - AsyncDataClient client, SingleSeriesQueryRequest request, long timeOffset) - throws TException, InterruptedException { - AtomicReference result = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(client.getNode(), result); - Filter newFilter; - // add timestamp to as a timeFilter to skip the data which has been read - if (request.isSetTimeFilterBytes()) { - Filter timeFilter = FilterFactory.deserialize(request.timeFilterBytes); - newFilter = new AndFilter(timeFilter, TimeFilter.gt(timeOffset)); - } else { - newFilter = TimeFilter.gt(timeOffset); - } - request.setTimeFilterBytes(SerializeUtils.serializeFilter(newFilter)); - - client.querySingleSeries(request, handler); - synchronized (result) { - if (result.get() == null && handler.getException() == null) { - result.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - } - return result.get(); - } - - public static List getNodeList( - AsyncDataClient client, RaftNode header, String schemaPattern, int level) - throws TException, InterruptedException { - GetNodesListHandler handler = new GetNodesListHandler(); - AtomicReference> response = new AtomicReference<>(null); - handler.setResponse(response); - handler.setContact(client.getNode()); - - client.getNodeList(header, schemaPattern, level, handler); - synchronized (response) { - if (response.get() == null) { - response.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - } - return response.get(); - 
} - - public static Set getChildNodeInNextLevel( - AsyncDataClient client, RaftNode header, String path) - throws TException, InterruptedException { - GetChildNodeNextLevelHandler handler = new GetChildNodeNextLevelHandler(); - AtomicReference> response = new AtomicReference<>(null); - handler.setResponse(response); - handler.setContact(client.getNode()); - - client.getChildNodeInNextLevel(header, path, handler); - synchronized (response) { - if (response.get() == null) { - response.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - } - return response.get(); - } - - public static Set getNextChildren(AsyncDataClient client, RaftNode header, String path) - throws TException, InterruptedException { - GetChildNodeNextLevelPathHandler handler = new GetChildNodeNextLevelPathHandler(); - AtomicReference> response = new AtomicReference<>(null); - handler.setResponse(response); - handler.setContact(client.getNode()); - - client.getChildNodePathInNextLevel(header, path, handler); - synchronized (response) { - if (response.get() == null) { - response.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - } - return response.get(); - } - - public static ByteBuffer getAllMeasurementSchema( - AsyncDataClient client, MeasurementSchemaRequest request) - throws IOException, InterruptedException, TException { - GetTimeseriesSchemaHandler handler = new GetTimeseriesSchemaHandler(); - AtomicReference response = new AtomicReference<>(null); - handler.setResponse(response); - handler.setContact(client.getNode()); - client.getAllMeasurementSchema(request, handler); - synchronized (response) { - if (response.get() == null) { - response.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - } - return response.get(); - } - - public static TNodeStatus queryNodeStatus(AsyncMetaClient client) - throws TException, InterruptedException { - AtomicReference resultRef = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(client.getNode(), resultRef); - - client.queryNodeStatus(handler); - synchronized (resultRef) { - if (resultRef.get() == null) { - resultRef.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - } - if (handler.getException() != null) { - throw new TException(handler.getException()); - } - return resultRef.get(); - } - - public static CheckStatusResponse checkStatus(AsyncMetaClient client, StartUpStatus startUpStatus) - throws TException, InterruptedException { - AtomicReference resultRef = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(client.getNode(), resultRef); - - client.checkStatus(startUpStatus, handler); - synchronized (resultRef) { - if (resultRef.get() == null) { - resultRef.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - } - if (handler.getException() != null) { - throw new TException(handler.getException()); - } - return resultRef.get(); - } - - public static AddNodeResponse addNode( - AsyncMetaClient client, Node thisNode, StartUpStatus startUpStatus) - throws TException, InterruptedException { - JoinClusterHandler handler = new JoinClusterHandler(); - AtomicReference response = new AtomicReference<>(null); - handler.setResponse(response); - handler.setContact(client.getNode()); - - client.addNode(thisNode, startUpStatus, handler); - synchronized (response) { - if (response.get() == null) { - response.wait(60 * 1000L); - } - } - return response.get(); - } - - public static List pullMeasurementSchema( - AsyncDataClient client, PullSchemaRequest pullSchemaRequest) - throws TException, InterruptedException { - 
AtomicReference> measurementSchemas = new AtomicReference<>(); - - client.pullMeasurementSchema( - pullSchemaRequest, - new PullMeasurementSchemaHandler( - client.getNode(), pullSchemaRequest.getPrefixPaths(), measurementSchemas)); - synchronized (measurementSchemas) { - if (measurementSchemas.get() == null) { - measurementSchemas.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - } - return measurementSchemas.get(); - } - - public static List pullTimeseriesSchema( - AsyncDataClient client, PullSchemaRequest pullSchemaRequest) - throws TException, InterruptedException { - AtomicReference> timeseriesSchemas = new AtomicReference<>(); - client.pullTimeSeriesSchema( - pullSchemaRequest, - new PullTimeseriesSchemaHandler( - client.getNode(), pullSchemaRequest.getPrefixPaths(), timeseriesSchemas)); - - synchronized (timeseriesSchemas) { - if (timeseriesSchemas.get() == null) { - timeseriesSchemas.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - } - return timeseriesSchemas.get(); - } - - public static List getAggrResult(AsyncDataClient client, GetAggrResultRequest request) - throws TException, InterruptedException { - AtomicReference> resultReference = new AtomicReference<>(); - GenericHandler> handler = - new GenericHandler<>(client.getNode(), resultReference); - - client.getAggrResult(request, handler); - synchronized (resultReference) { - if (resultReference.get() == null) { - resultReference.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - } - if (handler.getException() != null) { - throw new TException(handler.getException()); - } - return resultReference.get(); - } - - public static List getUnregisteredMeasurements( - AsyncDataClient client, RaftNode header, List seriesPaths) - throws TException, InterruptedException { - AtomicReference> remoteResult = new AtomicReference<>(); - GenericHandler> handler = new GenericHandler<>(client.getNode(), remoteResult); - - client.getUnregisteredTimeseries(header, seriesPaths, handler); - return handler.getResult(ClusterConstant.getReadOperationTimeoutMS()); - } - - public static GetAllPathsResult getAllPaths( - AsyncDataClient client, RaftNode header, List pathsToQuery, boolean withAlias) - throws InterruptedException, TException { - AtomicReference remoteResult = new AtomicReference<>(); - GenericHandler handler = - new GenericHandler<>(client.getNode(), remoteResult); - - client.getAllPaths(header, pathsToQuery, withAlias, handler); - return handler.getResult(ClusterConstant.getReadOperationTimeoutMS()); - } - - public static Integer getPathCount( - AsyncDataClient client, RaftNode header, List pathsToQuery, int level) - throws InterruptedException, TException { - AtomicReference remoteResult = new AtomicReference<>(null); - GenericHandler handler = new GenericHandler<>(client.getNode(), remoteResult); - - client.getPathCount(header, pathsToQuery, level, handler); - return handler.getResult(ClusterConstant.getReadOperationTimeoutMS()); - } - - public static Integer getDeviceCount( - AsyncDataClient client, RaftNode header, List pathsToQuery) - throws InterruptedException, TException { - AtomicReference remoteResult = new AtomicReference<>(null); - GenericHandler handler = new GenericHandler<>(client.getNode(), remoteResult); - - client.getDeviceCount(header, pathsToQuery, handler); - return handler.getResult(ClusterConstant.getReadOperationTimeoutMS()); - } - - public static Set getAllDevices( - AsyncDataClient client, RaftNode header, List pathsToQuery, boolean isPrefixMatch) - throws InterruptedException, TException { - 
AtomicReference<Set<String>> remoteResult = new AtomicReference<>(); - GenericHandler<Set<String>> handler = new GenericHandler<>(client.getNode(), remoteResult); - - client.getAllDevices(header, pathsToQuery, isPrefixMatch, handler); - return handler.getResult(ClusterConstant.getReadOperationTimeoutMS()); - } - - public static ByteBuffer getDevices(AsyncDataClient client, RaftNode header, ShowDevicesPlan plan) - throws InterruptedException, TException, IOException { - GetDevicesHandler handler = new GetDevicesHandler(); - AtomicReference<ByteBuffer> response = new AtomicReference<>(null); - handler.setResponse(response); - handler.setContact(client.getNode()); - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - plan.serialize(dataOutputStream); - - client.getDevices(header, ByteBuffer.wrap(byteArrayOutputStream.toByteArray()), handler); - synchronized (response) { - if (response.get() == null) { - response.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - } - return response.get(); - } - - public static Long getGroupByExecutor(AsyncDataClient client, GroupByRequest request) - throws TException, InterruptedException { - AtomicReference<Long> result = new AtomicReference<>(); - GenericHandler<Long> handler = new GenericHandler<>(client.getNode(), result); - - client.getGroupByExecutor(request, handler); - return handler.getResult(ClusterConstant.getReadOperationTimeoutMS()); - } - - public static ByteBuffer previousFill(AsyncDataClient client, PreviousFillRequest request) - throws TException, InterruptedException { - AtomicReference<ByteBuffer> resultRef = new AtomicReference<>(); - GenericHandler<ByteBuffer> nodeHandler = new GenericHandler<>(client.getNode(), resultRef); - - client.previousFill(request, nodeHandler); - return nodeHandler.getResult(ClusterConstant.getReadOperationTimeoutMS()); - } - - public static TSStatus executeNonQuery( - AsyncClient client, PhysicalPlan plan, RaftNode header, Node receiver) - throws IOException, TException, InterruptedException { - AtomicReference<TSStatus> status = new AtomicReference<>(); - ExecutNonQueryReq req = new ExecutNonQueryReq(); - req.planBytes = ByteBuffer.wrap(PlanSerializer.getInstance().serialize(plan)); - if (header != null) { - req.setHeader(header); - } - - client.executeNonQueryPlan(req, new ForwardPlanHandler(status, plan, receiver)); - synchronized (status) { - if (status.get() == null) { - status.wait(ClusterConstant.getWriteOperationTimeoutMS()); - } - } - return status.get(); - } - - public static ByteBuffer readFile( - AsyncDataClient client, String remotePath, long offset, int fetchSize) - throws InterruptedException, TException { - AtomicReference<ByteBuffer> result = new AtomicReference<>(); - GenericHandler<ByteBuffer> handler = new GenericHandler<>(client.getNode(), result); - - client.readFile(remotePath, offset, fetchSize, handler); - return handler.getResult(ClusterConstant.getWriteOperationTimeoutMS()); - } - - public static List<ByteBuffer> getGroupByResult( - AsyncDataClient client, RaftNode header, long executorId, long curStartTime, long curEndTime) - throws InterruptedException, TException { - AtomicReference<List<ByteBuffer>> fetchResult = new AtomicReference<>(); - GenericHandler<List<ByteBuffer>> handler = new GenericHandler<>(client.getNode(), fetchResult); - - client.getGroupByResult(header, executorId, curStartTime, curEndTime, handler); - return handler.getResult(ClusterConstant.getReadOperationTimeoutMS()); - } - - public static ByteBuffer peekNextNotNullValue( - AsyncDataClient client, RaftNode header, long executorId, long curStartTime,
long curEndTime) - throws InterruptedException, TException { - AtomicReference fetchResult = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(client.getNode(), fetchResult); - - client.peekNextNotNullValue(header, executorId, curStartTime, curEndTime, handler); - return handler.getResult(ClusterConstant.getReadOperationTimeoutMS()); - } - - public static Map pullSnapshot( - AsyncDataClient client, - PullSnapshotRequest request, - List slots, - SnapshotFactory factory) - throws TException, InterruptedException { - AtomicReference> snapshotRef = new AtomicReference<>(); - - client.pullSnapshot( - request, new PullSnapshotHandler<>(snapshotRef, client.getNode(), slots, factory)); - synchronized (snapshotRef) { - if (snapshotRef.get() == null) { - snapshotRef.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - } - return snapshotRef.get(); - } - - public static ByteBuffer last( - AsyncDataClient client, - List seriesPaths, - List dataTypeOrdinals, - Filter timeFilter, - QueryContext context, - Map> deviceMeasurements, - RaftNode header) - throws TException, InterruptedException { - AtomicReference result = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(client.getNode(), result); - LastQueryRequest request = - new LastQueryRequest( - PartialPath.toStringList(seriesPaths), - dataTypeOrdinals, - context.getQueryId(), - deviceMeasurements, - header, - client.getNode()); - if (timeFilter != null) { - request.setFilterBytes(SerializeUtils.serializeFilter(timeFilter)); - } - client.last(request, handler); - return handler.getResult(ClusterConstant.getReadOperationTimeoutMS()); - } - - public static boolean onSnapshotApplied( - AsyncDataClient client, RaftNode header, List slots) - throws TException, InterruptedException { - AtomicReference result = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(client.getNode(), result); - - client.onSnapshotApplied(header, slots, handler); - return handler.getResult(ClusterConstant.getWriteOperationTimeoutMS()); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataClient.java deleted file mode 100644 index 75aa6a096bbe..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncDataClient.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.client.sync; - -import org.apache.iotdb.cluster.client.BaseFactory; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.IClientManager; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.TSDataService; -import org.apache.iotdb.cluster.utils.ClientUtils; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.rpc.RpcTransportFactory; -import org.apache.iotdb.rpc.TConfigurationConst; -import org.apache.iotdb.rpc.TimeoutChangeableTransport; - -import org.apache.commons.pool2.PooledObject; -import org.apache.commons.pool2.impl.DefaultPooledObject; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.protocol.TProtocolFactory; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransportException; - -import java.net.SocketException; - -/** - * Notice: Because a client will be returned to a pool immediately after a successful request, you - * should not cache it anywhere else. - */ -// TODO: Refine the interfaces of TSDataService. TSDataService interfaces doesn't need extends -// RaftService interfaces. -public class SyncDataClient extends TSDataService.Client { - - private Node node; - private ClientCategory category; - private IClientManager clientManager; - - @TestOnly - public SyncDataClient(TProtocol prot) { - super(prot); - } - - public SyncDataClient(TProtocolFactory protocolFactory, Node node, ClientCategory category) - throws TTransportException { - - // the difference of the two clients lies in the port - super( - protocolFactory.getProtocol( - RpcTransportFactory.INSTANCE.getTransport( - new TSocket( - TConfigurationConst.defaultTConfiguration, - node.getInternalIp(), - ClientUtils.getPort(node, category), - ClusterConstant.getConnectionTimeoutInMS())))); - this.node = node; - this.category = category; - getInputProtocol().getTransport().open(); - } - - public SyncDataClient( - TProtocolFactory protocolFactory, Node node, ClientCategory category, IClientManager manager) - throws TTransportException { - this(protocolFactory, node, category); - this.clientManager = manager; - } - - public void returnSelf() { - if (clientManager != null) { - clientManager.returnSyncClient(this, node, category); - } - } - - public void setTimeout(int timeout) { - // the same transport is used in both input and output - ((TimeoutChangeableTransport) (getInputProtocol().getTransport())).setTimeout(timeout); - } - - public void close() { - getInputProtocol().getTransport().close(); - } - - @TestOnly - public int getTimeout() throws SocketException { - return ((TimeoutChangeableTransport) getInputProtocol().getTransport()).getTimeOut(); - } - - @Override - public String toString() { - return "Sync" - + category.getName() - + "{" - + "node=" - + node - + "," - + "port=" - + ClientUtils.getPort(node, category) - + '}'; - } - - public Node getNode() { - return node; - } - - public static class SyncDataClientFactory extends BaseFactory { - - public SyncDataClientFactory(TProtocolFactory protocolFactory, ClientCategory category) { - super(protocolFactory, category); - } - - public SyncDataClientFactory( - TProtocolFactory protocolFactory, ClientCategory category, IClientManager clientManager) { - super(protocolFactory, category, clientManager); - } - - @Override - public void activateObject(Node node, PooledObject pooledObject) { - 
pooledObject.getObject().setTimeout(ClusterConstant.getConnectionTimeoutInMS()); - } - - @Override - public void destroyObject(Node node, PooledObject pooledObject) { - pooledObject.getObject().close(); - } - - @Override - public PooledObject makeObject(Node node) throws Exception { - return new DefaultPooledObject<>( - new SyncDataClient(protocolFactory, node, category, clientPoolManager)); - } - - @Override - public boolean validateObject(Node node, PooledObject pooledObject) { - return pooledObject.getObject() != null - && pooledObject.getObject().getInputProtocol().getTransport().isOpen(); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaClient.java b/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaClient.java deleted file mode 100644 index 2d2481cb2bc0..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/client/sync/SyncMetaClient.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.client.sync; - -import org.apache.iotdb.cluster.client.BaseFactory; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.IClientManager; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.TSMetaService; -import org.apache.iotdb.cluster.utils.ClientUtils; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.rpc.RpcTransportFactory; -import org.apache.iotdb.rpc.TConfigurationConst; -import org.apache.iotdb.rpc.TimeoutChangeableTransport; - -import org.apache.commons.pool2.PooledObject; -import org.apache.commons.pool2.impl.DefaultPooledObject; -import org.apache.thrift.protocol.TProtocolFactory; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransportException; - -import java.net.SocketException; - -/** - * Notice: Because a client will be returned to a pool immediately after a successful request, you - * should not cache it anywhere else. 
- */ -public class SyncMetaClient extends TSMetaService.Client { - - private Node node; - private ClientCategory category; - private IClientManager clientManager; - - public SyncMetaClient(TProtocolFactory protocolFactory, Node node, ClientCategory category) - throws TTransportException { - super( - protocolFactory.getProtocol( - RpcTransportFactory.INSTANCE.getTransport( - new TSocket( - TConfigurationConst.defaultTConfiguration, - node.getInternalIp(), - ClientUtils.getPort(node, category), - ClusterConstant.getConnectionTimeoutInMS())))); - this.node = node; - this.category = category; - getInputProtocol().getTransport().open(); - } - - public SyncMetaClient( - TProtocolFactory protocolFactory, Node node, ClientCategory category, IClientManager manager) - throws TTransportException { - this(protocolFactory, node, category); - this.clientManager = manager; - } - - public void returnSelf() { - if (clientManager != null) { - clientManager.returnSyncClient(this, node, category); - } - } - - public void setTimeout(int timeout) { - // the same transport is used in both input and output - ((TimeoutChangeableTransport) (getInputProtocol().getTransport())).setTimeout(timeout); - } - - @TestOnly - public int getTimeout() throws SocketException { - return ((TimeoutChangeableTransport) getInputProtocol().getTransport()).getTimeOut(); - } - - public void close() { - getInputProtocol().getTransport().close(); - } - - public Node getNode() { - return node; - } - - @Override - public String toString() { - return "Sync" - + category.getName() - + "{" - + "node=" - + node - + "," - + "port=" - + ClientUtils.getPort(node, category) - + '}'; - } - - public static class SyncMetaClientFactory extends BaseFactory { - - public SyncMetaClientFactory(TProtocolFactory protocolFactory, ClientCategory category) { - super(protocolFactory, category); - } - - public SyncMetaClientFactory( - TProtocolFactory protocolFactory, ClientCategory category, IClientManager clientManager) { - super(protocolFactory, category, clientManager); - } - - @Override - public void activateObject(Node node, PooledObject pooledObject) { - pooledObject.getObject().setTimeout(ClusterConstant.getConnectionTimeoutInMS()); - } - - @Override - public void destroyObject(Node node, PooledObject pooledObject) { - pooledObject.getObject().close(); - } - - @Override - public PooledObject makeObject(Node node) throws Exception { - return new DefaultPooledObject<>( - new SyncMetaClient(protocolFactory, node, category, clientPoolManager)); - } - - @Override - public boolean validateObject(Node node, PooledObject pooledObject) { - return pooledObject.getObject() != null - && pooledObject.getObject().getInputProtocol().getTransport().isOpen(); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java deleted file mode 100644 index 3e08e067ba73..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConfig.java +++ /dev/null @@ -1,584 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.config; - -import org.apache.iotdb.cluster.utils.ClusterConsistent; -import org.apache.iotdb.db.conf.IoTDBDescriptor; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.TimeUnit; - -public class ClusterConfig { - private static Logger logger = LoggerFactory.getLogger(ClusterConfig.class); - static final String CONFIG_NAME = "iotdb-cluster.properties"; - - private String internalIp; - private int internalMetaPort = 9003; - private int internalDataPort = 40010; - private int clusterRpcPort = IoTDBDescriptor.getInstance().getConfig().getRpcPort(); - private int clusterInfoRpcPort = 6567; - - /** each one is a {internalIp | domain name}:{meta port} string tuple. */ - private List seedNodeUrls; - - @ClusterConsistent private boolean isRpcThriftCompressionEnabled = false; - - @ClusterConsistent private int replicationNum = 1; - - @ClusterConsistent private int multiRaftFactor = 1; - - @ClusterConsistent private String clusterName = "default"; - - @ClusterConsistent private boolean useAsyncServer = false; - - private boolean useAsyncApplier = true; - - private int connectionTimeoutInMS = (int) TimeUnit.SECONDS.toMillis(20); - - private long heartbeatIntervalMs = TimeUnit.SECONDS.toMillis(1); - - private long electionTimeoutMs = TimeUnit.SECONDS.toMillis(20); - - private int readOperationTimeoutMS = (int) TimeUnit.SECONDS.toMillis(30); - - private int writeOperationTimeoutMS = (int) TimeUnit.SECONDS.toMillis(30); - - private int catchUpTimeoutMS = (int) TimeUnit.SECONDS.toMillis(300); - - private boolean useBatchInLogCatchUp = true; - - /** max number of committed logs to be saved */ - private int minNumOfLogsInMem = 1000; - - /** max number of committed logs in memory */ - private int maxNumOfLogsInMem = 2000; - - /** max memory size of committed logs in memory, default 512M */ - private long maxMemorySizeForRaftLog = 536870912; - - /** Ratio of write memory allocated for raft log */ - private double RaftLogMemoryProportion = 0.2; - - /** deletion check period of the submitted log */ - private int logDeleteCheckIntervalSecond = -1; - - /** max number of clients in a ClientPool of a member for one node. */ - private int maxClientPerNodePerMember = 1000; - - /** max number of idle clients in a ClientPool of a member for one node. */ - private int maxIdleClientPerNodePerMember = 500; - - /** - * If the number of connections created for a node exceeds `max_client_pernode_permember_number`, - * we need to wait so much time for other connections to be released until timeout, or a new - * connection will be created. - */ - private long waitClientTimeoutMS = 5 * 1000L; - - /** - * ClientPool will have so many selector threads (TAsyncClientManager) to distribute to its - * clients. - */ - private int selectorNumOfClientPool = - Runtime.getRuntime().availableProcessors() / 3 > 0 - ? 
Runtime.getRuntime().availableProcessors() / 3 - : 1; - - /** - * Whether creating schema automatically is enabled, this will replace the one in - * iotdb-engine.properties - */ - private boolean enableAutoCreateSchema = true; - - private boolean enableRaftLogPersistence = true; - - private int flushRaftLogThreshold = 10000; - - /** - * Size of log buffer. If raft log persistence is enabled and the size of a insert plan is smaller - * than this parameter, then the insert plan will be rejected by WAL. - */ - private int raftLogBufferSize = 16 * 1024 * 1024; - - /** - * consistency level, now three consistency levels are supported: strong, mid and weak. Strong - * consistency means the server will first try to synchronize with the leader to get the newest - * meta data, if failed(timeout), directly report an error to the user; While mid consistency - * means the server will first try to synchronize with the leader, but if failed(timeout), it will - * give up and just use current data it has cached before; Weak consistency do not synchronize - * with the leader and simply use the local data - */ - private ConsistencyLevel consistencyLevel = ConsistencyLevel.MID_CONSISTENCY; - - private long joinClusterTimeOutMs = TimeUnit.SECONDS.toMillis(5); - - private int pullSnapshotRetryIntervalMs = (int) TimeUnit.SECONDS.toMillis(5); - - /** - * The maximum value of the raft log index stored in the memory per raft group, These indexes are - * used to index the location of the log on the disk - */ - private int maxRaftLogIndexSizeInMemory = 10000; - - /** - * If leader finds too many uncommitted raft logs, raft group leader will wait for a short period - * of time, and then append the raft log - */ - private int UnCommittedRaftLogNumForRejectThreshold = 500; - - /** - * If followers find too many committed raft logs have not been applied, followers will reject the - * raft log sent by leader - */ - private int UnAppliedRaftLogNumForRejectThreshold = 500; - - /** - * The maximum size of the raft log saved on disk for each file (in bytes) of each raft group. The - * default size is 1GB - */ - private int maxRaftLogPersistDataSizePerFile = 1073741824; - - /** - * The maximum number of persistent raft log files on disk per raft group, So each raft group's - * log takes up disk space approximately equals max_raft_log_persist_data_size_per_file * - * max_number_of_persist_raft_log_files - */ - private int maxNumberOfPersistRaftLogFiles = 5; - - /** The maximum number of logs saved on the disk */ - private int maxPersistRaftLogNumberOnDisk = 1_000_000; - - private boolean enableUsePersistLogOnDiskToCatchUp = true; - - /** - * The number of logs read on the disk at one time, which is mainly used to control the memory - * usage.This value multiplied by the log size is about the amount of memory used to read logs - * from the disk at one time. - */ - private int maxNumberOfLogsPerFetchOnDisk = 1000; - - /** - * When set to true, if the log queue of a follower fills up, LogDispatcher will wait for a while - * until the queue becomes available, otherwise LogDispatcher will just ignore that slow node. - */ - private boolean waitForSlowNode = true; - - /** - * When consistency level is set to mid, query will fail if the log lag exceeds max_read_log_lag. - */ - private long maxReadLogLag = 1000L; - - /** - * When a follower tries to sync log with the leader, sync will fail if the log Lag exceeds - * maxSyncLogLag. 
- */ - private long maxSyncLogLag = 100000L; - - private boolean openServerRpcPort = false; - - /** - * create a clusterConfig class. The internalIP will be set according to the server's hostname. If - * there is something error for getting the ip of the hostname, then set the internalIp as - * localhost. - */ - public ClusterConfig() { - try { - internalIp = InetAddress.getLocalHost().getHostAddress(); - } catch (UnknownHostException e) { - logger.error(e.getMessage()); - internalIp = "127.0.0.1"; - } - seedNodeUrls = Arrays.asList(String.format("%s:%d", internalIp, internalMetaPort)); - } - - public int getSelectorNumOfClientPool() { - return selectorNumOfClientPool; - } - - public int getMaxClientPerNodePerMember() { - return maxClientPerNodePerMember; - } - - public void setMaxClientPerNodePerMember(int maxClientPerNodePerMember) { - this.maxClientPerNodePerMember = maxClientPerNodePerMember; - } - - public int getMaxIdleClientPerNodePerMember() { - return maxIdleClientPerNodePerMember; - } - - public void setMaxIdleClientPerNodePerMember(int maxIdleClientPerNodePerMember) { - this.maxIdleClientPerNodePerMember = maxIdleClientPerNodePerMember; - } - - public boolean isUseBatchInLogCatchUp() { - return useBatchInLogCatchUp; - } - - public void setUseBatchInLogCatchUp(boolean useBatchInLogCatchUp) { - this.useBatchInLogCatchUp = useBatchInLogCatchUp; - } - - public int getInternalMetaPort() { - return internalMetaPort; - } - - public void setInternalMetaPort(int internalMetaPort) { - this.internalMetaPort = internalMetaPort; - } - - public boolean isRpcThriftCompressionEnabled() { - return isRpcThriftCompressionEnabled; - } - - void setRpcThriftCompressionEnabled(boolean rpcThriftCompressionEnabled) { - isRpcThriftCompressionEnabled = rpcThriftCompressionEnabled; - } - - public List getSeedNodeUrls() { - return seedNodeUrls; - } - - public void setSeedNodeUrls(List seedNodeUrls) { - this.seedNodeUrls = seedNodeUrls; - } - - public int getReplicationNum() { - return replicationNum; - } - - public void setReplicationNum(int replicationNum) { - this.replicationNum = replicationNum; - } - - public int getMultiRaftFactor() { - return multiRaftFactor; - } - - public void setMultiRaftFactor(int multiRaftFactor) { - this.multiRaftFactor = multiRaftFactor; - } - - void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getClusterName() { - return clusterName; - } - - public int getInternalDataPort() { - return internalDataPort; - } - - public void setInternalDataPort(int internalDataPort) { - this.internalDataPort = internalDataPort; - } - - public int getClusterRpcPort() { - return clusterRpcPort; - } - - public void setClusterRpcPort(int clusterRpcPort) { - this.clusterRpcPort = clusterRpcPort; - } - - public int getConnectionTimeoutInMS() { - return connectionTimeoutInMS; - } - - void setConnectionTimeoutInMS(int connectionTimeoutInMS) { - this.connectionTimeoutInMS = connectionTimeoutInMS; - } - - public int getCatchUpTimeoutMS() { - return catchUpTimeoutMS; - } - - public void setCatchUpTimeoutMS(int catchUpTimeoutMS) { - this.catchUpTimeoutMS = catchUpTimeoutMS; - } - - public int getReadOperationTimeoutMS() { - return readOperationTimeoutMS; - } - - void setReadOperationTimeoutMS(int readOperationTimeoutMS) { - this.readOperationTimeoutMS = readOperationTimeoutMS; - } - - public int getWriteOperationTimeoutMS() { - return writeOperationTimeoutMS; - } - - public void setWriteOperationTimeoutMS(int writeOperationTimeoutMS) { - 
this.writeOperationTimeoutMS = writeOperationTimeoutMS; - } - - public int getMinNumOfLogsInMem() { - return minNumOfLogsInMem; - } - - public void setMinNumOfLogsInMem(int minNumOfLogsInMem) { - this.minNumOfLogsInMem = minNumOfLogsInMem; - } - - public int getLogDeleteCheckIntervalSecond() { - return logDeleteCheckIntervalSecond; - } - - void setLogDeleteCheckIntervalSecond(int logDeleteCheckIntervalSecond) { - this.logDeleteCheckIntervalSecond = logDeleteCheckIntervalSecond; - } - - public ConsistencyLevel getConsistencyLevel() { - return consistencyLevel; - } - - public void setConsistencyLevel(ConsistencyLevel consistencyLevel) { - this.consistencyLevel = consistencyLevel; - } - - public boolean isEnableAutoCreateSchema() { - return enableAutoCreateSchema; - } - - public void setEnableAutoCreateSchema(boolean enableAutoCreateSchema) { - this.enableAutoCreateSchema = enableAutoCreateSchema; - } - - public boolean isUseAsyncServer() { - return useAsyncServer; - } - - public void setUseAsyncServer(boolean useAsyncServer) { - this.useAsyncServer = useAsyncServer; - } - - public boolean isEnableRaftLogPersistence() { - return enableRaftLogPersistence; - } - - public void setEnableRaftLogPersistence(boolean enableRaftLogPersistence) { - this.enableRaftLogPersistence = enableRaftLogPersistence; - } - - public boolean isUseAsyncApplier() { - return useAsyncApplier; - } - - public void setUseAsyncApplier(boolean useAsyncApplier) { - this.useAsyncApplier = useAsyncApplier; - } - - public int getMaxNumOfLogsInMem() { - return maxNumOfLogsInMem; - } - - public void setMaxNumOfLogsInMem(int maxNumOfLogsInMem) { - this.maxNumOfLogsInMem = maxNumOfLogsInMem; - } - - public int getUnCommittedRaftLogNumForRejectThreshold() { - return UnCommittedRaftLogNumForRejectThreshold; - } - - public void setUnCommittedRaftLogNumForRejectThreshold( - int unCommittedRaftLogNumForRejectThreshold) { - UnCommittedRaftLogNumForRejectThreshold = unCommittedRaftLogNumForRejectThreshold; - } - - public int getUnAppliedRaftLogNumForRejectThreshold() { - return UnAppliedRaftLogNumForRejectThreshold; - } - - public void setUnAppliedRaftLogNumForRejectThreshold(int unAppliedRaftLogNumForRejectThreshold) { - UnAppliedRaftLogNumForRejectThreshold = unAppliedRaftLogNumForRejectThreshold; - } - - public int getRaftLogBufferSize() { - return raftLogBufferSize; - } - - public void setRaftLogBufferSize(int raftLogBufferSize) { - this.raftLogBufferSize = raftLogBufferSize; - } - - public int getFlushRaftLogThreshold() { - return flushRaftLogThreshold; - } - - void setFlushRaftLogThreshold(int flushRaftLogThreshold) { - this.flushRaftLogThreshold = flushRaftLogThreshold; - } - - public long getJoinClusterTimeOutMs() { - return joinClusterTimeOutMs; - } - - public void setJoinClusterTimeOutMs(long joinClusterTimeOutMs) { - this.joinClusterTimeOutMs = joinClusterTimeOutMs; - } - - public int getPullSnapshotRetryIntervalMs() { - return pullSnapshotRetryIntervalMs; - } - - public void setPullSnapshotRetryIntervalMs(int pullSnapshotRetryIntervalMs) { - this.pullSnapshotRetryIntervalMs = pullSnapshotRetryIntervalMs; - } - - public int getMaxRaftLogIndexSizeInMemory() { - return maxRaftLogIndexSizeInMemory; - } - - public void setMaxRaftLogIndexSizeInMemory(int maxRaftLogIndexSizeInMemory) { - this.maxRaftLogIndexSizeInMemory = maxRaftLogIndexSizeInMemory; - } - - public long getMaxMemorySizeForRaftLog() { - return maxMemorySizeForRaftLog; - } - - public void setMaxMemorySizeForRaftLog(long maxMemorySizeForRaftLog) { - 
this.maxMemorySizeForRaftLog = maxMemorySizeForRaftLog; - } - - public double getRaftLogMemoryProportion() { - return RaftLogMemoryProportion; - } - - public void setRaftLogMemoryProportion(double raftLogMemoryProportion) { - RaftLogMemoryProportion = raftLogMemoryProportion; - } - - public int getMaxRaftLogPersistDataSizePerFile() { - return maxRaftLogPersistDataSizePerFile; - } - - public void setMaxRaftLogPersistDataSizePerFile(int maxRaftLogPersistDataSizePerFile) { - this.maxRaftLogPersistDataSizePerFile = maxRaftLogPersistDataSizePerFile; - } - - public int getMaxNumberOfPersistRaftLogFiles() { - return maxNumberOfPersistRaftLogFiles; - } - - public void setMaxNumberOfPersistRaftLogFiles(int maxNumberOfPersistRaftLogFiles) { - this.maxNumberOfPersistRaftLogFiles = maxNumberOfPersistRaftLogFiles; - } - - public int getMaxPersistRaftLogNumberOnDisk() { - return maxPersistRaftLogNumberOnDisk; - } - - public void setMaxPersistRaftLogNumberOnDisk(int maxPersistRaftLogNumberOnDisk) { - this.maxPersistRaftLogNumberOnDisk = maxPersistRaftLogNumberOnDisk; - } - - public boolean isEnableUsePersistLogOnDiskToCatchUp() { - return enableUsePersistLogOnDiskToCatchUp; - } - - public void setEnableUsePersistLogOnDiskToCatchUp(boolean enableUsePersistLogOnDiskToCatchUp) { - this.enableUsePersistLogOnDiskToCatchUp = enableUsePersistLogOnDiskToCatchUp; - } - - public int getMaxNumberOfLogsPerFetchOnDisk() { - return maxNumberOfLogsPerFetchOnDisk; - } - - public void setMaxNumberOfLogsPerFetchOnDisk(int maxNumberOfLogsPerFetchOnDisk) { - this.maxNumberOfLogsPerFetchOnDisk = maxNumberOfLogsPerFetchOnDisk; - } - - public boolean isWaitForSlowNode() { - return waitForSlowNode; - } - - public long getMaxReadLogLag() { - return maxReadLogLag; - } - - public void setMaxReadLogLag(long maxReadLogLag) { - this.maxReadLogLag = maxReadLogLag; - } - - public long getMaxSyncLogLag() { - return maxSyncLogLag; - } - - public void setMaxSyncLogLag(long maxSyncLogLag) { - this.maxSyncLogLag = maxSyncLogLag; - } - - public String getInternalIp() { - return internalIp; - } - - public void setInternalIp(String internalIp) { - this.internalIp = internalIp; - } - - public boolean isOpenServerRpcPort() { - return openServerRpcPort; - } - - public void setOpenServerRpcPort(boolean openServerRpcPort) { - this.openServerRpcPort = openServerRpcPort; - } - - public long getWaitClientTimeoutMS() { - return waitClientTimeoutMS; - } - - public void setWaitClientTimeoutMS(long waitClientTimeoutMS) { - this.waitClientTimeoutMS = waitClientTimeoutMS; - } - - public long getHeartbeatIntervalMs() { - return heartbeatIntervalMs; - } - - public void setHeartbeatIntervalMs(long heartbeatIntervalMs) { - this.heartbeatIntervalMs = heartbeatIntervalMs; - } - - public long getElectionTimeoutMs() { - return electionTimeoutMs; - } - - public void setElectionTimeoutMs(long electionTimeoutMs) { - this.electionTimeoutMs = electionTimeoutMs; - } - - public int getClusterInfoRpcPort() { - return clusterInfoRpcPort; - } - - public void setClusterInfoRpcPort(int clusterInfoRpcPort) { - this.clusterInfoRpcPort = clusterInfoRpcPort; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java deleted file mode 100644 index 529f44fe5f07..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterConstant.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.config; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.commons.utils.TestOnly; - -public class ClusterConstant { - - /** - * We only change the value in tests to reduce test time, so they are essentially constant. A - * failed election will restart in [0, max(heartbeatInterval, 50ms)). If this range is too small, - * a stale node may frequently issue elections and thus makes the leader step down. - */ - private static long electionMaxWaitMs = - Math.max(ClusterDescriptor.getInstance().getConfig().getHeartbeatIntervalMs(), 50L); - - // Heartbeat client connection timeout should not be larger than heartbeat interval, otherwise - // the thread pool of sending heartbeats or requesting votes may be used up by waiting for - // establishing connection with some slow or dead nodes. - private static final int heartbeatClientConnTimeoutMs = - Math.min( - (int) ClusterConstant.getHeartbeatIntervalMs(), - ClusterConstant.getConnectionTimeoutInMS()); - - public static final int SLOT_NUM = 10000; - public static final int HASH_SALT = 2333; - public static final int CHECK_ALIVE_TIME_OUT_MS = 1000; - - public static final int LOG_NUM_IN_BATCH = 100; - - public static final int RETRY_WAIT_TIME_MS = 10; - - public static final int THREAD_POLL_WAIT_TERMINATION_TIME_S = 10; - - /** - * every "REPORT_INTERVAL_SEC" seconds, a reporter thread will print the status of all raft - * members in this node. - */ - public static final int REPORT_INTERVAL_SEC = 10; - - /** - * during snapshot, hardlinks of data files are created to for downloading. hardlinks will be - * checked every hour by default to see if they have expired, and will be cleaned if so. 
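As a rough illustration of the election restart window described above (a failed election restarts after a random wait in [0, max(heartbeatInterval, 50 ms))), the following hedged sketch shows how such a delay could be drawn; the class and method names are illustrative, not the module's actual implementation.

```java
import java.util.concurrent.ThreadLocalRandom;

// Illustrative sketch only: draw a uniformly random restart delay bounded by
// max(heartbeatIntervalMs, 50 ms), so a stale node does not keep issuing
// elections back-to-back and force the leader to step down.
public class ElectionRestartDelaySketch {
  public static long nextDelayMs(long heartbeatIntervalMs) {
    long electionMaxWaitMs = Math.max(heartbeatIntervalMs, 50L);
    return ThreadLocalRandom.current().nextLong(electionMaxWaitMs); // in [0, electionMaxWaitMs)
  }

  public static void main(String[] args) {
    System.out.println(nextDelayMs(1_000L)); // some value in [0, 1000)
  }
}
```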
- */ - public static final long CLEAN_HARDLINK_INTERVAL_SEC = 3600L; - - public static final Node EMPTY_NODE = new Node(); - - private ClusterConstant() { - // constant class - } - - public static long getElectionMaxWaitMs() { - return electionMaxWaitMs; - } - - @TestOnly - public static void setElectionMaxWaitMs(long electionMaxWaitMs) { - ClusterConstant.electionMaxWaitMs = electionMaxWaitMs; - } - - private static int connectionTimeoutInMS = - ClusterDescriptor.getInstance().getConfig().getConnectionTimeoutInMS(); - private static int readOperationTimeoutMS = - ClusterDescriptor.getInstance().getConfig().getReadOperationTimeoutMS(); - private static int writeOperationTimeoutMS = - ClusterDescriptor.getInstance().getConfig().getWriteOperationTimeoutMS(); - private static int syncLeaderMaxWaitMs = 20 * 1000; - private static long heartbeatIntervalMs = - ClusterDescriptor.getInstance().getConfig().getHeartbeatIntervalMs(); - private static long electionTimeoutMs = - ClusterDescriptor.getInstance().getConfig().getElectionTimeoutMs(); - - public static int getConnectionTimeoutInMS() { - return connectionTimeoutInMS; - } - - public static void setConnectionTimeoutInMS(int connectionTimeoutInMS) { - ClusterConstant.connectionTimeoutInMS = connectionTimeoutInMS; - } - - public static int getReadOperationTimeoutMS() { - return readOperationTimeoutMS; - } - - public static int getWriteOperationTimeoutMS() { - return writeOperationTimeoutMS; - } - - public static int getSyncLeaderMaxWaitMs() { - return syncLeaderMaxWaitMs; - } - - public static void setSyncLeaderMaxWaitMs(int syncLeaderMaxWaitMs) { - ClusterConstant.syncLeaderMaxWaitMs = syncLeaderMaxWaitMs; - } - - public static long getHeartbeatIntervalMs() { - return heartbeatIntervalMs; - } - - public static void setHeartbeatIntervalMs(long heartBeatIntervalMs) { - ClusterConstant.heartbeatIntervalMs = heartBeatIntervalMs; - } - - public static long getElectionTimeoutMs() { - return electionTimeoutMs; - } - - public static void setElectionTimeoutMs(long electionTimeoutMs) { - ClusterConstant.electionTimeoutMs = electionTimeoutMs; - } - - public static int getHeartbeatClientConnTimeoutMs() { - return heartbeatClientConnTimeoutMs; - } - - @TestOnly - public static void setReadOperationTimeoutMS(int readOperationTimeoutMS) { - ClusterConstant.readOperationTimeoutMS = readOperationTimeoutMS; - } - - @TestOnly - public static void setWriteOperationTimeoutMS(int writeOperationTimeoutMS) { - ClusterConstant.writeOperationTimeoutMS = writeOperationTimeoutMS; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java deleted file mode 100644 index f556998bc290..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ClusterDescriptor.java +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.config; - -import org.apache.iotdb.cluster.exception.BadSeedUrlFormatException; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.query.QueryProcessException; - -import com.google.common.net.InetAddresses; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Properties; - -public class ClusterDescriptor { - - private static final Logger logger = LoggerFactory.getLogger(ClusterDescriptor.class); - private static final ClusterDescriptor INSTANCE = new ClusterDescriptor(); - private final ClusterConfig config = new ClusterConfig(); - - private ClusterDescriptor() { - // load settings from cluster's file. - // so, iotdb-cluster.properties can overwrite iotdb-engine.properties. - loadProps(); - - // if open the server rpc port, we will enable the rpc service and change the server's rpc port - // to rpc_port + 1 - if (config.isOpenServerRpcPort()) { - IoTDBDescriptor.getInstance().getConfig().setEnableRpcService(true); - IoTDBDescriptor.getInstance() - .getConfig() - .setRpcPort(IoTDBDescriptor.getInstance().getConfig().getRpcPort() + 1); - } else { - IoTDBDescriptor.getInstance().getConfig().setEnableRpcService(false); - } - } - - public ClusterConfig getConfig() { - return config; - } - - public static ClusterDescriptor getInstance() { - return INSTANCE; - } - - public String getPropsUrl() { - String url = System.getProperty(IoTDBConstant.IOTDB_CONF, null); - if (url == null) { - url = System.getProperty(IoTDBConstant.IOTDB_HOME, null); - if (url != null) { - url = url + File.separatorChar + "conf" + File.separatorChar + ClusterConfig.CONFIG_NAME; - } else { - logger.warn( - "Cannot find IOTDB_HOME or IOTDB_CONF environment variable when loading " - + "config file {}, use default configuration", - ClusterConfig.CONFIG_NAME); - // update all data seriesPath - return null; - } - } else { - url += (File.separatorChar + ClusterConfig.CONFIG_NAME); - } - return url; - } - - public void replaceHostnameWithIp() throws UnknownHostException, BadSeedUrlFormatException { - boolean isInvalidClusterInternalIp = InetAddresses.isInetAddress(config.getInternalIp()); - if (!isInvalidClusterInternalIp) { - config.setInternalIp(hostnameToIP(config.getInternalIp())); - } - List newSeedUrls = new ArrayList<>(); - for (String seedUrl : config.getSeedNodeUrls()) { - String[] splits = seedUrl.split(":"); - if (splits.length != 2) { - throw new BadSeedUrlFormatException(seedUrl); - } - String seedIP = splits[0]; - boolean isInvalidSeedIp = InetAddresses.isInetAddress(seedIP); - if (!isInvalidSeedIp) { - String newSeedIP = hostnameToIP(seedIP); - newSeedUrls.add(newSeedIP + ":" + splits[1]); - } else { - newSeedUrls.add(seedUrl); - } - } - config.setSeedNodeUrls(newSeedUrls); - 
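The hostname replacement above boils down to resolving the host part of every {IP/DomainName}:{metaPort} seed entry while keeping its port. A simplified, self-contained sketch (error handling reduced; names are illustrative) might look like this:

```java
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Simplified sketch of the seed-URL normalization performed above: the port is
// kept, and the host part is resolved to an IP address when necessary.
public class SeedUrlNormalizerSketch {
  public static List<String> normalize(List<String> seedUrls) throws UnknownHostException {
    List<String> normalized = new ArrayList<>();
    for (String url : seedUrls) {
      String[] parts = url.split(":");
      if (parts.length != 2) {
        throw new IllegalArgumentException("Expected {IP/DomainName}:{metaPort} but got " + url);
      }
      normalized.add(InetAddress.getByName(parts[0]).getHostAddress() + ":" + parts[1]);
    }
    return normalized;
  }

  public static void main(String[] args) throws UnknownHostException {
    System.out.println(normalize(Arrays.asList("localhost:9003", "127.0.0.1:9003")));
  }
}
```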
logger.debug( - "after replace, the rpcIP={}, internalIP={}, seedUrls={}", - IoTDBDescriptor.getInstance().getConfig().getRpcAddress(), - config.getInternalIp(), - config.getSeedNodeUrls()); - } - - /** load an property file and set TsfileDBConfig variables. */ - private void loadProps() { - String url = getPropsUrl(); - Properties properties = System.getProperties(); - if (url != null) { - try (InputStream inputStream = new FileInputStream(new File(url))) { - logger.info("Start to read config file {}", url); - properties.load(inputStream); - } catch (IOException e) { - logger.warn("Fail to find config file {}", url, e); - } - } - config.setInternalIp(properties.getProperty("internal_ip", config.getInternalIp())); - - config.setInternalMetaPort( - Integer.parseInt( - properties.getProperty( - "internal_meta_port", String.valueOf(config.getInternalMetaPort())))); - - config.setInternalDataPort( - Integer.parseInt( - properties.getProperty( - "internal_data_port", Integer.toString(config.getInternalDataPort())))); - - // rpc port and rpc address are defined in iotdb-engine.properties. - // To avoid inconsistency, we do not read "rpc_port" in iotdb-cluster.properties - // even users claim the property. - - config.setClusterInfoRpcPort( - Integer.parseInt( - properties.getProperty( - "cluster_info_public_port", Integer.toString(config.getClusterInfoRpcPort())))); - - config.setMultiRaftFactor( - Integer.parseInt( - properties.getProperty( - "multi_raft_factor", String.valueOf(config.getMultiRaftFactor())))); - - config.setReplicationNum( - Integer.parseInt( - properties.getProperty( - "default_replica_num", String.valueOf(config.getReplicationNum())))); - - config.setClusterName(properties.getProperty("cluster_name", config.getClusterName())); - - config.setRpcThriftCompressionEnabled( - Boolean.parseBoolean( - properties.getProperty( - "rpc_thrift_compression_enable", - String.valueOf(config.isRpcThriftCompressionEnabled())))); - - config.setConnectionTimeoutInMS( - Integer.parseInt( - properties.getProperty( - "connection_timeout_ms", String.valueOf(config.getConnectionTimeoutInMS())))); - - config.setHeartbeatIntervalMs( - Long.parseLong( - properties.getProperty( - "heartbeat_interval_ms", String.valueOf(config.getHeartbeatIntervalMs())))); - - config.setElectionTimeoutMs( - Long.parseLong( - properties.getProperty( - "election_timeout_ms", String.valueOf(config.getElectionTimeoutMs())))); - - config.setReadOperationTimeoutMS( - Integer.parseInt( - properties.getProperty( - "read_operation_timeout_ms", String.valueOf(config.getReadOperationTimeoutMS())))); - - config.setCatchUpTimeoutMS( - Integer.parseInt( - properties.getProperty( - "catch_up_timeout_ms", String.valueOf(config.getCatchUpTimeoutMS())))); - - config.setWriteOperationTimeoutMS( - Integer.parseInt( - properties.getProperty( - "write_operation_timeout_ms", - String.valueOf(config.getWriteOperationTimeoutMS())))); - - config.setUseBatchInLogCatchUp( - Boolean.parseBoolean( - properties.getProperty( - "use_batch_in_catch_up", String.valueOf(config.isUseBatchInLogCatchUp())))); - - config.setMinNumOfLogsInMem( - Integer.parseInt( - properties.getProperty( - "min_num_of_logs_in_mem", String.valueOf(config.getMinNumOfLogsInMem())))); - - config.setMaxNumOfLogsInMem( - Integer.parseInt( - properties.getProperty( - "max_num_of_logs_in_mem", String.valueOf(config.getMaxNumOfLogsInMem())))); - - config.setRaftLogMemoryProportion( - Double.parseDouble( - properties.getProperty( - "raft_log_memory_proportion", - 
String.valueOf(config.getRaftLogMemoryProportion())))); - - config.setLogDeleteCheckIntervalSecond( - Integer.parseInt( - properties.getProperty( - "log_deletion_check_interval_second", - String.valueOf(config.getLogDeleteCheckIntervalSecond())))); - - config.setEnableAutoCreateSchema( - Boolean.parseBoolean( - properties.getProperty( - "enable_auto_create_schema", String.valueOf(config.isEnableAutoCreateSchema())))); - - config.setUseAsyncServer( - Boolean.parseBoolean( - properties.getProperty( - "is_use_async_server", String.valueOf(config.isUseAsyncServer())))); - - config.setOpenServerRpcPort( - Boolean.parseBoolean( - properties.getProperty( - "open_server_rpc_port", String.valueOf(config.isOpenServerRpcPort())))); - - config.setUseAsyncApplier( - Boolean.parseBoolean( - properties.getProperty( - "is_use_async_applier", String.valueOf(config.isUseAsyncApplier())))); - - config.setEnableRaftLogPersistence( - Boolean.parseBoolean( - properties.getProperty( - "is_enable_raft_log_persistence", - String.valueOf(config.isEnableRaftLogPersistence())))); - - config.setFlushRaftLogThreshold( - Integer.parseInt( - properties.getProperty( - "flush_raft_log_threshold", String.valueOf(config.getFlushRaftLogThreshold())))); - - config.setRaftLogBufferSize( - Integer.parseInt( - properties.getProperty( - "raft_log_buffer_size", String.valueOf(config.getRaftLogBufferSize())))); - - config.setMaxRaftLogIndexSizeInMemory( - Integer.parseInt( - properties.getProperty( - "max_raft_log_index_size_in_memory", - String.valueOf(config.getMaxRaftLogIndexSizeInMemory())))); - - config.setUnCommittedRaftLogNumForRejectThreshold( - Integer.parseInt( - properties.getProperty( - "uncommitted_raft_log_num_for_reject_threshold", - String.valueOf(config.getUnCommittedRaftLogNumForRejectThreshold())))); - - config.setUnAppliedRaftLogNumForRejectThreshold( - Integer.parseInt( - properties.getProperty( - "unapplied_raft_log_num_for_reject_threshold", - String.valueOf(config.getUnAppliedRaftLogNumForRejectThreshold())))); - - config.setMaxRaftLogPersistDataSizePerFile( - Integer.parseInt( - properties.getProperty( - "max_raft_log_persist_data_size_per_file", - String.valueOf(config.getMaxRaftLogPersistDataSizePerFile())))); - - config.setMaxNumberOfPersistRaftLogFiles( - Integer.parseInt( - properties.getProperty( - "max_number_of_persist_raft_log_files", - String.valueOf(config.getMaxNumberOfPersistRaftLogFiles())))); - - config.setMaxPersistRaftLogNumberOnDisk( - Integer.parseInt( - properties.getProperty( - "max_persist_raft_log_number_on_disk", - String.valueOf(config.getMaxPersistRaftLogNumberOnDisk())))); - - config.setMaxNumberOfLogsPerFetchOnDisk( - Integer.parseInt( - properties.getProperty( - "max_number_of_logs_per_fetch_on_disk", - String.valueOf(config.getMaxNumberOfLogsPerFetchOnDisk())))); - - config.setEnableUsePersistLogOnDiskToCatchUp( - Boolean.parseBoolean( - properties.getProperty( - "enable_use_persist_log_on_disk_to_catch_up", - String.valueOf(config.isEnableUsePersistLogOnDiskToCatchUp())))); - - config.setMaxReadLogLag( - Long.parseLong( - properties.getProperty("max_read_log_lag", String.valueOf(config.getMaxReadLogLag())))); - - config.setMaxSyncLogLag( - Long.parseLong( - properties.getProperty("max_sync_log_lag", String.valueOf(config.getMaxSyncLogLag())))); - - config.setMaxClientPerNodePerMember( - Integer.parseInt( - properties.getProperty( - "max_client_pernode_permember_number", - String.valueOf(config.getMaxClientPerNodePerMember())))); - - config.setMaxIdleClientPerNodePerMember( - 
Integer.parseInt( - properties.getProperty( - "max_idle_client_pernode_permember_number", - String.valueOf(config.getMaxIdleClientPerNodePerMember())))); - - config.setWaitClientTimeoutMS( - Long.parseLong( - properties.getProperty( - "wait_client_timeout_ms", String.valueOf(config.getWaitClientTimeoutMS())))); - - String consistencyLevel = properties.getProperty("consistency_level"); - if (consistencyLevel != null) { - config.setConsistencyLevel(ConsistencyLevel.getConsistencyLevel(consistencyLevel)); - } - - String seedUrls = properties.getProperty("seed_nodes"); - if (seedUrls != null) { - List urlList = getSeedUrlList(seedUrls); - config.setSeedNodeUrls(urlList); - } - } - - /** - * Split the seed urls as one list. - * - * @param seedUrls the seed urls. - * @return the seed urls as a list. - */ - public static List getSeedUrlList(String seedUrls) { - if (seedUrls == null) { - return Collections.emptyList(); - } - List urlList = new ArrayList<>(); - String[] split = seedUrls.split(","); - for (String nodeUrl : split) { - nodeUrl = nodeUrl.trim(); - if ("".equals(nodeUrl)) { - continue; - } - urlList.add(nodeUrl); - } - return urlList; - } - - public void loadHotModifiedProps() throws QueryProcessException { - Properties properties = getProperties(); - if (properties != null) { - loadHotModifiedProps(properties); - } - } - - private Properties getProperties() throws QueryProcessException { - String url = getPropsUrl(); - if (url == null) { - return null; - } - Properties properties; - try (InputStream inputStream = new FileInputStream(new File(url))) { - logger.info("Start to reload config file {}", url); - properties = new Properties(); - properties.load(inputStream); - } catch (Exception e) { - throw new QueryProcessException( - String.format("Fail to reload config file %s because %s", url, e.getMessage())); - } - return properties; - } - - /** - * This method is for setting hot modified properties of the cluster. Currently, we support - * connection_timeout_ms, max_resolved_log_size - * - * @param properties - * @throws QueryProcessException - */ - public void loadHotModifiedProps(Properties properties) { - - config.setConnectionTimeoutInMS( - Integer.parseInt( - properties.getProperty( - "connection_timeout_ms", String.valueOf(config.getConnectionTimeoutInMS())))); - - logger.info("Set cluster configuration {}", properties); - } - - private String hostnameToIP(String hostname) throws UnknownHostException { - InetAddress address = InetAddress.getByName(hostname); - return address.getHostAddress(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/config/ConsistencyLevel.java b/cluster/src/main/java/org/apache/iotdb/cluster/config/ConsistencyLevel.java deleted file mode 100644 index cd95bde18a10..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/config/ConsistencyLevel.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.config; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public enum ConsistencyLevel { - /** - * Strong consistency means the server will first try to synchronize with the leader to get the - * newest meta data, if failed(timeout), directly report an error to the user; - */ - STRONG_CONSISTENCY("strong"), - - /** - * mid consistency means the server will first try to synchronize with the leader, but if - * failed(timeout), it will give up and just use current data it has cached before; - */ - MID_CONSISTENCY("mid"), - - /** weak consistency do not synchronize with the leader and simply use the local data */ - WEAK_CONSISTENCY("weak"), - ; - - private String consistencyLevelName; - private static final Logger logger = LoggerFactory.getLogger(ConsistencyLevel.class); - - ConsistencyLevel(String consistencyLevelName) { - this.consistencyLevelName = consistencyLevelName; - } - - public static ConsistencyLevel getConsistencyLevel(String consistencyLevel) { - if (consistencyLevel == null) { - return ConsistencyLevel.MID_CONSISTENCY; - } - switch (consistencyLevel.toLowerCase()) { - case "strong": - return ConsistencyLevel.STRONG_CONSISTENCY; - case "mid": - return ConsistencyLevel.MID_CONSISTENCY; - case "weak": - return ConsistencyLevel.WEAK_CONSISTENCY; - default: - logger.warn( - "Unsupported consistency level={}, use default consistency level={}", - consistencyLevel, - ConsistencyLevel.MID_CONSISTENCY.consistencyLevelName); - return ConsistencyLevel.MID_CONSISTENCY; - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/coordinator/Coordinator.java b/cluster/src/main/java/org/apache/iotdb/cluster/coordinator/Coordinator.java deleted file mode 100644 index 8cc848622d07..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/coordinator/Coordinator.java +++ /dev/null @@ -1,807 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.coordinator; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.ChangeMembershipException; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.UnknownLogTypeException; -import org.apache.iotdb.cluster.exception.UnsupportedPlanException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.metadata.CSchemaProcessor; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.query.ClusterPlanRouter; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.server.monitor.Timer; -import org.apache.iotdb.cluster.utils.PartitionUtils; -import org.apache.iotdb.cluster.utils.StatusUtils; -import org.apache.iotdb.common.rpc.thrift.TEndPoint; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.exception.metadata.PathNotExistException; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.physical.BatchPlan; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertMultiTabletsPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowsPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertTabletPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateAlignedTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateMultiTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.DeleteTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.SetTemplatePlan; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.rpc.RpcUtils; -import org.apache.iotdb.rpc.TSStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; - -/** Coordinator of client non-query request */ -public class Coordinator { - - private static final Logger logger = LoggerFactory.getLogger(Coordinator.class); - - private MetaGroupMember metaGroupMember; - - private String name; - private Node thisNode; - /** router calculates the partition groups that a partitioned plan should be sent to */ - private ClusterPlanRouter router; - - private static final String MSG_MULTIPLE_ERROR = - "The following errors occurred when executing " - + "the query, please retry or contact the DBA: "; - - @TestOnly - public Coordinator(MetaGroupMember metaGroupMember) { - 
linkMetaGroupMember(metaGroupMember); - } - - public Coordinator() {} - - public void linkMetaGroupMember(MetaGroupMember metaGroupMember) { - this.metaGroupMember = metaGroupMember; - if (metaGroupMember.getCoordinator() != null && metaGroupMember.getCoordinator() != this) { - logger.warn("MetadataGroupMember linked inconsistent Coordinator, will correct it."); - metaGroupMember.setCoordinator(this); - } - this.name = metaGroupMember.getName(); - this.thisNode = metaGroupMember.getThisNode(); - } - - public void setRouter(ClusterPlanRouter router) { - this.router = router; - } - - /** - * Execute a non-query plan. According to the type of the plan, the plan will be executed on all - * nodes (like timeseries deletion) or the nodes that belong to certain groups (like data - * ingestion). - * - * @param plan a non-query plan. - */ - public TSStatus executeNonQueryPlan(PhysicalPlan plan) { - TSStatus result; - long startTime = Timer.Statistic.COORDINATOR_EXECUTE_NON_QUERY.getOperationStartTime(); - if (PartitionUtils.isLocalNonQueryPlan(plan)) { - // run locally - result = executeNonQueryLocally(plan); - } else if (PartitionUtils.isGlobalMetaPlan(plan)) { - // forward the plan to all meta group nodes - result = metaGroupMember.processNonPartitionedMetaPlan(plan); - } else if (PartitionUtils.isGlobalDataPlan(plan)) { - // forward the plan to all data group nodes - result = processNonPartitionedDataPlan(plan); - } else { - // split the plan and forward them to some PartitionGroups - try { - result = processPartitionedPlan(plan); - } catch (UnsupportedPlanException e) { - return StatusUtils.getStatus(StatusUtils.UNSUPPORTED_OPERATION, e.getMessage()); - } - } - Timer.Statistic.COORDINATOR_EXECUTE_NON_QUERY.calOperationCostTimeFromStart(startTime); - return result; - } - - /** execute a non-query plan that is not necessary to be executed on other nodes. */ - private TSStatus executeNonQueryLocally(PhysicalPlan plan) { - boolean execRet; - try { - execRet = metaGroupMember.getLocalExecutor().processNonQuery(plan); - } catch (QueryProcessException e) { - if (e.getErrorCode() != TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()) { - logger.debug("meet error while processing non-query. ", e); - } else { - logger.warn("meet error while processing non-query. ", e); - } - return RpcUtils.getStatus(e.getErrorCode(), e.getMessage()); - } catch (Exception e) { - logger.error("{}: server Internal Error: ", IoTDBConstant.GLOBAL_DB_NAME, e); - return RpcUtils.getStatus(TSStatusCode.INTERNAL_SERVER_ERROR, e.getMessage()); - } - - return execRet - ? RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS, "Execute successfully") - : RpcUtils.getStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR); - } - - /** - * A non-partitioned plan (like DeleteData) should be executed on all data group nodes, so the - * DataGroupLeader should take the responsible to make sure that every node receives the plan. - * Thus the plan will be processed locally only by the DataGroupLeader and forwarded by non-leader - * nodes. 
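The dispatch in executeNonQueryPlan above can be read as a four-way routing decision. The sketch below is purely illustrative; the boolean predicates stand in for the real plan-type checks (PartitionUtils.isLocalNonQueryPlan and friends) and only restate that decision.

```java
// Illustrative restatement of the routing performed by executeNonQueryPlan:
// a plan is run locally, broadcast to the meta group, broadcast to every data
// group, or split and forwarded to the owning data groups.
public class NonQueryPlanRoutingSketch {
  enum Route { LOCAL, GLOBAL_META, GLOBAL_DATA, PARTITIONED }

  // The flags stand in for the real plan-type checks.
  static Route route(boolean isLocalPlan, boolean isGlobalMetaPlan, boolean isGlobalDataPlan) {
    if (isLocalPlan) {
      return Route.LOCAL;          // executed on this node only
    } else if (isGlobalMetaPlan) {
      return Route.GLOBAL_META;    // forwarded to all meta group nodes
    } else if (isGlobalDataPlan) {
      return Route.GLOBAL_DATA;    // forwarded to all data group nodes
    } else {
      return Route.PARTITIONED;    // split and forwarded to some partition groups
    }
  }
}
```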
- */ - private TSStatus processNonPartitionedDataPlan(PhysicalPlan plan) { - try { - if (plan instanceof DeleteTimeSeriesPlan) { - // as delete related plans may have abstract paths (paths with wildcards), we convert - // them to full paths so the executor nodes will not need to query the metadata holders, - // eliminating the risk that when they are querying the metadata holders, the timeseries - // has already been deleted - ((CSchemaProcessor) IoTDB.schemaProcessor).convertToFullPaths(plan); - } else { - // function convertToFullPaths has already sync leader - metaGroupMember.syncLeaderWithConsistencyCheck(true); - } - } catch (PathNotExistException e) { - if (plan.getPaths().isEmpty()) { - // only reports an error when there is no matching path - return StatusUtils.getStatus(StatusUtils.TIMESERIES_NOT_EXIST_ERROR, e.getMessage()); - } - } catch (CheckConsistencyException e) { - logger.debug( - "Forwarding global data plan {} to meta leader {}", plan, metaGroupMember.getLeader()); - metaGroupMember.waitLeader(); - return metaGroupMember.forwardPlan(plan, metaGroupMember.getLeader(), null); - } - try { - createSchemaIfNecessary(plan); - } catch (MetadataException | CheckConsistencyException e) { - logger.error("{}: Cannot find storage groups for {}", name, plan); - return StatusUtils.NO_STORAGE_GROUP; - } - List globalGroups = metaGroupMember.getPartitionTable().getGlobalGroups(); - logger.debug("Forwarding global data plan {} to {} groups", plan, globalGroups.size()); - return forwardPlan(globalGroups, plan); - } - - public void createSchemaIfNecessary(PhysicalPlan plan) - throws MetadataException, CheckConsistencyException { - if (plan instanceof SetTemplatePlan) { - try { - IoTDB.schemaProcessor.getBelongedStorageGroup( - new PartialPath(((SetTemplatePlan) plan).getPrefixPath())); - } catch (IllegalPathException e) { - // the plan has been checked - } catch (StorageGroupNotSetException e) { - ((CSchemaProcessor) IoTDB.schemaProcessor).createSchema(plan); - } - } - } - - /** - * A partitioned plan (like batch insertion) will be split into several sub-plans, each belongs to - * a data group. And these sub-plans will be sent to and executed on the corresponding groups - * separately. 
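As a loose illustration of that split-then-forward step, the sketch below buckets the rows of a batch by the data group assumed to own them; the ownership function is a stand-in, not the cluster module's real slot/hash computation.

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.IntUnaryOperator;

// Illustrative only: group row indexes of a batch plan by owning data group,
// so each group receives a sub-plan containing just its rows.
public class BatchPlanSplitSketch {
  public static Map<Integer, List<Integer>> splitRows(int rowCount, IntUnaryOperator ownerOfRow) {
    Map<Integer, List<Integer>> rowsPerGroup = new HashMap<>();
    for (int row = 0; row < rowCount; row++) {
      rowsPerGroup.computeIfAbsent(ownerOfRow.applyAsInt(row), g -> new ArrayList<>()).add(row);
    }
    return rowsPerGroup;
  }

  public static void main(String[] args) {
    // pretend even rows belong to group 0 and odd rows to group 1
    System.out.println(splitRows(4, row -> row % 2)); // {0=[0, 2], 1=[1, 3]}
  }
}
```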
- */ - public TSStatus processPartitionedPlan(PhysicalPlan plan) throws UnsupportedPlanException { - logger.debug("{}: Received a partitioned plan {}", name, plan); - if (metaGroupMember.getPartitionTable() == null) { - logger.debug("{}: Partition table is not ready", name); - return StatusUtils.PARTITION_TABLE_NOT_READY; - } - - if (!checkPrivilegeForBatchExecution(plan)) { - return concludeFinalStatus( - plan, plan.getPaths().size(), true, false, false, null, Collections.emptyList()); - } - - // split the plan into sub-plans that each only involve one data group - Map planGroupMap; - try { - planGroupMap = splitPlan(plan); - } catch (CheckConsistencyException checkConsistencyException) { - return StatusUtils.getStatus( - StatusUtils.CONSISTENCY_FAILURE, checkConsistencyException.getMessage()); - } - - // the storage group is not found locally - if (planGroupMap == null || planGroupMap.isEmpty()) { - if ((plan instanceof InsertPlan - || plan instanceof CreateTimeSeriesPlan - || plan instanceof CreateAlignedTimeSeriesPlan - || plan instanceof CreateMultiTimeSeriesPlan) - && ClusterDescriptor.getInstance().getConfig().isEnableAutoCreateSchema()) { - - logger.debug("{}: No associated storage group found for {}, auto-creating", name, plan); - try { - ((CSchemaProcessor) IoTDB.schemaProcessor).createSchema(plan); - return processPartitionedPlan(plan); - } catch (MetadataException | CheckConsistencyException e) { - logger.error( - String.format("Failed to set storage group or create timeseries, because %s", e)); - } - } - logger.error("{}: Cannot find storage groups for {}", name, plan); - return StatusUtils.NO_STORAGE_GROUP; - } - logger.debug("{}: The data groups of {} are {}", name, plan, planGroupMap); - return forwardPlan(planGroupMap, plan); - } - - /** - * check if batch execution plan has privilege on any sg - * - * @param plan - * @return - */ - private boolean checkPrivilegeForBatchExecution(PhysicalPlan plan) { - if (plan instanceof BatchPlan) { - return ((BatchPlan) plan).getResults().size() != plan.getPaths().size(); - } else { - return true; - } - } - - /** - * Forward a plan to all DataGroupMember groups. Only when all nodes time out, will a TIME_OUT be - * returned. The error messages from each group (if any) will be compacted into one string. 
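The "compacted into one string" part is straightforward; here is a hedged sketch of how the per-group failures collected above might be folded into a single client-facing message. The prefix mirrors MSG_MULTIPLE_ERROR from this file; everything else is illustrative.

```java
import java.util.Arrays;
import java.util.List;

// Illustrative sketch: fold per-group failures of the form
// "[code@groupHeader:message]" into one error string for the client.
public class ErrorCompactionSketch {
  private static final String MSG_MULTIPLE_ERROR =
      "The following errors occurred when executing the query, please retry or contact the DBA: ";

  public static String compact(List<String> errorCodePartitionGroups) {
    return MSG_MULTIPLE_ERROR + errorCodePartitionGroups;
  }

  public static void main(String[] args) {
    System.out.println(compact(Arrays.asList("[500@group-1:write failed]")));
  }
}
```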
- * - * @param partitionGroups - * @param plan - */ - private TSStatus forwardPlan(List partitionGroups, PhysicalPlan plan) { - // the error codes from the groups that cannot execute the plan - TSStatus status; - List errorCodePartitionGroups = new ArrayList<>(); - for (PartitionGroup partitionGroup : partitionGroups) { - if (partitionGroup.contains(thisNode)) { - // the query should be handled by a group the local node is in, handle it with in the group - status = - metaGroupMember - .getLocalDataMember(partitionGroup.getHeader()) - .executeNonQueryPlan(plan); - logger.debug( - "Execute {} in a local group of {} with status {}", - plan, - partitionGroup.getHeader(), - status); - } else { - // forward the query to the group that should handle it - status = forwardPlan(plan, partitionGroup); - logger.debug( - "Forward {} to a remote group of {} with status {}", - plan, - partitionGroup.getHeader(), - status); - } - if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() - && !(plan instanceof SetTemplatePlan - && status.getCode() == TSStatusCode.DUPLICATED_TEMPLATE.getStatusCode()) - && !(plan instanceof DeleteTimeSeriesPlan - && status.getCode() == TSStatusCode.TIMESERIES_NOT_EXIST.getStatusCode())) { - // execution failed, record the error message - errorCodePartitionGroups.add( - String.format( - "[%s@%s:%s]", status.getCode(), partitionGroup.getHeader(), status.getMessage())); - } - } - if (errorCodePartitionGroups.isEmpty()) { - status = StatusUtils.OK; - } else { - status = - StatusUtils.getStatus( - StatusUtils.EXECUTE_STATEMENT_ERROR, MSG_MULTIPLE_ERROR + errorCodePartitionGroups); - } - logger.debug("{}: executed {} with answer {}", name, plan, status); - return status; - } - - public void sendLogToAllDataGroups(Log log) throws ChangeMembershipException { - if (logger.isDebugEnabled()) { - logger.debug("Send log {} to all data groups: start", log); - } - - Map planGroupMap = router.splitAndRouteChangeMembershipLog(log); - List errorCodePartitionGroups = new CopyOnWriteArrayList<>(); - CountDownLatch counter = new CountDownLatch(planGroupMap.size()); - for (Map.Entry entry : planGroupMap.entrySet()) { - metaGroupMember - .getAppendLogThreadPool() - .submit(() -> forwardChangeMembershipPlan(log, entry, errorCodePartitionGroups, counter)); - } - try { - counter.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new ChangeMembershipException( - String.format("Can not wait all data groups to apply %s", log)); - } - if (!errorCodePartitionGroups.isEmpty()) { - throw new ChangeMembershipException( - String.format("Apply %s failed with status {%s}", log, errorCodePartitionGroups)); - } - if (logger.isDebugEnabled()) { - logger.debug("Send log {} to all data groups: end", log); - } - } - - private void forwardChangeMembershipPlan( - Log log, - Map.Entry entry, - List errorCodePartitionGroups, - CountDownLatch counter) { - int retryTime = 0; - long startTime = System.currentTimeMillis(); - try { - while (true) { - if (logger.isDebugEnabled()) { - logger.debug( - "Send change membership log {} to data group {}, retry time: {}", - log, - entry.getValue(), - retryTime); - } - try { - TSStatus status = forwardToSingleGroup(entry); - if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - if (logger.isDebugEnabled()) { - logger.debug( - "Success to send change membership log {} to data group {}", - log, - entry.getValue()); - } - return; - } - long cost = System.currentTimeMillis() - startTime; - if (cost > 
ClusterDescriptor.getInstance().getConfig().getWriteOperationTimeoutMS()) { - errorCodePartitionGroups.add( - String.format( - "Forward change membership log %s to data group %s", log, entry.getValue())); - return; - } - Thread.sleep(ClusterConstant.RETRY_WAIT_TIME_MS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - errorCodePartitionGroups.add(e.getMessage()); - return; - } - retryTime++; - } - } finally { - counter.countDown(); - } - } - - /** split a plan into several sub-plans, each belongs to only one data group. */ - private Map splitPlan(PhysicalPlan plan) - throws UnsupportedPlanException, CheckConsistencyException { - Map planGroupMap = null; - try { - planGroupMap = router.splitAndRoutePlan(plan); - } catch (StorageGroupNotSetException e) { - // synchronize with the leader to see if this node has unpulled storage groups - metaGroupMember.syncLeaderWithConsistencyCheck(true); - try { - planGroupMap = router.splitAndRoutePlan(plan); - } catch (MetadataException | UnknownLogTypeException ex) { - // ignore - } - } catch (MetadataException | UnknownLogTypeException e) { - logger.error("Cannot route plan {}", plan, e); - } - logger.debug("route plan {} with partitionGroup {}", plan, planGroupMap); - return planGroupMap; - } - - /** - * Forward plans to the DataGroupMember of one node in the corresponding group. Only when all - * nodes time out, will a TIME_OUT be returned. - * - * @param planGroupMap sub-plan -> belong data group pairs - */ - private TSStatus forwardPlan(Map planGroupMap, PhysicalPlan plan) { - // the error codes from the groups that cannot execute the plan - TSStatus status; - // need to create substatus for multiPlan - - // InsertTabletPlan, InsertMultiTabletsPlan, InsertRowsPlan and CreateMultiTimeSeriesPlan - // contains many rows, - // each will correspond to a TSStatus as its execution result, - // as the plan is split and the sub-plans may have interleaving ranges, - // we must assure that each TSStatus is placed to the right position - // e.g., an InsertTabletPlan contains 3 rows, row1 and row3 belong to NodeA and row2 - // belongs to NodeB, when NodeA returns a success while NodeB returns a failure, the - // failure and success should be placed into proper positions in TSStatus.subStatus - if (plan instanceof InsertMultiTabletsPlan - || plan instanceof CreateMultiTimeSeriesPlan - || plan instanceof InsertRowsPlan) { - status = forwardMultiSubPlan(planGroupMap, plan); - } else if (planGroupMap.size() == 1) { - status = forwardToSingleGroup(planGroupMap.entrySet().iterator().next()); - } else { - status = forwardToMultipleGroup(planGroupMap); - } - if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode() - && status.isSetRedirectNode()) { - status.setCode(TSStatusCode.NEED_REDIRECTION.getStatusCode()); - } - logger.debug("{}: executed {} with answer {}", name, plan, status); - return status; - } - - private TSStatus forwardToSingleGroup(Map.Entry entry) { - TSStatus result; - if (entry.getValue().contains(thisNode)) { - // the query should be handled by a group the local node is in, handle it with in the group - long startTime = - Timer.Statistic.META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_LOCAL_GROUP - .getOperationStartTime(); - result = - metaGroupMember - .getLocalDataMember(entry.getValue().getHeader()) - .executeNonQueryPlan(entry.getKey()); - logger.debug( - "Execute {} in a local group of {}, {}", - entry.getKey(), - entry.getValue().getHeader(), - result); - 
Timer.Statistic.META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_LOCAL_GROUP - .calOperationCostTimeFromStart(startTime); - } else { - // forward the query to the group that should handle it - long startTime = - Timer.Statistic.META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_REMOTE_GROUP - .getOperationStartTime(); - logger.debug( - "Forward {} to a remote group of {}", entry.getKey(), entry.getValue().getHeader()); - result = forwardPlan(entry.getKey(), entry.getValue()); - Timer.Statistic.META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_REMOTE_GROUP - .calOperationCostTimeFromStart(startTime); - } - return result; - } - - /** - * forward each sub-plan to its corresponding data group, if some groups goes wrong, the error - * messages from each group will be compacted into one string. - * - * @param planGroupMap sub-plan -> data group pairs - */ - private TSStatus forwardToMultipleGroup(Map planGroupMap) { - List errorCodePartitionGroups = new ArrayList<>(); - TSStatus tmpStatus; - boolean allRedirect = true; - TEndPoint endPoint = null; - for (Map.Entry entry : planGroupMap.entrySet()) { - tmpStatus = forwardToSingleGroup(entry); - if (tmpStatus.isSetRedirectNode()) { - endPoint = tmpStatus.getRedirectNode(); - } else { - allRedirect = false; - } - if (tmpStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - // execution failed, record the error message - errorCodePartitionGroups.add( - String.format( - "[%s@%s:%s]", - tmpStatus.getCode(), entry.getValue().getHeader(), tmpStatus.getMessage())); - } - } - TSStatus status; - if (errorCodePartitionGroups.isEmpty()) { - if (allRedirect) { - status = StatusUtils.getStatus(TSStatusCode.NEED_REDIRECTION, endPoint); - } else { - status = StatusUtils.OK; - } - } else { - status = - StatusUtils.getStatus( - StatusUtils.EXECUTE_STATEMENT_ERROR, MSG_MULTIPLE_ERROR + errorCodePartitionGroups); - } - return status; - } - - /** - * Forward each sub-plan to its belonging data group, and combine responses from the groups. 
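To illustrate the "combine responses from the groups" step described above, the sketch below writes each sub-plan's per-row results back into the matching slots of the parent result. The status code 200 stands in for SUCCESS_STATUS, and the index arrays are a simplified stand-in for the bookkeeping the real sub-plans carry.

```java
import java.util.Arrays;

// Illustrative sketch: each sub-plan remembers which rows of the parent plan it
// carried, so its per-row status codes can be written back to the right slots
// of the parent result; rows not touched by any failure stay successful.
public class SubStatusMergeSketch {
  public static int[] merge(int totalRows, int[][] rowIndexesPerGroup, int[][] rowStatusPerGroup) {
    int[] parentStatus = new int[totalRows];
    Arrays.fill(parentStatus, 200); // untouched rows are treated as successful
    for (int g = 0; g < rowIndexesPerGroup.length; g++) {
      for (int i = 0; i < rowIndexesPerGroup[g].length; i++) {
        parentStatus[rowIndexesPerGroup[g][i]] = rowStatusPerGroup[g][i];
      }
    }
    return parentStatus;
  }

  public static void main(String[] args) {
    // rows 0 and 2 went to group A (both ok), row 1 went to group B (failed with 301)
    int[][] idx = {{0, 2}, {1}};
    int[][] status = {{200, 200}, {301}};
    System.out.println(Arrays.toString(merge(3, idx, status))); // [200, 301, 200]
  }
}
```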
- * - * @param planGroupMap sub-plan -> data group pairs - */ - @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning - private TSStatus forwardMultiSubPlan( - Map planGroupMap, PhysicalPlan parentPlan) { - List errorCodePartitionGroups = new ArrayList<>(); - TSStatus tmpStatus; - TSStatus[] subStatus = null; - boolean noFailure = true; - boolean isBatchFailure = false; - boolean isBatchRedirect = false; - int totalRowNum = parentPlan.getPaths().size(); - // send sub-plans to each belonging data group and collect results - for (Map.Entry entry : planGroupMap.entrySet()) { - tmpStatus = forwardToSingleGroup(entry); - logger.debug("{}: from {},{},{}", name, entry.getKey(), entry.getValue(), tmpStatus); - noFailure = (tmpStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) && noFailure; - isBatchFailure = - (tmpStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) || isBatchFailure; - if (tmpStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode() - || tmpStatus.isSetRedirectNode() && !(parentPlan instanceof CreateMultiTimeSeriesPlan)) { - if (parentPlan instanceof InsertMultiTabletsPlan) { - // the subStatus is the two-dimensional array, - // The first dimension is the number of InsertTabletPlans, - // and the second dimension is the number of rows per InsertTabletPlan - totalRowNum = ((InsertMultiTabletsPlan) parentPlan).getTabletsSize(); - } else if (parentPlan instanceof CreateMultiTimeSeriesPlan) { - totalRowNum = parentPlan.getPaths().size(); - } else if (parentPlan instanceof InsertRowsPlan) { - totalRowNum = ((InsertRowsPlan) parentPlan).getRowCount(); - } - - if (subStatus == null) { - subStatus = new TSStatus[totalRowNum]; - Arrays.fill(subStatus, RpcUtils.SUCCESS_STATUS); - } - // set the status from one group to the proper positions of the overall status - if (parentPlan instanceof InsertMultiTabletsPlan) { - InsertMultiTabletsPlan tmpMultiTabletPlan = ((InsertMultiTabletsPlan) entry.getKey()); - for (int i = 0; i < tmpMultiTabletPlan.getInsertTabletPlanList().size(); i++) { - InsertTabletPlan tmpInsertTabletPlan = tmpMultiTabletPlan.getInsertTabletPlan(i); - int parentIndex = tmpMultiTabletPlan.getParentIndex(i); - int parentPlanRowCount = ((InsertMultiTabletsPlan) parentPlan).getRowCount(parentIndex); - if (tmpStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) { - subStatus[parentIndex] = tmpStatus.subStatus.get(i); - if (tmpStatus.subStatus.get(i).getCode() - == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) { - if (subStatus[parentIndex].subStatus == null) { - TSStatus[] tmpSubTsStatus = new TSStatus[parentPlanRowCount]; - Arrays.fill(tmpSubTsStatus, RpcUtils.SUCCESS_STATUS); - subStatus[parentIndex].subStatus = Arrays.asList(tmpSubTsStatus); - } - TSStatus[] reorderTsStatus = - subStatus[parentIndex].subStatus.toArray(new TSStatus[] {}); - - PartitionUtils.reordering( - tmpInsertTabletPlan, - reorderTsStatus, - tmpStatus.subStatus.get(i).subStatus.toArray(new TSStatus[] {})); - subStatus[parentIndex].subStatus = Arrays.asList(reorderTsStatus); - } - if (tmpStatus.isSetRedirectNode()) { - if (tmpStatus.isSetRedirectNode() - && tmpInsertTabletPlan.getMaxTime() - == ((InsertMultiTabletsPlan) parentPlan) - .getInsertTabletPlan(parentIndex) - .getMaxTime()) { - subStatus[parentIndex].setRedirectNode(tmpStatus.redirectNode); - isBatchRedirect = true; - } - } - } else if (tmpStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - if (tmpStatus.isSetRedirectNode() - && tmpInsertTabletPlan.getMaxTime() - 
== ((InsertMultiTabletsPlan) parentPlan) - .getInsertTabletPlan(parentIndex) - .getMaxTime()) { - subStatus[parentIndex] = - StatusUtils.getStatus(RpcUtils.SUCCESS_STATUS, tmpStatus.redirectNode); - isBatchRedirect = true; - } - } - } - } else if (parentPlan instanceof CreateMultiTimeSeriesPlan) { - CreateMultiTimeSeriesPlan subPlan = (CreateMultiTimeSeriesPlan) entry.getKey(); - for (int i = 0; i < subPlan.getIndexes().size(); i++) { - subStatus[subPlan.getIndexes().get(i)] = tmpStatus.subStatus.get(i); - } - } else if (parentPlan instanceof InsertRowsPlan) { - InsertRowsPlan subPlan = (InsertRowsPlan) entry.getKey(); - if (tmpStatus.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) { - for (int i = 0; i < subPlan.getInsertRowPlanIndexList().size(); i++) { - subStatus[subPlan.getInsertRowPlanIndexList().get(i)] = tmpStatus.subStatus.get(i); - if (tmpStatus.isSetRedirectNode()) { - subStatus[subPlan.getInsertRowPlanIndexList().get(i)].setRedirectNode( - tmpStatus.getRedirectNode()); - isBatchRedirect = true; - } - } - } else if (tmpStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - if (tmpStatus.isSetRedirectNode()) { - isBatchRedirect = true; - TSStatus redirectStatus = - StatusUtils.getStatus(RpcUtils.SUCCESS_STATUS, tmpStatus.getRedirectNode()); - for (int i = 0; i < subPlan.getInsertRowPlanIndexList().size(); i++) { - subStatus[subPlan.getInsertRowPlanIndexList().get(i)] = redirectStatus; - } - } - } - } - } - - if (tmpStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - // execution failed, record the error message - errorCodePartitionGroups.add( - String.format( - "[%s@%s:%s:%s]", - tmpStatus.getCode(), - entry.getValue().getHeader(), - tmpStatus.getMessage(), - tmpStatus.subStatus)); - } - } - return concludeFinalStatus( - parentPlan, - totalRowNum, - noFailure, - isBatchRedirect, - isBatchFailure, - subStatus, - errorCodePartitionGroups); - } - - private TSStatus concludeFinalStatus( - PhysicalPlan parentPlan, - int totalRowNum, - boolean noFailure, - boolean isBatchRedirect, - boolean isBatchFailure, - TSStatus[] subStatus, - List errorCodePartitionGroups) { - if (parentPlan instanceof InsertMultiTabletsPlan - && !((InsertMultiTabletsPlan) parentPlan).getResults().isEmpty()) { - if (subStatus == null) { - subStatus = new TSStatus[totalRowNum]; - Arrays.fill(subStatus, RpcUtils.SUCCESS_STATUS); - } - noFailure = false; - isBatchFailure = true; - for (Map.Entry integerTSStatusEntry : - ((InsertMultiTabletsPlan) parentPlan).getResults().entrySet()) { - subStatus[integerTSStatusEntry.getKey()] = integerTSStatusEntry.getValue(); - } - } - - if (parentPlan instanceof CreateMultiTimeSeriesPlan - && !((CreateMultiTimeSeriesPlan) parentPlan).getResults().isEmpty()) { - if (subStatus == null) { - subStatus = new TSStatus[totalRowNum]; - Arrays.fill(subStatus, RpcUtils.SUCCESS_STATUS); - } - noFailure = false; - isBatchFailure = true; - for (Map.Entry integerTSStatusEntry : - ((CreateMultiTimeSeriesPlan) parentPlan).getResults().entrySet()) { - subStatus[integerTSStatusEntry.getKey()] = integerTSStatusEntry.getValue(); - } - } - - if (parentPlan instanceof InsertRowsPlan - && !((InsertRowsPlan) parentPlan).getResults().isEmpty()) { - if (subStatus == null) { - subStatus = new TSStatus[totalRowNum]; - Arrays.fill(subStatus, RpcUtils.SUCCESS_STATUS); - } - noFailure = false; - isBatchFailure = true; - for (Map.Entry integerTSStatusEntry : - ((InsertRowsPlan) parentPlan).getResults().entrySet()) { - subStatus[integerTSStatusEntry.getKey()] = 
integerTSStatusEntry.getValue(); - } - } - - TSStatus status; - if (noFailure) { - if (isBatchRedirect) { - status = RpcUtils.getStatus(Arrays.asList(subStatus)); - status.setCode(TSStatusCode.NEED_REDIRECTION.getStatusCode()); - } else { - status = StatusUtils.OK; - } - } else if (isBatchFailure) { - status = RpcUtils.getStatus(Arrays.asList(subStatus)); - } else { - status = - StatusUtils.getStatus( - StatusUtils.EXECUTE_STATEMENT_ERROR, MSG_MULTIPLE_ERROR + errorCodePartitionGroups); - } - return status; - } - - /** - * Forward a plan to the DataGroupMember of one node in the group. Only when all nodes time out, - * will a TIME_OUT be returned. - */ - private TSStatus forwardPlan(PhysicalPlan plan, PartitionGroup group) { - for (Node node : group) { - TSStatus status; - try { - // only data plans are partitioned, so it must be processed by its data server instead of - // meta server - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - status = forwardDataPlanAsync(plan, node, group.getHeader()); - } else { - status = forwardDataPlanSync(plan, node, group.getHeader()); - } - } catch (IOException e) { - status = StatusUtils.getStatus(StatusUtils.EXECUTE_STATEMENT_ERROR, e.getMessage()); - } - if (!StatusUtils.TIME_OUT.equals(status)) { - if (!status.isSetRedirectNode()) { - status.setRedirectNode(new TEndPoint(node.getClientIp(), node.getClientPort())); - } - return status; - } else { - logger.warn("Forward {} to {} timed out", plan, node); - } - } - logger.warn("Forward {} to {} timed out", plan, group); - return StatusUtils.TIME_OUT; - } - - /** - * Forward a non-query plan to the data port of "receiver" - * - * @param plan a non-query plan - * @param header to determine which DataGroupMember of "receiver" will process the request. - * @return a TSStatus indicating if the forwarding is successful. - */ - private TSStatus forwardDataPlanAsync(PhysicalPlan plan, Node receiver, RaftNode header) - throws IOException { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(receiver, ClusterConstant.getWriteOperationTimeoutMS()); - return this.metaGroupMember.forwardPlanAsync(plan, receiver, header, client); - } - - private TSStatus forwardDataPlanSync(PhysicalPlan plan, Node receiver, RaftNode header) - throws IOException { - SyncDataClient client = - ClusterIoTDB.getInstance() - .getSyncDataClient(receiver, ClusterConstant.getWriteOperationTimeoutMS()); - return this.metaGroupMember.forwardPlanSync(plan, receiver, header, client); - } - - public Node getThisNode() { - return thisNode; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/AddSelfException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/AddSelfException.java deleted file mode 100644 index 3e7ff154317d..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/AddSelfException.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
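
The forwardMultiSubPlan bookkeeping above boils down to one idea: each sub-plan remembers which rows of the parent plan it covers, and every data group's per-row results are written back into those slots of a single overall status array. A minimal, self-contained sketch of that idea, using plain int codes in place of TSStatus; the names SubStatusMerge and GroupResult are illustrative and not part of the deleted sources:

import java.util.Arrays;
import java.util.List;

public class SubStatusMerge {

  static final int SUCCESS = 200;

  // What one data group reports back: the parent-plan rows it covered and their codes.
  static class GroupResult {
    final int[] parentIndexes;
    final int[] rowCodes;

    GroupResult(int[] parentIndexes, int[] rowCodes) {
      this.parentIndexes = parentIndexes;
      this.rowCodes = rowCodes;
    }
  }

  // Merge every group's per-row codes into one array indexed by parent row.
  static int[] merge(int totalRowNum, List<GroupResult> results) {
    int[] overall = new int[totalRowNum];
    Arrays.fill(overall, SUCCESS); // start optimistic, as the deleted code does with RpcUtils.SUCCESS_STATUS
    for (GroupResult r : results) {
      for (int i = 0; i < r.parentIndexes.length; i++) {
        overall[r.parentIndexes[i]] = r.rowCodes[i];
      }
    }
    return overall;
  }
}
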
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.exception; - -/** Raised when a node received an AddNodeRequest of adding itself. */ -public class AddSelfException extends Exception { - - public AddSelfException() { - super("Cannot add one itself"); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/BadSeedUrlFormatException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/BadSeedUrlFormatException.java deleted file mode 100644 index 24ea7583c1a9..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/BadSeedUrlFormatException.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.exception; - -public class BadSeedUrlFormatException extends Exception { - - public BadSeedUrlFormatException(String seedUrl) { - super( - String.format( - "Seed url %s has bad format, which should be " + "{IP/DomainName}:{metaPort}", - seedUrl)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/ChangeMembershipException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/ChangeMembershipException.java deleted file mode 100644 index 6ad64eabb85d..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/ChangeMembershipException.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.exception; - -/** Raised when add/remove membership log can not be sent to all data groups */ -public class ChangeMembershipException extends Exception { - - public ChangeMembershipException(String errMsg) { - super(String.format("Change membership fails, error message=%s ", errMsg)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/CheckConsistencyException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/CheckConsistencyException.java deleted file mode 100644 index c872ceea17d3..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/CheckConsistencyException.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.exception; - -/** - * Raised when check consistency failed, now only happens if there is a strong-consistency and - * syncLeader failed - */ -public class CheckConsistencyException extends Exception { - - public CheckConsistencyException(String errMsg) { - super(String.format("check consistency failed, error message=%s ", errMsg)); - } - - public static final CheckConsistencyException CHECK_STRONG_CONSISTENCY_EXCEPTION = - new CheckConsistencyException("strong consistency, sync with leader failed"); - - public static final CheckConsistencyException CHECK_MID_CONSISTENCY_EXCEPTION = - new CheckConsistencyException( - "mid consistency, localAppliedId is smaller than the leaderCommitId"); -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/ConfigInconsistentException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/ConfigInconsistentException.java deleted file mode 100644 index 71e2eeb5579b..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/ConfigInconsistentException.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.exception; - -public class ConfigInconsistentException extends Exception { - - public ConfigInconsistentException() { - super( - "The configuration of this node is inconsistent with the cluster. See previous logs for " - + "explanation"); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/EmptyIntervalException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/EmptyIntervalException.java deleted file mode 100644 index 829ba15b8638..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/EmptyIntervalException.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.exception; - -import org.apache.iotdb.tsfile.read.filter.basic.Filter; - -public class EmptyIntervalException extends Exception { - - public EmptyIntervalException(Filter filter) { - super(String.format("The interval of the filter %s is empty.", filter)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/EntryCompactedException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/EntryCompactedException.java deleted file mode 100644 index 629252f99542..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/EntryCompactedException.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.exception; - -public class EntryCompactedException extends Exception { - - public EntryCompactedException(long index, long boundary) { - super( - String.format( - "Entry index %d is unavailable due to compaction, and the lower bound is %d", - index, boundary)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/EntryUnavailableException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/EntryUnavailableException.java deleted file mode 100644 index d5d030389a7d..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/EntryUnavailableException.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.exception; - -public class EntryUnavailableException extends Exception { - - public EntryUnavailableException(long index, long boundary) { - super( - String.format("Entry index %d is unavailable, and the upper bound is %d", index, boundary)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/GetEntriesWrongParametersException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/GetEntriesWrongParametersException.java deleted file mode 100644 index d6903b9b19d6..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/GetEntriesWrongParametersException.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.exception; - -public class GetEntriesWrongParametersException extends Exception { - - public GetEntriesWrongParametersException(long low, long high) { - super(String.format("invalid getEntries: parameter: %d >= %d", low, high)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/LeaderUnknownException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/LeaderUnknownException.java deleted file mode 100644 index f91285415a00..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/LeaderUnknownException.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.exception; - -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import java.util.Collection; - -/** Raised when a request should be forwarded to the leader but the leader cannot be found. */ -public class LeaderUnknownException extends Exception { - - public LeaderUnknownException(Collection nodes) { - super(String.format("The leader is unknown in this group %s", nodes)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/LogExecutionException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/LogExecutionException.java deleted file mode 100644 index 609c5f9e60ed..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/LogExecutionException.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.exception; - -public class LogExecutionException extends Exception { - - public LogExecutionException(Throwable cause) { - super(cause); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/MemberReadOnlyException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/MemberReadOnlyException.java deleted file mode 100644 index 0139e42e5788..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/MemberReadOnlyException.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.exception; - -import org.apache.iotdb.cluster.rpc.thrift.Node; - -public class MemberReadOnlyException extends Exception { - - public MemberReadOnlyException(Node node) { - super( - String.format( - "The node %s has been set readonly for the partitions, please retry to find " - + "a new node", - node)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/NoHeaderNodeException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/NoHeaderNodeException.java deleted file mode 100644 index b610a6162662..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/NoHeaderNodeException.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.exception; - -/** - * Raised when a DataClusterServer receives a request without a header node indicating which group - * it belongs to. 
- */ -public class NoHeaderNodeException extends Exception { - - public NoHeaderNodeException() { - super("Header Node is required in data group communication!"); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/NotInSameGroupException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/NotInSameGroupException.java deleted file mode 100644 index e11b438a3ccf..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/NotInSameGroupException.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.exception; - -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import java.util.List; - -/** Raised when a DataClusterServer receives a request but the node is not in the target group. */ -public class NotInSameGroupException extends Exception { - - public NotInSameGroupException(List group, Node thisNode) { - super(String.format("This node %s is not in the data group %s", thisNode, group)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/NotManagedSlotException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/NotManagedSlotException.java deleted file mode 100644 index 638f2aae73fa..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/NotManagedSlotException.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.exception; - -import java.util.List; - -/** - * Raised when a data group leader receives a PullSnapshotRequest but finds it does not manage the - * requested slot. 
- */ -public class NotManagedSlotException extends Exception { - - public NotManagedSlotException(int requiredSlots, List slots) { - super(String.format("%d is not in the managed slots %s", requiredSlots, slots)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/PartitionTableUnavailableException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/PartitionTableUnavailableException.java deleted file mode 100644 index 5a9679e2e800..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/PartitionTableUnavailableException.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.exception; - -import org.apache.iotdb.cluster.rpc.thrift.Node; - -/** Raised when a node receives requests before the its partition table is set up. */ -public class PartitionTableUnavailableException extends Exception { - - public PartitionTableUnavailableException(Node node) { - super(String.format("Partition table of %s is not ready, cannot serve", node)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/PullFileException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/PullFileException.java deleted file mode 100644 index 2b7151e0388a..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/PullFileException.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.exception; - -import org.apache.iotdb.cluster.rpc.thrift.Node; - -public class PullFileException extends Exception { - - public PullFileException(String fileName, Node node) { - super(String.format("Cannot pull file %s from %s due to network condition", fileName, node)); - } - - public PullFileException(String fileName, Node node, Exception e) { - super( - String.format("Cannot pull file %s from %s because %s", fileName, node, e.getMessage()), e); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/QueryTimeOutException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/QueryTimeOutException.java deleted file mode 100644 index ea42fdf598bd..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/QueryTimeOutException.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.exception; - -public class QueryTimeOutException extends Exception { - - public QueryTimeOutException(String query) { - super("Query: " + query + " timed out"); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/ReaderNotFoundException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/ReaderNotFoundException.java deleted file mode 100644 index 48a65d4f82f2..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/ReaderNotFoundException.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.exception; - -public class ReaderNotFoundException extends Exception { - - public ReaderNotFoundException(long readerId) { - super(String.format("The requested reader %d is not found", readerId)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/RequestTimeOutException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/RequestTimeOutException.java deleted file mode 100644 index 6fbede1d6bfe..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/RequestTimeOutException.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.exception; - -import org.apache.iotdb.cluster.log.Log; - -/** Raised when a request times out before reaching an agreement in a group. */ -public class RequestTimeOutException extends Exception { - - public RequestTimeOutException(Log log) { - super("Request \"" + log + "\" timeout"); - } - - public RequestTimeOutException(String request) { - super("Request \"" + request + "\" timeout"); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/SnapshotInstallationException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/SnapshotInstallationException.java deleted file mode 100644 index 750c6bf7dbcc..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/SnapshotInstallationException.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.exception; - -public class SnapshotInstallationException extends Exception { - - public SnapshotInstallationException(String message) { - super(message); - } - - public SnapshotInstallationException(String message, Throwable cause) { - super(message, cause); - } - - public SnapshotInstallationException() {} - - public SnapshotInstallationException(Throwable cause) { - super(cause); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/StartUpCheckFailureException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/StartUpCheckFailureException.java deleted file mode 100644 index d84e7413ddcd..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/StartUpCheckFailureException.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.exception; - -public class StartUpCheckFailureException extends Exception { - - public StartUpCheckFailureException() { - super("The start up check cannot finish timely, please check the network"); - } - - public StartUpCheckFailureException(Throwable e) { - super(e); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/TruncateCommittedEntryException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/TruncateCommittedEntryException.java deleted file mode 100644 index ac1099938cd0..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/TruncateCommittedEntryException.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.exception; - -public class TruncateCommittedEntryException extends Exception { - - public TruncateCommittedEntryException(long index, long committed) { - super( - String.format( - "The committed entries cannot be truncated: parameter: %d, commitIndex : %d", - index, committed)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/UnknownLogTypeException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/UnknownLogTypeException.java deleted file mode 100644 index d2eaa6d2c117..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/UnknownLogTypeException.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.exception; - -/** Raised when the type of a raft log is unknown. */ -public class UnknownLogTypeException extends Exception { - - public UnknownLogTypeException(int logType) { - super("Unknown log type: " + logType); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/exception/UnsupportedPlanException.java b/cluster/src/main/java/org/apache/iotdb/cluster/exception/UnsupportedPlanException.java deleted file mode 100644 index 479db4c9c2e5..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/exception/UnsupportedPlanException.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.exception; - -import org.apache.iotdb.db.qp.physical.PhysicalPlan; - -public class UnsupportedPlanException extends Exception { - - public UnsupportedPlanException(PhysicalPlan plan) { - super(String.format("Plan %s is not supported", plan)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/CommitLogCallback.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/CommitLogCallback.java deleted file mode 100644 index 7205d2a5e32d..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/CommitLogCallback.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log; - -import org.apache.iotdb.cluster.server.member.RaftMember; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class CommitLogCallback implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(CommitLogCallback.class); - private final RaftMember raftMember; - - public CommitLogCallback(RaftMember raftMember) { - this.raftMember = raftMember; - } - - @Override - public void onComplete(Void v) { - synchronized (raftMember.getSyncLock()) { - raftMember.getSyncLock().notifyAll(); - } - } - - @Override - public void onError(Exception e) { - logger.error("async commit log failed", e); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/CommitLogTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/CommitLogTask.java deleted file mode 100644 index 8c40dc8cc819..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/CommitLogTask.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log; - -import org.apache.iotdb.cluster.log.manage.RaftLogManager; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class CommitLogTask implements Runnable { - - private static final Logger logger = LoggerFactory.getLogger(CommitLogTask.class); - private RaftLogManager logManager; - private long leaderCommit; - private long term; - - public CommitLogTask(RaftLogManager logManager, long leaderCommit, long term) { - this.logManager = logManager; - this.leaderCommit = leaderCommit; - this.term = term; - } - - /** listener field */ - private AsyncMethodCallback callback; - - /** @param callback the event listener */ - public void registerCallback(AsyncMethodCallback callback) { - this.callback = callback; - } - - private void doCommitLog() { - if (callback == null) { - logger.error("callback is not registered"); - return; - } - - boolean success = logManager.maybeCommit(leaderCommit, term); - if (success) { - callback.onComplete(null); - } - } - - @Override - public void run() { - doCommitLog(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/HardState.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/HardState.java deleted file mode 100644 index 2e9e172341ba..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/HardState.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
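
A small usage sketch for the removed CommitLogTask and CommitLogCallback above, assuming the classes are still on the classpath. The helper submitCommit and its parameters are illustrative, and the wiring (register the callback, then hand the task to an executor) is one plausible arrangement rather than a copy of the deleted call sites:

import org.apache.iotdb.cluster.log.CommitLogCallback;
import org.apache.iotdb.cluster.log.CommitLogTask;
import org.apache.iotdb.cluster.log.manage.RaftLogManager;
import org.apache.iotdb.cluster.server.member.RaftMember;

import java.util.concurrent.ExecutorService;

public class CommitLogExample {

  // Ask the log manager to advance its commit index to leaderCommit for the given term.
  static void submitCommit(
      ExecutorService executor,
      RaftMember member,
      RaftLogManager logManager,
      long leaderCommit,
      long term) {
    CommitLogTask task = new CommitLogTask(logManager, leaderCommit, term);
    // On success the callback notifies everyone blocked on member.getSyncLock();
    // on failure it only logs the error.
    task.registerCallback(new CommitLogCallback(member));
    executor.submit(task);
  }
}
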
- */ - -package org.apache.iotdb.cluster.log; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.NodeSerializeUtils; -import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; - -public class HardState { - - private long currentTerm; - private Node voteFor; - - public HardState() { - this.voteFor = null; - } - - public static HardState deserialize(ByteBuffer buffer) { - HardState res = new HardState(); - res.setCurrentTerm(ReadWriteIOUtils.readLong(buffer)); - int isNull = buffer.get(); - if (isNull == 1) { - Node node = new Node(); - NodeSerializeUtils.deserialize(node, buffer); - res.setVoteFor(node); - } else { - res.setVoteFor(null); - } - return res; - } - - public ByteBuffer serialize() { - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(outputStream); - try { - dataOutputStream.writeLong(currentTerm); - if (voteFor == null) { - dataOutputStream.writeByte(0); - } else { - dataOutputStream.writeByte(1); - NodeSerializeUtils.serialize(voteFor, dataOutputStream); - } - } catch (IOException e) { - // unreachable - } - return ByteBuffer.wrap(outputStream.toByteArray()); - } - - public long getCurrentTerm() { - return currentTerm; - } - - public void setCurrentTerm(long currentTerm) { - this.currentTerm = currentTerm; - } - - public Node getVoteFor() { - return voteFor; - } - - public void setVoteFor(Node voteFor) { - this.voteFor = voteFor; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof HardState)) { - return false; - } - HardState that = (HardState) o; - return new EqualsBuilder() - .append(currentTerm, that.currentTerm) - .append(voteFor, that.voteFor) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(17, 37).append(currentTerm).append(voteFor).toHashCode(); - } - - @Override - public String toString() { - return "HardState{" + "currentTerm=" + currentTerm + ", voteFor=" + voteFor + '}'; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/Log.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/Log.java deleted file mode 100644 index e70c326e336d..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/Log.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
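
A round-trip sketch for the removed HardState above: serialize() writes the term as a long followed by a one-byte null marker (or the serialized voteFor node), so deserializing the returned buffer reproduces an equal object. The class name HardStateRoundTrip is illustrative:

import org.apache.iotdb.cluster.log.HardState;

import java.nio.ByteBuffer;

public class HardStateRoundTrip {

  public static void main(String[] args) {
    HardState state = new HardState();
    state.setCurrentTerm(7L);
    state.setVoteFor(null); // a null vote is written as a single 0 marker byte

    ByteBuffer buffer = state.serialize();
    HardState restored = HardState.deserialize(buffer);

    // equals() compares currentTerm and voteFor, so the round trip is lossless.
    System.out.println(state.equals(restored)); // true
  }
}
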
- */ -package org.apache.iotdb.cluster.log; - -import java.nio.ByteBuffer; -import java.util.Comparator; -import java.util.Objects; - -/** - * Log records operations that are made on this cluster. Each log records 2 longs: currLogIndex, - * currLogTerm, so that the logs in a cluster will form a log chain and abnormal operations can thus - * be distinguished and removed. - */ -public abstract class Log implements Comparable { - - private static final Comparator COMPARATOR = - Comparator.comparingLong(Log::getCurrLogIndex).thenComparing(Log::getCurrLogTerm); - - protected static final int DEFAULT_BUFFER_SIZE = 4096; - private long currLogIndex; - private long currLogTerm; - - // for async application - private volatile boolean applied; - - @SuppressWarnings("java:S3077") - private volatile Exception exception; - - private long createTime; - private long enqueueTime; - - private int byteSize = 0; - - public abstract ByteBuffer serialize(); - - public abstract void deserialize(ByteBuffer buffer); - - public enum Types { - // DO CHECK LogParser when you add a new type of log - ADD_NODE, - PHYSICAL_PLAN, - CLOSE_FILE, - REMOVE_NODE, - EMPTY_CONTENT, - TEST_LARGE_CONTENT - } - - public long getCurrLogIndex() { - return currLogIndex; - } - - public void setCurrLogIndex(long currLogIndex) { - this.currLogIndex = currLogIndex; - } - - public long getCurrLogTerm() { - return currLogTerm; - } - - public void setCurrLogTerm(long currLogTerm) { - this.currLogTerm = currLogTerm; - } - - @SuppressWarnings("java:S2886") // synchronized outside - public boolean isApplied() { - return applied; - } - - public void setApplied(boolean applied) { - synchronized (this) { - this.applied = applied; - this.notifyAll(); - } - } - - public Exception getException() { - return exception; - } - - public void setException(Exception exception) { - this.exception = exception; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - Log log = (Log) o; - return currLogIndex == log.currLogIndex && currLogTerm == log.currLogTerm; - } - - @Override - public int hashCode() { - return Objects.hash(currLogIndex, currLogTerm); - } - - @Override - public int compareTo(Log o) { - return COMPARATOR.compare(this, o); - } - - public long getCreateTime() { - return createTime; - } - - public void setCreateTime(long createTime) { - this.createTime = createTime; - } - - public long getEnqueueTime() { - return enqueueTime; - } - - public void setEnqueueTime(long enqueueTime) { - this.enqueueTime = enqueueTime; - } - - public long getByteSize() { - return byteSize; - } - - public void setByteSize(int byteSize) { - this.byteSize = byteSize; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/LogApplier.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/LogApplier.java deleted file mode 100644 index e4fbec77ca0a..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/LogApplier.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log; - -/** LogApplier applies the log to the local node to make it take effect. */ -public interface LogApplier { - - /** - * Apply the given log, if any exception is thrown during the execution, the exception will be - * recorded in the log. Either an exception is thrown or the log is executed successfully, log - * .setApplied(true) must be called. - * - * @param log - */ - void apply(Log log); - - default void close() {} -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/LogDispatcher.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/LogDispatcher.java deleted file mode 100644 index c9ebf31d1775..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/LogDispatcher.java +++ /dev/null @@ -1,461 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
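
A minimal applier honouring the contract documented in the LogApplier interface above: any failure is recorded in the entry itself, and setApplied(true) is always called so threads waiting on the entry are released. NoOpLogApplier is an illustrative name, not one of the deleted implementations:

import org.apache.iotdb.cluster.log.Log;
import org.apache.iotdb.cluster.log.LogApplier;

public class NoOpLogApplier implements LogApplier {

  @Override
  public void apply(Log log) {
    try {
      // A real applier would dispatch on the log type (PHYSICAL_PLAN, ADD_NODE, ...)
      // and execute it against the local node.
    } catch (Exception e) {
      // Record the failure in the entry itself, as the interface documentation requires.
      log.setException(e);
    } finally {
      // Always mark the entry applied; setApplied(true) notifies all waiters on the log object.
      log.setApplied(true);
    }
  }
}
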
- */ - -package org.apache.iotdb.cluster.log; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntriesRequest; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client; -import org.apache.iotdb.cluster.server.handlers.caller.AppendNodeEntryHandler; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.cluster.server.monitor.Peer; -import org.apache.iotdb.cluster.server.monitor.Timer; -import org.apache.iotdb.cluster.utils.ClientUtils; -import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.conf.IoTDBDescriptor; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; - -/** - * A LogDispatcher serves a raft leader by queuing logs that the leader wants to send to its - * followers and send the logs in an ordered manner so that the followers will not wait for previous - * logs for too long. For example: if the leader send 3 logs, log1, log2, log3, concurrently to - * follower A, the actual reach order may be log3, log2, and log1. According to the protocol, log3 - * and log2 must halt until log1 reaches, as a result, the total delay may increase significantly. - */ -public class LogDispatcher { - - private static final Logger logger = LoggerFactory.getLogger(LogDispatcher.class); - private RaftMember member; - private boolean useBatchInLogCatchUp = - ClusterDescriptor.getInstance().getConfig().isUseBatchInLogCatchUp(); - // each follower has a queue and a dispatch thread is attached in executorService. - private List> nodeLogQueues = new ArrayList<>(); - private ExecutorService executorService; - - // TODO we have no way to close this pool. - private static final ExecutorService serializationService = - IoTDBThreadPoolFactory.newFixedThreadPoolWithDaemonThread( - Runtime.getRuntime().availableProcessors(), "DispatcherEncoder"); - - public LogDispatcher(RaftMember member) { - this.member = member; - executorService = - IoTDBThreadPoolFactory.newCachedThreadPool("LogDispatcher-" + member.getName()); - for (Node node : member.getAllNodes()) { - if (!node.equals(member.getThisNode())) { - nodeLogQueues.add(createQueueAndBindingThread(node)); - } - } - } - - @TestOnly - public void close() throws InterruptedException { - executorService.shutdownNow(); - executorService.awaitTermination(10, TimeUnit.SECONDS); - } - - public void offer(SendLogRequest log) { - // if nodeLogQueues.isEmpty(), then nothing to do. 
- - // do serialization here to avoid taking LogManager for too long - if (!nodeLogQueues.isEmpty()) { - log.serializedLogFuture = - serializationService.submit( - () -> { - ByteBuffer byteBuffer = log.getLog().serialize(); - log.getLog().setByteSize(byteBuffer.array().length); - return byteBuffer; - }); - } - for (int i = 0; i < nodeLogQueues.size(); i++) { - BlockingQueue nodeLogQueue = nodeLogQueues.get(i); - try { - boolean addSucceeded; - if (ClusterDescriptor.getInstance().getConfig().isWaitForSlowNode()) { - addSucceeded = - nodeLogQueue.offer( - log, - ClusterDescriptor.getInstance().getConfig().getWriteOperationTimeoutMS(), - TimeUnit.MILLISECONDS); - } else { - addSucceeded = nodeLogQueue.add(log); - } - - if (!addSucceeded) { - logger.debug( - "Log queue[{}] of {} is full, ignore the log to this node", i, member.getName()); - } else { - log.setEnqueueTime(System.nanoTime()); - } - } catch (IllegalStateException e) { - logger.debug( - "Log queue[{}] of {} is full, ignore the log to this node", i, member.getName()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - } - - private BlockingQueue createQueueAndBindingThread(Node node) { - BlockingQueue logBlockingQueue = - new ArrayBlockingQueue<>( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - int bindingThreadNum = 1; - for (int i = 0; i < bindingThreadNum; i++) { - executorService.submit(new DispatcherThread(node, logBlockingQueue)); - } - return logBlockingQueue; - } - - public static class SendLogRequest { - - private Log log; - private AtomicInteger voteCounter; - private AtomicBoolean leaderShipStale; - private AtomicLong newLeaderTerm; - private AppendEntryRequest appendEntryRequest; - private long enqueueTime; - private Future serializedLogFuture; - - public SendLogRequest( - Log log, - AtomicInteger voteCounter, - AtomicBoolean leaderShipStale, - AtomicLong newLeaderTerm, - AppendEntryRequest appendEntryRequest) { - this.setLog(log); - this.setVoteCounter(voteCounter); - this.setLeaderShipStale(leaderShipStale); - this.setNewLeaderTerm(newLeaderTerm); - this.setAppendEntryRequest(appendEntryRequest); - } - - public AtomicInteger getVoteCounter() { - return voteCounter; - } - - public void setVoteCounter(AtomicInteger voteCounter) { - this.voteCounter = voteCounter; - } - - public Log getLog() { - return log; - } - - public void setLog(Log log) { - this.log = log; - } - - public long getEnqueueTime() { - return enqueueTime; - } - - public void setEnqueueTime(long enqueueTime) { - this.enqueueTime = enqueueTime; - } - - public AtomicBoolean getLeaderShipStale() { - return leaderShipStale; - } - - public void setLeaderShipStale(AtomicBoolean leaderShipStale) { - this.leaderShipStale = leaderShipStale; - } - - public AtomicLong getNewLeaderTerm() { - return newLeaderTerm; - } - - void setNewLeaderTerm(AtomicLong newLeaderTerm) { - this.newLeaderTerm = newLeaderTerm; - } - - public AppendEntryRequest getAppendEntryRequest() { - return appendEntryRequest; - } - - public void setAppendEntryRequest(AppendEntryRequest appendEntryRequest) { - this.appendEntryRequest = appendEntryRequest; - } - - @Override - public String toString() { - return "SendLogRequest{" + "log=" + log + '}'; - } - } - - class DispatcherThread implements Runnable { - - private Node receiver; - private BlockingQueue logBlockingDeque; - private List currBatch = new ArrayList<>(); - private Peer peer; - - DispatcherThread(Node receiver, BlockingQueue logBlockingDeque) { - this.receiver = receiver; - 
this.logBlockingDeque = logBlockingDeque; - this.peer = - member - .getPeerMap() - .computeIfAbsent(receiver, r -> new Peer(member.getLogManager().getLastLogIndex())); - } - - @Override - public void run() { - Thread.currentThread().setName("LogDispatcher-" + member.getName() + "-" + receiver); - try { - while (!Thread.interrupted()) { - SendLogRequest poll = logBlockingDeque.take(); - currBatch.add(poll); - logBlockingDeque.drainTo(currBatch); - if (logger.isDebugEnabled()) { - logger.debug("Sending {} logs to {}", currBatch.size(), receiver); - } - for (SendLogRequest request : currBatch) { - request.getAppendEntryRequest().entry = request.serializedLogFuture.get(); - } - sendBatchLogs(currBatch); - currBatch.clear(); - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (Exception e) { - logger.error("Unexpected error in log dispatcher", e); - } - logger.info("Dispatcher exits"); - } - - private void appendEntriesAsync( - List logList, AppendEntriesRequest request, List currBatch) - throws TException { - AsyncMethodCallback handler = new AppendEntriesHandler(currBatch); - AsyncClient client = member.getSendLogAsyncClient(receiver); - if (logger.isDebugEnabled()) { - logger.debug( - "{}: append entries {} with {} logs", member.getName(), receiver, logList.size()); - } - if (client != null) { - client.appendEntries(request, handler); - } - } - - private void appendEntriesSync( - List logList, AppendEntriesRequest request, List currBatch) { - - long startTime = Timer.Statistic.RAFT_SENDER_WAIT_FOR_PREV_LOG.getOperationStartTime(); - if (!member.waitForPrevLog(peer, currBatch.get(0).getLog())) { - logger.warn( - "{}: node {} timed out when appending {}", - member.getName(), - receiver, - currBatch.get(0).getLog()); - return; - } - Timer.Statistic.RAFT_SENDER_WAIT_FOR_PREV_LOG.calOperationCostTimeFromStart(startTime); - - Client client = member.getSyncClient(receiver); - if (client == null) { - logger.error("No available client for {}", receiver); - return; - } - AsyncMethodCallback handler = new AppendEntriesHandler(currBatch); - startTime = Timer.Statistic.RAFT_SENDER_SEND_LOG.getOperationStartTime(); - try { - long result = client.appendEntries(request); - Timer.Statistic.RAFT_SENDER_SEND_LOG.calOperationCostTimeFromStart(startTime); - if (result != -1 && logger.isInfoEnabled()) { - logger.info( - "{}: Append {} logs to {}, resp: {}", - member.getName(), - logList.size(), - receiver, - result); - } - handler.onComplete(result); - } catch (TException e) { - client.getInputProtocol().getTransport().close(); - handler.onError(e); - logger.warn("Failed logs: {}, first index: {}", logList, request.prevLogIndex + 1); - } finally { - ClientUtils.putBackSyncClient(client); - } - } - - private AppendEntriesRequest prepareRequest( - List logList, List currBatch, int firstIndex) { - AppendEntriesRequest request = new AppendEntriesRequest(); - - if (member.getHeader() != null) { - request.setHeader(member.getHeader()); - } - request.setLeader(member.getThisNode()); - request.setLeaderCommit(member.getLogManager().getCommitLogIndex()); - - synchronized (member.getTerm()) { - request.setTerm(member.getTerm().get()); - } - - request.setEntries(logList); - // set index for raft - request.setPrevLogIndex(currBatch.get(firstIndex).getLog().getCurrLogIndex() - 1); - try { - request.setPrevLogTerm(currBatch.get(firstIndex).getAppendEntryRequest().prevLogTerm); - } catch (Exception e) { - logger.error("getTerm failed for newly append entries", e); - } - return request; - } - 
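The dispatcher thread above gets its batching almost for free from BlockingQueue: block on take() for the first request, drainTo() whatever else has piled up, and ship the whole batch in one call. A self-contained, generic sketch of that consumption pattern (not tied to SendLogRequest; the send() method is a placeholder for the real network call):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class BatchingConsumer<T> implements Runnable {

  private final BlockingQueue<T> queue = new ArrayBlockingQueue<>(1024);

  /** Producers call this; it fails fast when the queue is full, like the offer() above. */
  public boolean offer(T item) {
    return queue.offer(item);
  }

  @Override
  public void run() {
    List<T> batch = new ArrayList<>();
    try {
      while (!Thread.interrupted()) {
        batch.add(queue.take()); // wait for at least one item
        queue.drainTo(batch);    // then grab everything queued so far without blocking
        send(batch);             // one round trip per batch instead of one per item
        batch.clear();
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  private void send(List<T> batch) {
    // stand-in for sendBatchLogs(currBatch) in the dispatcher above
  }
}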
- private void sendLogs(List currBatch) throws TException { - int logIndex = 0; - logger.debug( - "send logs from index {} to {}", - currBatch.get(0).getLog().getCurrLogIndex(), - currBatch.get(currBatch.size() - 1).getLog().getCurrLogIndex()); - while (logIndex < currBatch.size()) { - long logSize = IoTDBDescriptor.getInstance().getConfig().getThriftMaxFrameSize(); - List logList = new ArrayList<>(); - int prevIndex = logIndex; - - for (; logIndex < currBatch.size(); logIndex++) { - long curSize = currBatch.get(logIndex).getAppendEntryRequest().entry.array().length; - if (logSize - curSize <= IoTDBConstant.LEFT_SIZE_IN_REQUEST) { - break; - } - logSize -= curSize; - Timer.Statistic.LOG_DISPATCHER_LOG_IN_QUEUE.calOperationCostTimeFromStart( - currBatch.get(logIndex).getLog().getCreateTime()); - logList.add(currBatch.get(logIndex).getAppendEntryRequest().entry); - } - - AppendEntriesRequest appendEntriesRequest = prepareRequest(logList, currBatch, prevIndex); - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - appendEntriesAsync(logList, appendEntriesRequest, currBatch.subList(prevIndex, logIndex)); - } else { - appendEntriesSync(logList, appendEntriesRequest, currBatch.subList(prevIndex, logIndex)); - } - for (; prevIndex < logIndex; prevIndex++) { - Timer.Statistic.LOG_DISPATCHER_FROM_CREATE_TO_END.calOperationCostTimeFromStart( - currBatch.get(prevIndex).getLog().getCreateTime()); - } - } - } - - private void sendBatchLogs(List currBatch) throws TException { - if (currBatch.size() > 1) { - if (useBatchInLogCatchUp) { - sendLogs(currBatch); - } else { - for (SendLogRequest batch : currBatch) { - sendLog(batch); - } - } - } else { - sendLog(currBatch.get(0)); - } - } - - private void sendLog(SendLogRequest logRequest) { - Timer.Statistic.LOG_DISPATCHER_LOG_IN_QUEUE.calOperationCostTimeFromStart( - logRequest.getLog().getCreateTime()); - member.sendLogToFollower( - logRequest.getLog(), - logRequest.getVoteCounter(), - receiver, - logRequest.getLeaderShipStale(), - logRequest.getNewLeaderTerm(), - logRequest.getAppendEntryRequest()); - Timer.Statistic.LOG_DISPATCHER_FROM_CREATE_TO_END.calOperationCostTimeFromStart( - logRequest.getLog().getCreateTime()); - } - - class AppendEntriesHandler implements AsyncMethodCallback { - - private final List> singleEntryHandlers; - - private AppendEntriesHandler(List batch) { - singleEntryHandlers = new ArrayList<>(batch.size()); - for (SendLogRequest sendLogRequest : batch) { - AppendNodeEntryHandler handler = - getAppendNodeEntryHandler( - sendLogRequest.getLog(), - sendLogRequest.getVoteCounter(), - receiver, - sendLogRequest.getLeaderShipStale(), - sendLogRequest.getNewLeaderTerm(), - peer); - singleEntryHandlers.add(handler); - } - } - - @Override - public void onComplete(Long aLong) { - for (AsyncMethodCallback singleEntryHandler : singleEntryHandlers) { - singleEntryHandler.onComplete(aLong); - } - } - - @Override - public void onError(Exception e) { - for (AsyncMethodCallback singleEntryHandler : singleEntryHandlers) { - singleEntryHandler.onError(e); - } - } - - private AppendNodeEntryHandler getAppendNodeEntryHandler( - Log log, - AtomicInteger voteCounter, - Node node, - AtomicBoolean leaderShipStale, - AtomicLong newLeaderTerm, - Peer peer) { - AppendNodeEntryHandler handler = new AppendNodeEntryHandler(); - handler.setReceiver(node); - handler.setVoteCounter(voteCounter); - handler.setLeaderShipStale(leaderShipStale); - handler.setLog(log); - handler.setMember(member); - handler.setPeer(peer); - 
handler.setReceiverTerm(newLeaderTerm); - return handler; - } - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/LogParser.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/LogParser.java deleted file mode 100644 index e35943b3fa0f..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/LogParser.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log; - -import org.apache.iotdb.cluster.exception.UnknownLogTypeException; -import org.apache.iotdb.cluster.log.Log.Types; -import org.apache.iotdb.cluster.log.logtypes.AddNodeLog; -import org.apache.iotdb.cluster.log.logtypes.CloseFileLog; -import org.apache.iotdb.cluster.log.logtypes.EmptyContentLog; -import org.apache.iotdb.cluster.log.logtypes.LargeTestLog; -import org.apache.iotdb.cluster.log.logtypes.PhysicalPlanLog; -import org.apache.iotdb.cluster.log.logtypes.RemoveNodeLog; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.ByteBuffer; - -/** LogParser transform a ByteBuffer into a Log. 
*/ -public class LogParser { - - private static final Logger logger = LoggerFactory.getLogger(LogParser.class); - private static final LogParser INSTANCE = new LogParser(); - - private LogParser() { - // singleton class - } - - public static LogParser getINSTANCE() { - return INSTANCE; - } - - public Log parse(ByteBuffer buffer) throws UnknownLogTypeException { - if (logger.isDebugEnabled()) { - logger.debug("Received a log buffer, pos:{}, limit:{}", buffer.position(), buffer.limit()); - } - int typeInt = buffer.get(); - Types type; - try { - type = Types.values()[typeInt]; - } catch (ArrayIndexOutOfBoundsException e) { - throw new UnknownLogTypeException(typeInt); - } - logger.debug("The log type is {}", type); - Log log; - switch (type) { - case ADD_NODE: - AddNodeLog addNodeLog = new AddNodeLog(); - addNodeLog.deserialize(buffer); - if (logger.isDebugEnabled()) { - logger.debug( - "The last meta log index of log {} is {}", addNodeLog, addNodeLog.getMetaLogIndex()); - } - log = addNodeLog; - break; - case PHYSICAL_PLAN: - PhysicalPlanLog physicalPlanLog = new PhysicalPlanLog(); - physicalPlanLog.deserialize(buffer); - log = physicalPlanLog; - break; - case CLOSE_FILE: - CloseFileLog closeFileLog = new CloseFileLog(); - closeFileLog.deserialize(buffer); - log = closeFileLog; - break; - case REMOVE_NODE: - RemoveNodeLog removeNodeLog = new RemoveNodeLog(); - removeNodeLog.deserialize(buffer); - if (logger.isDebugEnabled()) { - logger.debug( - "The last meta log index of log {} is {}", - removeNodeLog, - removeNodeLog.getMetaLogIndex()); - } - log = removeNodeLog; - break; - case EMPTY_CONTENT: - EmptyContentLog emptyLog = new EmptyContentLog(); - emptyLog.deserialize(buffer); - log = emptyLog; - break; - case TEST_LARGE_CONTENT: - LargeTestLog largeLog = new LargeTestLog(); - largeLog.deserialize(buffer); - log = largeLog; - break; - default: - throw new IllegalArgumentException(type.toString()); - } - logger.debug("Parsed a log {}", log); - return log; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/Snapshot.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/Snapshot.java deleted file mode 100644 index b0c0761b16e7..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/Snapshot.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log; - -import org.apache.iotdb.cluster.log.snapshot.SnapshotInstaller; -import org.apache.iotdb.cluster.server.member.RaftMember; - -import java.nio.ByteBuffer; - -/** - * As we can only hold a certain amount of logs in memory, when the logs' size exceed the memory - * capacity, they will be deleted or compressed, and a snapshot of current system will be generated. 
- * If a node need to catch up but its next log is deleted, it will catch up based on the latest - * snapshot and the logs still in memory. The snapshot could be a file recording the list of current - * system files, or the compressed all historical logs, depending on the implementation. - */ -public abstract class Snapshot { - - protected long lastLogIndex; - protected long lastLogTerm; - - public abstract ByteBuffer serialize(); - - public abstract void deserialize(ByteBuffer buffer); - - public void setLastLogIndex(long lastLogIndex) { - this.lastLogIndex = lastLogIndex; - } - - public void setLastLogTerm(long lastLogTerm) { - this.lastLogTerm = lastLogTerm; - } - - public long getLastLogIndex() { - return lastLogIndex; - } - - public long getLastLogTerm() { - return lastLogTerm; - } - - @SuppressWarnings("java:S1452") // is it possible not to use wildcard? - public abstract SnapshotInstaller getDefaultInstaller(RaftMember member); - - /** - * Discard contents which is generated by logs whose index <= 'minIndex' if possible. This method - * is a best-effort one without guarantee that the result will absolutely not contain contents - * before 'minIndex'. - * - * @param minIndex - */ - public void truncateBefore(long minIndex) {} - - @Override - public String toString() { - return String.format("%d-%d", lastLogIndex, lastLogTerm); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/StableEntryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/StableEntryManager.java deleted file mode 100644 index c82597e3368e..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/StableEntryManager.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
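As a concrete illustration of the Snapshot abstraction above, a minimal subclass might serialize nothing beyond the bookkeeping the base class already carries (lastLogIndex and lastLogTerm). This is only a sketch, not one of the deleted classes, and the unimplemented installer is an assumption:

import java.nio.ByteBuffer;

public class MinimalSnapshot extends Snapshot {

  @Override
  public ByteBuffer serialize() {
    ByteBuffer buffer = ByteBuffer.allocate(2 * Long.BYTES);
    buffer.putLong(lastLogIndex);
    buffer.putLong(lastLogTerm);
    buffer.flip();
    return buffer;
  }

  @Override
  public void deserialize(ByteBuffer buffer) {
    lastLogIndex = buffer.getLong();
    lastLogTerm = buffer.getLong();
  }

  @Override
  public SnapshotInstaller getDefaultInstaller(RaftMember member) {
    // a real snapshot would return an installer that rebuilds state on the follower;
    // left unimplemented because this is only an illustration of the serialization contract
    throw new UnsupportedOperationException("illustrative sketch");
  }
}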
- */ - -package org.apache.iotdb.cluster.log; - -import org.apache.iotdb.cluster.log.manage.serializable.LogManagerMeta; - -import java.io.IOException; -import java.util.List; - -public interface StableEntryManager { - - List getAllEntriesAfterAppliedIndex(); - - List getAllEntriesAfterCommittedIndex(); - - void append(List entries, long maxHaveAppliedCommitIndex) throws IOException; - - void flushLogBuffer(); - - void forceFlushLogBuffer(); - - void removeCompactedEntries(long index); - - void setHardStateAndFlush(HardState state); - - HardState getHardState(); - - LogManagerMeta getMeta(); - - /** - * @param startIndex (inclusive) the log start index - * @param endIndex (inclusive) the log end index - * @return the raft log which index between [startIndex, endIndex] or empty if not found - */ - List getLogs(long startIndex, long endIndex); - - void close(); - - /** - * clear all logs, this method mainly used for after a follower accept a snapshot, all the logs - * should be cleaned - */ - void clearAllLogs(long commitIndex); -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/AsyncDataLogApplier.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/AsyncDataLogApplier.java deleted file mode 100644 index ebcc88556490..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/AsyncDataLogApplier.java +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
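One detail of the StableEntryManager interface above that is easy to misuse is that getLogs treats both bounds as inclusive. A hedged usage sketch for replaying the committed-but-unapplied suffix during recovery (the manager, applier, and index arguments are assumed to exist in the caller):

import java.util.List;

public final class RecoveryHelper {

  /** Replays the committed-but-unapplied suffix from stable storage; a usage sketch only. */
  static void replay(
      StableEntryManager manager, LogApplier applier, long appliedIndex, long commitIndex) {
    // getLogs treats both bounds as inclusive, so the entry at commitIndex is included
    List<Log> unapplied = manager.getLogs(appliedIndex + 1, commitIndex);
    for (Log entry : unapplied) {
      applier.apply(entry); // re-apply in commit order
    }
  }
}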
- */ - -package org.apache.iotdb.cluster.log.applier; - -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogApplier; -import org.apache.iotdb.cluster.log.logtypes.CloseFileLog; -import org.apache.iotdb.cluster.log.logtypes.PhysicalPlanLog; -import org.apache.iotdb.cluster.server.monitor.Timer; -import org.apache.iotdb.cluster.server.monitor.Timer.Statistic; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertMultiTabletsPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowsPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan; -import org.apache.iotdb.db.service.IoTDB; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.function.Consumer; - -public class AsyncDataLogApplier implements LogApplier { - - private static final Logger logger = LoggerFactory.getLogger(AsyncDataLogApplier.class); - private static final int CONCURRENT_CONSUMER_NUM = Runtime.getRuntime().availableProcessors(); - private LogApplier embeddedApplier; - private Map consumerMap; - private ExecutorService consumerPool; - private String name; - - // a plan that affects multiple sgs should wait until all consumers become empty to assure all - // previous logs are applied, such a plan will wait on this condition if it finds any - // consumers nonempty, and each time a consumer becomes empty, this will be notified so the - // waiting log can start another round of check - private final Object consumerEmptyCondition = new Object(); - - public AsyncDataLogApplier(LogApplier embeddedApplier, String name) { - this.embeddedApplier = embeddedApplier; - consumerMap = new HashMap<>(); - consumerPool = - new ThreadPoolExecutor( - CONCURRENT_CONSUMER_NUM, - Integer.MAX_VALUE, - 0, - TimeUnit.SECONDS, - new SynchronousQueue<>()); - this.name = name; - } - - @Override - public void close() { - consumerPool.shutdownNow(); - } - - @Override - // synchronized: when a log is draining consumers, avoid other threads adding more logs so that - // the consumers will never be drained - public synchronized void apply(Log log) { - - PartialPath logKey; - try { - logKey = getLogKey(log); - } catch (StorageGroupNotSetException e) { - logger.debug("Exception occurred when applying {}", log, e); - log.setException(e); - log.setApplied(true); - return; - } - - if (logKey != null) { - // this plan only affects one sg, so we can run it with other plans in parallel - long startTime = Statistic.RAFT_SENDER_COMMIT_TO_CONSUMER_LOGS.getOperationStartTime(); - provideLogToConsumers(logKey, log); - Statistic.RAFT_SENDER_COMMIT_TO_CONSUMER_LOGS.calOperationCostTimeFromStart(startTime); - return; - } - - logger.debug("{}: {} is waiting for consumers to drain", name, log); - long startTime = Statistic.RAFT_SENDER_COMMIT_EXCLUSIVE_LOGS.getOperationStartTime(); - drainConsumers(); - 
applyInternal(log); - Statistic.RAFT_SENDER_COMMIT_EXCLUSIVE_LOGS.calOperationCostTimeFromStart(startTime); - } - - private PartialPath getLogKey(Log log) throws StorageGroupNotSetException { - // we can only apply some kinds of plans in parallel, for other logs, we must wait until all - // previous logs are applied, or the order of deletions and insertions may get wrong - if (log instanceof PhysicalPlanLog) { - PhysicalPlanLog physicalPlanLog = (PhysicalPlanLog) log; - PhysicalPlan plan = physicalPlanLog.getPlan(); - // this plan only affects one sg, so we can run it with other plans in parallel - return getPlanKey(plan); - } else if (log instanceof CloseFileLog) { - CloseFileLog closeFileLog = (CloseFileLog) log; - PartialPath partialPath = null; - try { - partialPath = new PartialPath(closeFileLog.getStorageGroupName()); - } catch (IllegalPathException e) { - // unreachable - } - return partialPath; - } - return null; - } - - private PartialPath getPlanKey(PhysicalPlan plan) throws StorageGroupNotSetException { - return getPlanSG(plan); - } - - /** - * We can sure that the storage group of all InsertTabletPlans in InsertMultiTabletsPlan are the - * same. this is done in {@link - * org.apache.iotdb.cluster.query.ClusterPlanRouter#splitAndRoutePlan(InsertMultiTabletsPlan)} - * - *

We can also sure that the storage group of all InsertRowPlans in InsertRowsPlan are the - * same. this is done in {@link - * org.apache.iotdb.cluster.query.ClusterPlanRouter#splitAndRoutePlan(InsertRowsPlan)} - * - * @return the sg that the plan belongs to - * @throws StorageGroupNotSetException if no sg found - */ - private PartialPath getPlanSG(PhysicalPlan plan) throws StorageGroupNotSetException { - PartialPath sgPath = null; - if (plan instanceof InsertMultiTabletsPlan) { - PartialPath deviceId = ((InsertMultiTabletsPlan) plan).getFirstDeviceId(); - sgPath = IoTDB.schemaProcessor.getBelongedStorageGroup(deviceId); - } else if (plan instanceof InsertRowsPlan) { - PartialPath path = ((InsertRowsPlan) plan).getFirstDeviceId(); - sgPath = IoTDB.schemaProcessor.getBelongedStorageGroup(path); - } else if (plan instanceof InsertPlan) { - PartialPath deviceId = ((InsertPlan) plan).getDevicePath(); - sgPath = IoTDB.schemaProcessor.getBelongedStorageGroup(deviceId); - } else if (plan instanceof CreateTimeSeriesPlan) { - PartialPath path = ((CreateTimeSeriesPlan) plan).getPath(); - sgPath = IoTDB.schemaProcessor.getBelongedStorageGroup(path); - } - return sgPath; - } - - private void provideLogToConsumers(PartialPath planKey, Log log) { - if (Timer.ENABLE_INSTRUMENTING) { - log.setEnqueueTime(System.nanoTime()); - } - consumerMap.computeIfAbsent(planKey, d -> new DataLogConsumer(name + "-" + d)).accept(log); - } - - private void drainConsumers() { - synchronized (consumerEmptyCondition) { - while (!allConsumersEmpty()) { - // wait until all consumers empty - try { - consumerEmptyCondition.wait(5); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - return; - } - } - } - } - - private boolean allConsumersEmpty() { - for (DataLogConsumer consumer : consumerMap.values()) { - if (!consumer.isEmpty()) { - if (logger.isDebugEnabled()) { - logger.debug("Consumer not empty: {}", consumer); - } - return false; - } - } - return true; - } - - private void applyInternal(Log log) { - long startTime = Statistic.RAFT_SENDER_DATA_LOG_APPLY.getOperationStartTime(); - embeddedApplier.apply(log); - if (Timer.ENABLE_INSTRUMENTING) { - Statistic.RAFT_SENDER_DATA_LOG_APPLY.calOperationCostTimeFromStart(startTime); - } - } - - private class DataLogConsumer implements Runnable, Consumer { - - private BlockingQueue logQueue = new ArrayBlockingQueue<>(4096); - private volatile long lastLogIndex; - private volatile long lastAppliedLogIndex; - private String name; - private Future future; - - public DataLogConsumer(String name) { - this.name = name; - } - - public boolean isEmpty() { - return lastLogIndex == lastAppliedLogIndex; - } - - @Override - public void run() { - // appliers have a higher priority than normal threads (like client threads and low - // priority background threads), to assure fast ingestion, but a lower priority than - // heartbeat threads - Thread.currentThread().setPriority(8); - Thread.currentThread().setName(name); - while (!Thread.currentThread().isInterrupted()) { - try { - Log log = logQueue.take(); - Statistic.RAFT_SENDER_IN_APPLY_QUEUE.calOperationCostTimeFromStart(log.getEnqueueTime()); - try { - applyInternal(log); - } finally { - lastAppliedLogIndex = log.getCurrLogIndex(); - if (isEmpty()) { - synchronized (consumerEmptyCondition) { - consumerEmptyCondition.notifyAll(); - } - } - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - break; - } catch (Exception e) { - logger.error("DataLogConsumer exits", e); - return; - } - } - 
logger.info("DataLogConsumer exits"); - } - - @Override - public void accept(Log log) { - if (future == null || future.isCancelled() || future.isDone()) { - if (future != null) { - try { - future.get(); - } catch (InterruptedException e) { - logger.error("Last applier thread exits unexpectedly", e); - Thread.currentThread().interrupt(); - } catch (ExecutionException e) { - logger.error("Last applier thread exits unexpectedly", e); - } - } - future = consumerPool.submit(this); - } - - try { - lastLogIndex = log.getCurrLogIndex(); - logQueue.put(log); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - log.setException(e); - log.setApplied(true); - lastAppliedLogIndex = log.getCurrLogIndex(); - } - } - - @Override - public String toString() { - return "DataLogConsumer{" - + "logQueue=" - + logQueue.size() - + ", lastLogIndex=" - + lastLogIndex - + ", lastAppliedLogIndex=" - + lastAppliedLogIndex - + ", name='" - + name - + '\'' - + '}'; - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/BaseApplier.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/BaseApplier.java deleted file mode 100644 index 996d083d775a..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/BaseApplier.java +++ /dev/null @@ -1,263 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
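The coordination trick in AsyncDataLogApplier above is worth isolating: per-storage-group consumers apply logs in parallel, and a log that spans several storage groups first waits for every consumer to drain, with each consumer notifying a shared monitor whenever its queue runs dry. A generic, hedged sketch of that barrier (all names here are illustrative, none come from the deleted class):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class ConsumerDrainBarrier {

  private final Object emptyCondition = new Object();
  private final Map<String, AtomicLong> pendingPerConsumer = new ConcurrentHashMap<>();

  /** Called when a log is handed to a single-group consumer. */
  public void enqueued(String consumer) {
    pendingPerConsumer.computeIfAbsent(consumer, c -> new AtomicLong()).incrementAndGet();
  }

  /** Called by a consumer after it finishes applying one log. */
  public void applied(String consumer) {
    if (pendingPerConsumer.get(consumer).decrementAndGet() == 0) {
      synchronized (emptyCondition) {
        emptyCondition.notifyAll(); // wake an exclusive log waiting for all queues to drain
      }
    }
  }

  /** Called before applying a log that touches several storage groups. */
  public void awaitAllDrained() throws InterruptedException {
    synchronized (emptyCondition) {
      while (anyPending()) {
        emptyCondition.wait(5); // re-check periodically, mirroring drainConsumers() above
      }
    }
  }

  private boolean anyPending() {
    return pendingPerConsumer.values().stream().anyMatch(counter -> counter.get() > 0);
  }
}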
- */ - -package org.apache.iotdb.cluster.log.applier; - -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.log.LogApplier; -import org.apache.iotdb.cluster.metadata.MetaPuller; -import org.apache.iotdb.cluster.query.ClusterPlanExecutor; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.BatchProcessException; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.metadata.PathNotExistException; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.exception.metadata.template.UndefinedTemplateException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.executor.PlanExecutor; -import org.apache.iotdb.db.qp.physical.BatchPlan; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertPlan; -import org.apache.iotdb.db.qp.physical.sys.DeleteTimeSeriesPlan; -import org.apache.iotdb.db.utils.SchemaUtils; -import org.apache.iotdb.rpc.TSStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collections; - -/** BaseApplier use PlanExecutor to execute PhysicalPlans. */ -abstract class BaseApplier implements LogApplier { - - private static final Logger logger = LoggerFactory.getLogger(BaseApplier.class); - - MetaGroupMember metaGroupMember; - private PlanExecutor queryExecutor; - - BaseApplier(MetaGroupMember metaGroupMember) { - this.metaGroupMember = metaGroupMember; - } - - /** - * @param plan - * @param dataGroupMember the data group member that is applying the log, null if the log is - * applied by a meta group member - * @throws QueryProcessException - * @throws StorageGroupNotSetException - * @throws StorageEngineException - */ - void applyPhysicalPlan(PhysicalPlan plan, DataGroupMember dataGroupMember) - throws QueryProcessException, StorageGroupNotSetException, StorageEngineException { - if (plan instanceof InsertPlan) { - processPlanWithTolerance((InsertPlan) plan, dataGroupMember); - } else if (plan != null && !plan.isQuery()) { - try { - getQueryExecutor().processNonQuery(plan); - } catch (BatchProcessException e) { - handleBatchProcessException(e, plan); - } catch (QueryProcessException e) { - if (e.getCause() instanceof StorageGroupNotSetException - || e.getCause() instanceof UndefinedTemplateException) { - executeAfterSync(plan); - } else { - throw e; - } - } catch (StorageGroupNotSetException e) { - executeAfterSync(plan); - } - } else if (plan != null) { - logger.error("Unsupported physical plan: {}", plan); - } - } - - private void handleBatchProcessException( - BatchProcessException e, InsertPlan plan, DataGroupMember dataGroupMember) - throws QueryProcessException, StorageGroupNotSetException, StorageEngineException { - if (IoTDBDescriptor.getInstance().getConfig().isEnablePartition()) { - TSStatus[] failingStatus = e.getFailingStatus(); - for (int i = 0; i < failingStatus.length; i++) { - TSStatus status = failingStatus[i]; - // skip succeeded plans in later execution - if (status 
!= null - && status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode() - && plan instanceof BatchPlan) { - ((BatchPlan) plan).setIsExecuted(i); - } - } - - boolean needRetry = false, hasError = false; - for (int i = 0, failingStatusLength = failingStatus.length; i < failingStatusLength; i++) { - TSStatus status = failingStatus[i]; - if (status != null) { - if (status.getCode() == TSStatusCode.TIMESERIES_NOT_EXIST.getStatusCode() - && plan instanceof BatchPlan) { - ((BatchPlan) plan).unsetIsExecuted(i); - needRetry = true; - } else if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - hasError = true; - } - } - } - if (hasError) { - throw e; - } - if (needRetry) { - pullTimeseriesSchema(plan, dataGroupMember.getHeader()); - plan.recoverFromFailure(); - getQueryExecutor().processNonQuery(plan); - } - } else { - throw e; - } - } - - private void handleBatchProcessException(BatchProcessException e, PhysicalPlan plan) - throws QueryProcessException, StorageEngineException, StorageGroupNotSetException { - TSStatus[] failingStatus = e.getFailingStatus(); - boolean needThrow = false; - for (int i = 0; i < failingStatus.length; i++) { - TSStatus status = failingStatus[i]; - // skip succeeded plans in later execution - if (status != null - && status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode() - && plan instanceof BatchPlan) { - ((BatchPlan) plan).setIsExecuted(i); - } - - if (plan instanceof DeleteTimeSeriesPlan) { - if (status != null && status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - if (status.getCode() == TSStatusCode.TIMESERIES_NOT_EXIST.getStatusCode()) { - logger.info("{} doesn't exist, it may has been deleted.", plan.getPaths().get(i)); - } else { - needThrow = true; - } - } - } - } - boolean needRetry = false; - for (int i = 0, failingStatusLength = failingStatus.length; i < failingStatusLength; i++) { - TSStatus status = failingStatus[i]; - if (status != null - && (status.getCode() == TSStatusCode.STORAGE_GROUP_NOT_EXIST.getStatusCode() - || status.getCode() == TSStatusCode.UNDEFINED_TEMPLATE.getStatusCode()) - && plan instanceof BatchPlan) { - ((BatchPlan) plan).unsetIsExecuted(i); - needRetry = true; - } - } - if (needRetry) { - executeAfterSync(plan); - return; - } - - if (!(plan instanceof DeleteTimeSeriesPlan) || needThrow) { - throw e; - } - } - - private void executeAfterSync(PhysicalPlan plan) - throws QueryProcessException, StorageGroupNotSetException, StorageEngineException { - try { - metaGroupMember.syncLeaderWithConsistencyCheck(true); - } catch (CheckConsistencyException ce) { - throw new QueryProcessException(ce.getMessage()); - } - getQueryExecutor().processNonQuery(plan); - } - - /** - * @param plan - * @param dataGroupMember the data group member that is applying the log, null if the log is - * applied by a meta group member - * @throws QueryProcessException - * @throws StorageGroupNotSetException - * @throws StorageEngineException - */ - private void processPlanWithTolerance(InsertPlan plan, DataGroupMember dataGroupMember) - throws QueryProcessException, StorageGroupNotSetException, StorageEngineException { - try { - getQueryExecutor().processNonQuery(plan); - } catch (BatchProcessException e) { - handleBatchProcessException(e, plan, dataGroupMember); - } catch (QueryProcessException | StorageGroupNotSetException | StorageEngineException e) { - if (IoTDBDescriptor.getInstance().getConfig().isEnablePartition()) { - // check if this is caused by metadata missing, if so, pull metadata and retry - Throwable 
metaMissingException = SchemaUtils.findMetaMissingException(e); - boolean causedByPathNotExist = metaMissingException instanceof PathNotExistException; - - if (causedByPathNotExist) { - if (logger.isDebugEnabled()) { - logger.debug( - "Timeseries is not found locally[{}], try pulling it from another group: {}", - metaGroupMember.getName(), - e.getCause().getMessage()); - } - pullTimeseriesSchema(plan, dataGroupMember.getHeader()); - plan.recoverFromFailure(); - getQueryExecutor().processNonQuery(plan); - } else { - throw e; - } - } else { - throw e; - } - } - } - - /** - * @param plan - * @param ignoredGroup do not pull schema from the group to avoid backward dependency - * @throws QueryProcessException - */ - private void pullTimeseriesSchema(InsertPlan plan, RaftNode ignoredGroup) - throws QueryProcessException { - try { - if (plan instanceof BatchPlan) { - MetaPuller.getInstance() - .pullTimeSeriesSchemas(((BatchPlan) plan).getPrefixPaths(), ignoredGroup); - } else { - PartialPath path = plan.getDevicePath(); - MetaPuller.getInstance() - .pullTimeSeriesSchemas(Collections.singletonList(path), ignoredGroup); - } - } catch (MetadataException e1) { - throw new QueryProcessException(e1); - } - } - - private PlanExecutor getQueryExecutor() throws QueryProcessException { - if (queryExecutor == null) { - queryExecutor = new ClusterPlanExecutor(metaGroupMember); - } - return queryExecutor; - } - - @TestOnly - public void setQueryExecutor(PlanExecutor queryExecutor) { - this.queryExecutor = queryExecutor; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/DataLogApplier.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/DataLogApplier.java deleted file mode 100644 index b2f138f71e8f..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/DataLogApplier.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
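Both retry paths in BaseApplier above share one shape: if the first attempt fails because local metadata lags the leader (an unset storage group, an undefined template, a missing timeseries), synchronize or pull the missing schema and retry exactly once, otherwise rethrow. A hedged, generic sketch of that shape with hypothetical functional stand-ins (the real code calls syncLeaderWithConsistencyCheck(true) or pullTimeseriesSchema, then processNonQuery again):

import java.util.function.Predicate;

public final class SyncRetry {

  interface Attempt {
    void run() throws Exception;
  }

  /**
   * Runs the attempt; if it fails with an exception the caller considers recoverable
   * (stale local metadata), runs the recovery step and retries exactly once.
   */
  static void runWithOneRetry(Attempt attempt, Attempt recover, Predicate<Exception> recoverable)
      throws Exception {
    try {
      attempt.run();
    } catch (Exception e) {
      if (!recoverable.test(e)) {
        throw e; // not a metadata-lag problem, surface it unchanged
      }
      recover.run(); // e.g. a leader sync or a schema pull
      attempt.run(); // second and final attempt
    }
  }
}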
- */ - -package org.apache.iotdb.cluster.log.applier; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.logtypes.AddNodeLog; -import org.apache.iotdb.cluster.log.logtypes.CloseFileLog; -import org.apache.iotdb.cluster.log.logtypes.PhysicalPlanLog; -import org.apache.iotdb.cluster.log.logtypes.RemoveNodeLog; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.utils.IOUtils; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.metadata.PathNotExistException; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.DeletePlan; -import org.apache.iotdb.db.qp.physical.crud.InsertMultiTabletsPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowsPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertTabletPlan; -import org.apache.iotdb.db.qp.physical.sys.DeleteTimeSeriesPlan; -import org.apache.iotdb.db.service.IoTDB; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * DataLogApplier applies logs like data insertion/deletion/update and timeseries creation to IoTDB. - */ -public class DataLogApplier extends BaseApplier { - - private static final Logger logger = LoggerFactory.getLogger(DataLogApplier.class); - - protected DataGroupMember dataGroupMember; - - public DataLogApplier(MetaGroupMember metaGroupMember, DataGroupMember dataGroupMember) { - super(metaGroupMember); - this.dataGroupMember = dataGroupMember; - } - - @Override - public void apply(Log log) { - logger.debug("DataMember [{}] start applying Log {}", dataGroupMember.getName(), log); - - try { - if (log instanceof AddNodeLog) { - ClusterIoTDB.getInstance() - .getDataGroupEngine() - .preAddNodeForDataGroup((AddNodeLog) log, dataGroupMember); - dataGroupMember.setAndSaveLastAppliedPartitionTableVersion( - ((AddNodeLog) log).getMetaLogIndex()); - } else if (log instanceof RemoveNodeLog) { - ClusterIoTDB.getInstance() - .getDataGroupEngine() - .preRemoveNodeForDataGroup((RemoveNodeLog) log, dataGroupMember); - dataGroupMember.setAndSaveLastAppliedPartitionTableVersion( - ((RemoveNodeLog) log).getMetaLogIndex()); - } else if (log instanceof PhysicalPlanLog) { - PhysicalPlanLog physicalPlanLog = (PhysicalPlanLog) log; - PhysicalPlan plan = physicalPlanLog.getPlan(); - applyPhysicalPlan(plan); - } else if (log instanceof CloseFileLog) { - CloseFileLog closeFileLog = ((CloseFileLog) log); - StorageEngine.getInstance() - .closeStorageGroupProcessor( - new PartialPath(closeFileLog.getStorageGroupName()), - closeFileLog.getPartitionId(), - closeFileLog.isSeq(), - false); - } else { - logger.error("Unsupported log: {}", log); - } - } catch (Exception e) { - Throwable rootCause = IOUtils.getRootCause(e); - if (!(rootCause instanceof PathNotExistException)) { - logger.debug("Exception occurred when applying {}", log, e); - } - log.setException(e); - } finally { - log.setApplied(true); - } - } - - public void 
applyPhysicalPlan(PhysicalPlan plan) - throws QueryProcessException, StorageGroupNotSetException, StorageEngineException { - if (plan instanceof DeletePlan) { - ((DeletePlan) plan).setPartitionFilter(dataGroupMember.getTimePartitionFilter()); - } else if (plan instanceof DeleteTimeSeriesPlan) { - ((DeleteTimeSeriesPlan) plan).setPartitionFilter(dataGroupMember.getTimePartitionFilter()); - } - if (plan instanceof InsertMultiTabletsPlan) { - applyInsert((InsertMultiTabletsPlan) plan); - } else if (plan instanceof InsertRowsPlan) { - applyInsert((InsertRowsPlan) plan); - } else if (plan instanceof InsertPlan) { - applyInsert((InsertPlan) plan); - } else { - applyPhysicalPlan(plan, dataGroupMember); - } - } - - private void applyInsert(InsertMultiTabletsPlan plan) - throws StorageGroupNotSetException, QueryProcessException, StorageEngineException { - boolean hasSync = false; - for (InsertTabletPlan insertTabletPlan : plan.getInsertTabletPlanList()) { - try { - IoTDB.schemaProcessor.getBelongedStorageGroup(insertTabletPlan.getDevicePath()); - } catch (StorageGroupNotSetException e) { - try { - if (!hasSync) { - metaGroupMember.syncLeaderWithConsistencyCheck(true); - hasSync = true; - } else { - throw new StorageEngineException(e.getMessage()); - } - } catch (CheckConsistencyException ce) { - throw new QueryProcessException(ce.getMessage()); - } - } - } - applyPhysicalPlan(plan, dataGroupMember); - } - - private void applyInsert(InsertRowsPlan plan) - throws StorageGroupNotSetException, QueryProcessException, StorageEngineException { - boolean hasSync = false; - for (InsertRowPlan insertRowPlan : plan.getInsertRowPlanList()) { - try { - IoTDB.schemaProcessor.getBelongedStorageGroup(insertRowPlan.getDevicePath()); - } catch (StorageGroupNotSetException e) { - try { - if (!hasSync) { - metaGroupMember.syncLeaderWithConsistencyCheck(true); - hasSync = true; - } else { - throw new StorageEngineException(e.getMessage()); - } - } catch (CheckConsistencyException ce) { - throw new QueryProcessException(ce.getMessage()); - } - } - } - applyPhysicalPlan(plan, dataGroupMember); - } - - private void applyInsert(InsertPlan plan) - throws StorageGroupNotSetException, QueryProcessException, StorageEngineException { - try { - IoTDB.schemaProcessor.getBelongedStorageGroup(plan.getDevicePath()); - } catch (StorageGroupNotSetException e) { - // the sg may not exist because the node does not catch up with the leader, retry after - // synchronization - try { - metaGroupMember.syncLeaderWithConsistencyCheck(true); - } catch (CheckConsistencyException ce) { - throw new QueryProcessException(ce.getMessage()); - } - } - applyPhysicalPlan(plan, dataGroupMember); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/MetaLogApplier.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/MetaLogApplier.java deleted file mode 100644 index 1c501f8f0fda..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/applier/MetaLogApplier.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.applier; - -import org.apache.iotdb.cluster.exception.ChangeMembershipException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.logtypes.AddNodeLog; -import org.apache.iotdb.cluster.log.logtypes.EmptyContentLog; -import org.apache.iotdb.cluster.log.logtypes.PhysicalPlanLog; -import org.apache.iotdb.cluster.log.logtypes.RemoveNodeLog; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** MetaLogApplier applies logs like node addition and storage group creation to IoTDB. */ -public class MetaLogApplier extends BaseApplier { - - private static final Logger logger = LoggerFactory.getLogger(MetaLogApplier.class); - private MetaGroupMember member; - - public MetaLogApplier(MetaGroupMember member) { - super(member); - this.member = member; - } - - @Override - public void apply(Log log) { - try { - logger.debug("MetaMember [{}] starts applying Log {}", metaGroupMember.getName(), log); - if (log instanceof AddNodeLog) { - applyAddNodeLog((AddNodeLog) log); - } else if (log instanceof PhysicalPlanLog) { - applyPhysicalPlan(((PhysicalPlanLog) log).getPlan(), null); - } else if (log instanceof RemoveNodeLog) { - applyRemoveNodeLog((RemoveNodeLog) log); - } else if (log instanceof EmptyContentLog) { - // Do nothing - } else { - logger.error("Unsupported log: {} {}", log.getClass().getName(), log); - } - } catch (Exception e) { - logger.debug("Exception occurred when executing {}", log, e); - log.setException(e); - } finally { - log.setApplied(true); - } - } - - private void applyAddNodeLog(AddNodeLog log) throws ChangeMembershipException { - if (!metaGroupMember.getPartitionTable().deserialize(log.getPartitionTable())) { - logger.info("Ignore previous change membership log"); - // ignore previous change membership log - return; - } - if (metaGroupMember.getCharacter() == NodeCharacter.LEADER) { - metaGroupMember.getCoordinator().sendLogToAllDataGroups(log); - } - member.applyAddNode(log); - } - - private void applyRemoveNodeLog(RemoveNodeLog log) throws ChangeMembershipException { - if (!metaGroupMember.getPartitionTable().deserialize(log.getPartitionTable())) { - // ignore previous change membership log - return; - } - if (metaGroupMember.getCharacter() == NodeCharacter.LEADER) { - metaGroupMember.getCoordinator().sendLogToAllDataGroups(log); - } - member.applyRemoveNode(log); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/CatchUpTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/CatchUpTask.java deleted file mode 100644 index f11f41eb8e6c..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/CatchUpTask.java +++ /dev/null @@ -1,403 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.catchup; - -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.EntryCompactedException; -import org.apache.iotdb.cluster.exception.LeaderUnknownException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.logtypes.EmptyContentLog; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.cluster.server.monitor.Peer; -import org.apache.iotdb.cluster.utils.ClientUtils; -import org.apache.iotdb.commons.utils.TestOnly; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Collections; -import java.util.Comparator; -import java.util.ConcurrentModificationException; -import java.util.List; - -public class CatchUpTask implements Runnable { - - private static final Logger logger = LoggerFactory.getLogger(CatchUpTask.class); - - private Node node; - private Peer peer; - private RaftMember raftMember; - private Snapshot snapshot; - private List logs; - private long lastLogIndex; - private boolean abort; - private String name; - private int raftId; - - public CatchUpTask(Node node, int raftId, Peer peer, RaftMember raftMember, long lastLogIdx) { - this.node = node; - this.raftId = raftId; - this.peer = peer; - this.raftMember = raftMember; - this.logs = Collections.emptyList(); - this.snapshot = null; - this.lastLogIndex = lastLogIdx; - this.name = raftMember.getName() + "@" + System.currentTimeMillis(); - } - - /** - * @return true if a matched index is found so that we can use logs only to catch up, or false if - * the catch up must be done with a snapshot. 
- * @throws TException - * @throws InterruptedException - */ - private boolean checkMatchIndex() - throws TException, InterruptedException, LeaderUnknownException { - - logger.debug("Checking the match index of {}", node); - long lo = 0; - long hi = 0; - long localFirstIndex = 0; - try { - // to avoid snapshot catch up when index is volatile - synchronized (raftMember.getLogManager()) { - localFirstIndex = raftMember.getLogManager().getFirstIndex(); - lo = Math.max(localFirstIndex, peer.getMatchIndex() + 1); - hi = raftMember.getLogManager().getLastLogIndex() + 1; - logs = raftMember.getLogManager().getEntries(lo, hi); - } - // this may result from peer's match index being changed concurrently, making the peer - // actually catch up now - if (logger.isInfoEnabled()) { - logger.info( - "{}: use {} logs of [{}, {}] to fix log inconsistency with node [{}], " - + "local first index: {}", - raftMember.getName(), - logs.size(), - lo, - hi, - node, - localFirstIndex); - } - } catch (ConcurrentModificationException e) { - // ignore - } catch (Exception e) { - logger.error("Unexpected error in logManager's getEntries during matchIndexCheck", e); - } - - if (logs.isEmpty()) { - return true; - } - - int index = findLastMatchIndex(logs); - if (index == -1) { - logger.info("{}, Cannot find matched of {} within [{}, {}] in memory", name, node, lo, hi); - if (!judgeUseLogsInDiskToCatchUp()) { - return false; - } - long startIndex = peer.getMatchIndex() + 1; - long endIndex = raftMember.getLogManager().getCommitLogIndex(); - List logsInDisk = getLogsInStableEntryManager(startIndex, endIndex); - if (!logsInDisk.isEmpty()) { - logger.info( - "{}, found {} logs in disk to catch up {} , startIndex={}, endIndex={}, memoryFirstIndex={}, getFirstLogIndex={}", - name, - logsInDisk.size(), - node, - startIndex, - endIndex, - localFirstIndex, - logsInDisk.get(0).getCurrLogIndex()); - logs = logsInDisk; - index = findLastMatchIndex(logs); - // the follower's matchIndex may have been updated - if (index == -1) { - return false; - } - } else { - logger.info( - "{}, Cannot find matched of {} within [{}, {}] in disk", - name, - node, - startIndex, - endIndex); - return false; - } - } - long newMatchedIndex = logs.get(index).getCurrLogIndex() - 1; - if (newMatchedIndex > lastLogIndex) { - logger.info( - "{}: matched index of {} has moved beyond last log index, node is " - + "self-catching-up, abort this catch up to avoid duplicates", - name, - node); - abort = true; - return true; - } - logger.info("{}: {} matches at {}", name, node, newMatchedIndex); - - peer.setMatchIndex(newMatchedIndex); - // if follower return RESPONSE.AGREE with this empty log, then start sending real logs from - // index. - logs.subList(0, index).clear(); - if (logger.isInfoEnabled()) { - if (logs.isEmpty()) { - logger.info("{}: {} has caught up by previous catch up", name, node); - } else { - logger.info( - "{}: makes {} catch up with {} and other {} logs", - name, - node, - logs.get(0), - logs.size()); - } - } - return true; - } - - @SuppressWarnings("squid:S1135") - private boolean judgeUseLogsInDiskToCatchUp() { - // TODO use log in disk to snapshot first, if the log not found on disk, then use snapshot. 
- if (!ClusterDescriptor.getInstance().getConfig().isEnableRaftLogPersistence()) { - return false; - } - // TODO judge the cost of snapshot and logs in disk - return ClusterDescriptor.getInstance().getConfig().isEnableUsePersistLogOnDiskToCatchUp(); - } - - private List getLogsInStableEntryManager(long startIndex, long endIndex) { - List logsInDisk = - raftMember.getLogManager().getStableEntryManager().getLogs(startIndex, endIndex); - logger.debug( - "{}, found {} logs in disk to catchup {}, startIndex={}, endIndex={}", - raftMember.getName(), - logsInDisk.size(), - node, - startIndex, - endIndex); - return logsInDisk; - } - - /** - * return the index of log whose previous log is matched, or -1 when can not found - * - * @param logs - * @return - * @throws LeaderUnknownException - * @throws TException - * @throws InterruptedException - */ - public int findLastMatchIndex(List logs) - throws LeaderUnknownException, TException, InterruptedException { - int start = 0; - int end = logs.size() - 1; - int matchedIndex = -1; - while (start <= end) { - int mid = start + (end - start) / 2; - if (checkMatchIndex(mid)) { - start = mid + 1; - matchedIndex = mid; - } else { - end = mid - 1; - } - } - return matchedIndex; - } - - /** - * @param index the index of a log in logs - * @return true if the previous log at logs[index] matches a log in the remote node, false if the - * corresponding log cannot be found - * @throws LeaderUnknownException - * @throws TException - * @throws InterruptedException - */ - private boolean checkMatchIndex(int index) - throws LeaderUnknownException, TException, InterruptedException { - Log log = logs.get(index); - synchronized (raftMember.getTerm()) { - // make sure this node is still a leader - if (raftMember.getCharacter() != NodeCharacter.LEADER) { - throw new LeaderUnknownException(raftMember.getAllNodes()); - } - } - - long prevLogIndex = log.getCurrLogIndex() - 1; - long prevLogTerm = getPrevLogTerm(index); - - if (prevLogTerm == -1) { - // prev log cannot be found, we cannot know whether is matches if it is not the first log - return prevLogIndex == -1; - } - - boolean matched = checkLogIsMatch(prevLogIndex, prevLogTerm); - raftMember.getLastCatchUpResponseTime().put(node, System.currentTimeMillis()); - logger.info( - "{} check {}'s matchIndex {} with log [{}]", - raftMember.getName(), - node, - matched ? 
"succeed" : "failed", - log); - return matched; - } - - /** - * @param logIndex the log index needs to check - * @param logTerm the log term need to check - * @return true if the log's index and term matches a log in the remote node, false if the - * corresponding log cannot be found - * @throws TException - * @throws InterruptedException - */ - private boolean checkLogIsMatch(long logIndex, long logTerm) - throws TException, InterruptedException { - boolean matched; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - RaftService.AsyncClient client = raftMember.getAsyncClient(node); - if (client == null) { - return false; - } - matched = - SyncClientAdaptor.matchTerm(client, node, logIndex, logTerm, raftMember.getHeader()); - } else { - Client client = raftMember.getSyncClient(node); - if (client == null) { - return false; - } - try { - matched = client.matchTerm(logIndex, logTerm, raftMember.getHeader()); - } catch (TException e) { - client.getInputProtocol().getTransport().close(); - throw e; - } finally { - ClientUtils.putBackSyncClient(client); - } - } - return matched; - } - - private long getPrevLogTerm(int index) { - long prevLogTerm = -1; - if (index > 0) { - prevLogTerm = logs.get(index - 1).getCurrLogTerm(); - } else { - try { - prevLogTerm = raftMember.getLogManager().getTerm(logs.get(0).getCurrLogIndex() - 1); - } catch (EntryCompactedException e) { - logger.info("Log [{}] is compacted during catchup", logs.get(0).getCurrLogIndex() - 1); - } - } - return prevLogTerm; - } - - private void doSnapshot() { - try { - raftMember.getLogManager().takeSnapshot(); - } catch (IOException e) { - logger.error("Unexpected error when taking snapshot.", e); - } - snapshot = raftMember.getLogManager().getSnapshot(peer.getMatchIndex()); - if (logger.isInfoEnabled()) { - logger.info("{}: Logs in {} are too old, catch up with snapshot", raftMember.getName(), node); - } - } - - /** Remove logs that are contained in the snapshot. */ - private void removeSnapshotLogs() { - Log logToSearch = new EmptyContentLog(snapshot.getLastLogIndex(), snapshot.getLastLogTerm()); - int pos = - Collections.binarySearch(logs, logToSearch, Comparator.comparingLong(Log::getCurrLogIndex)); - int prevSize = logs.size(); - if (pos >= 0) { - logs.subList(0, pos + 1).clear(); - } else { - int insertPos = -pos - 1; - if (insertPos > 0) { - logs.subList(0, insertPos).clear(); - } - } - logger.info("Logs are reduced from {} to {}", prevSize, logs.size()); - } - - @Override - public void run() { - try { - boolean findMatchedIndex = checkMatchIndex(); - if (abort) { - peer.resetInconsistentHeartbeatNum(); - raftMember.getLastCatchUpResponseTime().remove(node); - return; - } - boolean catchUpSucceeded; - if (!findMatchedIndex) { - logger.info("{}: performing a snapshot catch-up to {}", raftMember.getName(), node); - doSnapshot(); - // snapshot may overlap with logs - removeSnapshotLogs(); - SnapshotCatchUpTask task = - new SnapshotCatchUpTask(logs, snapshot, node, raftId, raftMember); - catchUpSucceeded = task.call(); - } else { - logger.info("{}: performing a log catch-up to {}", raftMember.getName(), node); - LogCatchUpTask task = new LogCatchUpTask(logs, node, raftId, raftMember); - catchUpSucceeded = task.call(); - } - if (catchUpSucceeded) { - // the catch up may be triggered by an old heartbeat, and the node may have already - // caught up, so logs can be empty - if (!logs.isEmpty() || snapshot != null) { - long lastIndex = - !logs.isEmpty() - ? 
logs.get(logs.size() - 1).getCurrLogIndex() - : snapshot.getLastLogIndex(); - peer.setMatchIndex(lastIndex); - } - if (logger.isInfoEnabled()) { - logger.info( - "{}: Catch up {} finished, update it's matchIndex to {}", - raftMember.getName(), - node, - peer.getMatchIndex()); - } - peer.resetInconsistentHeartbeatNum(); - } - - } catch (LeaderUnknownException e) { - logger.warn("Catch up {} failed because leadership is lost", node); - } catch (Exception e) { - logger.error("Catch up {} errored", node, e); - } - // the next catch up is enabled - raftMember.getLastCatchUpResponseTime().remove(node); - } - - @TestOnly - public void setLogs(List logs) { - this.logs = logs; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/LogCatchUpTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/LogCatchUpTask.java deleted file mode 100644 index 5cd29424e097..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/LogCatchUpTask.java +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.catchup; - -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.LeaderUnknownException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntriesRequest; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.handlers.caller.LogCatchUpHandler; -import org.apache.iotdb.cluster.server.handlers.caller.LogCatchUpInBatchHandler; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.cluster.utils.ClientUtils; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.conf.IoTDBDescriptor; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.atomic.AtomicBoolean; - -/** LogCatchUpTask sends a list of logs to a node to make the node keep up with the leader. 
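Each entry sent during a log catch-up carries a consistency-check pointer: prevLogIndex is the entry's own index minus one, and prevLogTerm is taken from the local log manager for the first entry and from the previously sent entry afterwards. A minimal standalone sketch of that chaining, using a hypothetical Entry class in place of the real Log and AppendEntryRequest types:

import java.util.Arrays;
import java.util.List;

public class PrevPointerSketch {

  static final class Entry {
    final long index;
    final long term;

    Entry(long index, long term) {
      this.index = index;
      this.term = term;
    }
  }

  /** Prints the consistency-check pointer sent along with each entry of a batch. */
  static void printPrevPointers(List<Entry> entries, long termBeforeFirst) {
    for (int i = 0; i < entries.size(); i++) {
      Entry current = entries.get(i);
      long prevIndex = current.index - 1;
      // the first entry needs the local log store for its predecessor's term;
      // every later entry can reuse the term of the entry sent just before it
      long prevTerm = (i == 0) ? termBeforeFirst : entries.get(i - 1).term;
      System.out.println("entry " + current.index + " -> prev(" + prevIndex + ", " + prevTerm + ")");
    }
  }

  public static void main(String[] args) {
    List<Entry> entries =
        Arrays.asList(new Entry(10, 2), new Entry(11, 2), new Entry(12, 3));
    printPrevPointers(entries, 1); // prev(9, 1), prev(10, 2), prev(11, 2)
  }
}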
*/ -@SuppressWarnings("java:S2274") // enable timeout -public class LogCatchUpTask implements Callable { - - // sending logs may take longer than normal communications - private static final long SEND_LOGS_WAIT_MS = ClusterConstant.getWriteOperationTimeoutMS(); - private static final Logger logger = LoggerFactory.getLogger(LogCatchUpTask.class); - Node node; - RaftMember raftMember; - private List logs; - private boolean useBatch = ClusterDescriptor.getInstance().getConfig().isUseBatchInLogCatchUp(); - boolean abort = false; - private int raftId; - - LogCatchUpTask(List logs, Node node, int raftId, RaftMember raftMember) { - this.logs = logs; - this.node = node; - this.raftId = raftId; - this.raftMember = raftMember; - } - - @TestOnly - LogCatchUpTask(List logs, Node node, int raftId, RaftMember raftMember, boolean useBatch) { - this.logs = logs; - this.node = node; - this.raftId = raftId; - this.raftMember = raftMember; - this.useBatch = useBatch; - } - - @TestOnly - void setUseBatch(boolean useBatch) { - this.useBatch = useBatch; - } - - void doLogCatchUp() throws TException, InterruptedException, LeaderUnknownException { - AppendEntryRequest request = new AppendEntryRequest(); - if (raftMember.getHeader() != null) { - request.setHeader(raftMember.getHeader()); - } - request.setLeader(raftMember.getThisNode()); - request.setLeaderCommit(raftMember.getLogManager().getCommitLogIndex()); - - for (int i = 0; i < logs.size() && !abort; i++) { - Log log = logs.get(i); - synchronized (raftMember.getTerm()) { - // make sure this node is still a leader - if (raftMember.getCharacter() != NodeCharacter.LEADER) { - throw new LeaderUnknownException(raftMember.getAllNodes()); - } - request.setTerm(raftMember.getTerm().get()); - } - request.setPrevLogIndex(log.getCurrLogIndex() - 1); - if (i == 0) { - try { - request.setPrevLogTerm(raftMember.getLogManager().getTerm(log.getCurrLogIndex() - 1)); - } catch (Exception e) { - logger.error("getTerm failed for newly append entries", e); - } - } else { - request.setPrevLogTerm(logs.get(i - 1).getCurrLogTerm()); - } - - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - abort = !appendEntryAsync(log, request); - } else { - abort = !appendEntrySync(log, request); - } - } - } - - private boolean appendEntryAsync(Log log, AppendEntryRequest request) - throws TException, InterruptedException { - LogCatchUpHandler handler = getCatchUpHandler(log, request); - synchronized (handler.getAppendSucceed()) { - AsyncClient client = raftMember.getAsyncClient(node); - if (client == null) { - return false; - } - client.appendEntry(request, handler); - raftMember.getLastCatchUpResponseTime().put(node, System.currentTimeMillis()); - handler.getAppendSucceed().wait(ClusterConstant.getWriteOperationTimeoutMS()); - } - return handler.getAppendSucceed().get(); - } - - private LogCatchUpHandler getCatchUpHandler(Log log, AppendEntryRequest request) { - AtomicBoolean appendSucceed = new AtomicBoolean(false); - LogCatchUpHandler handler = new LogCatchUpHandler(); - handler.setAppendSucceed(appendSucceed); - handler.setRaftMember(raftMember); - handler.setFollower(node); - handler.setLog(log); - request.setEntry(log.serialize()); - return handler; - } - - private boolean appendEntrySync(Log log, AppendEntryRequest request) { - LogCatchUpHandler handler = getCatchUpHandler(log, request); - - Client client = raftMember.getSyncClient(node); - if (client == null) { - logger.error("No available client for {} when append entry", node); - return false; - } - - try { - 
long result = client.appendEntry(request); - handler.onComplete(result); - return handler.getAppendSucceed().get(); - } catch (TException e) { - client.getInputProtocol().getTransport().close(); - handler.onError(e); - return false; - } finally { - ClientUtils.putBackSyncClient(client); - } - } - - private AppendEntriesRequest prepareRequest(List logList, int startPos) { - AppendEntriesRequest request = new AppendEntriesRequest(); - - if (raftMember.getHeader() != null) { - request.setHeader(raftMember.getHeader()); - } - request.setLeader(raftMember.getThisNode()); - request.setLeaderCommit(raftMember.getLogManager().getCommitLogIndex()); - - synchronized (raftMember.getTerm()) { - // make sure this node is still a leader - if (raftMember.getCharacter() != NodeCharacter.LEADER) { - logger.debug("Leadership is lost when doing a catch-up to {}, aborting", node); - abort = true; - return null; - } - request.setTerm(raftMember.getTerm().get()); - } - - request.setEntries(logList); - // set index for raft - request.setPrevLogIndex(logs.get(startPos).getCurrLogIndex() - 1); - if (startPos != 0) { - request.setPrevLogTerm(logs.get(startPos - 1).getCurrLogTerm()); - } else { - try { - request.setPrevLogTerm( - raftMember.getLogManager().getTerm(logs.get(0).getCurrLogIndex() - 1)); - } catch (Exception e) { - logger.error("getTerm failed for newly append entries", e); - } - } - logger.debug("{}, node={} catchup request={}", raftMember.getName(), node, request); - return request; - } - - private void doLogCatchUpInBatch() throws TException, InterruptedException { - List logList = new ArrayList<>(); - long totalLogSize = 0; - int firstLogPos = 0; - boolean batchFull; - - for (int i = 0; i < logs.size() && !abort; i++) { - - ByteBuffer logData = logs.get(i).serialize(); - int logSize = logData.array().length; - if (logSize - > IoTDBDescriptor.getInstance().getConfig().getThriftMaxFrameSize() - - IoTDBConstant.LEFT_SIZE_IN_REQUEST) { - logger.warn( - "the frame size {} of thrift is too small", - IoTDBDescriptor.getInstance().getConfig().getThriftMaxFrameSize()); - abort = true; - return; - } - - totalLogSize += logSize; - // we should send logs who's size is smaller than the max frame size of thrift - // left 200 byte for other fields of AppendEntriesRequest - // send at most 100 logs a time to avoid long latency - if (totalLogSize - > IoTDBDescriptor.getInstance().getConfig().getThriftMaxFrameSize() - - IoTDBConstant.LEFT_SIZE_IN_REQUEST) { - // batch oversize, send previous batch and add the log to a new batch - sendBatchLogs(logList, firstLogPos); - logList.add(logData); - firstLogPos = i; - totalLogSize = logSize; - } else { - // just add the log the batch - logList.add(logData); - } - - batchFull = logList.size() >= ClusterConstant.LOG_NUM_IN_BATCH; - if (batchFull) { - sendBatchLogs(logList, firstLogPos); - firstLogPos = i + 1; - totalLogSize = 0; - } - } - - if (!logList.isEmpty()) { - sendBatchLogs(logList, firstLogPos); - } - } - - private void sendBatchLogs(List logList, int firstLogPos) - throws TException, InterruptedException { - if (logger.isInfoEnabled()) { - logger.info( - "{} send logs from {} num {} for {}", - raftMember.getThisNode(), - logs.get(firstLogPos).getCurrLogIndex(), - logList.size(), - node); - } - AppendEntriesRequest request = prepareRequest(logList, firstLogPos); - if (request == null) { - return; - } - // do append entries - if (logger.isInfoEnabled()) { - logger.info("{}: sending {} logs to {}", raftMember.getName(), logList.size(), node); - } - if 
(ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - abort = !appendEntriesAsync(logList, request); - } else { - abort = !appendEntriesSync(logList, request); - } - if (!abort && logger.isInfoEnabled()) { - logger.info("{}: sent {} logs to {}", raftMember.getName(), logList.size(), node); - } - logList.clear(); - } - - private boolean appendEntriesAsync(List logList, AppendEntriesRequest request) - throws TException, InterruptedException { - AtomicBoolean appendSucceed = new AtomicBoolean(false); - - LogCatchUpInBatchHandler handler = new LogCatchUpInBatchHandler(); - handler.setAppendSucceed(appendSucceed); - handler.setRaftMember(raftMember); - handler.setFollower(node); - handler.setLogs(logList); - synchronized (appendSucceed) { - appendSucceed.set(false); - AsyncClient client = raftMember.getAsyncClient(node); - if (client == null) { - return false; - } - client.appendEntries(request, handler); - raftMember.getLastCatchUpResponseTime().put(node, System.currentTimeMillis()); - appendSucceed.wait(SEND_LOGS_WAIT_MS); - } - return appendSucceed.get(); - } - - private boolean appendEntriesSync(List logList, AppendEntriesRequest request) { - AtomicBoolean appendSucceed = new AtomicBoolean(false); - LogCatchUpInBatchHandler handler = new LogCatchUpInBatchHandler(); - handler.setAppendSucceed(appendSucceed); - handler.setRaftMember(raftMember); - handler.setFollower(node); - handler.setLogs(logList); - - Client client = raftMember.getSyncClient(node); - if (client == null) { - logger.error("No available client for {} when append entries", node); - return false; - } - try { - long result = client.appendEntries(request); - handler.onComplete(result); - return appendSucceed.get(); - } catch (TException e) { - client.getInputProtocol().getTransport().close(); - handler.onError(e); - logger.warn("Failed logs: {}, first index: {}", logList, request.prevLogIndex + 1); - return false; - } finally { - ClientUtils.putBackSyncClient(client); - } - } - - @Override - public Boolean call() throws TException, InterruptedException, LeaderUnknownException { - if (logs.isEmpty()) { - return true; - } - - if (useBatch) { - doLogCatchUpInBatch(); - } else { - doLogCatchUp(); - } - logger.info("{}: Catch up {} finished with result {}", raftMember.getName(), node, !abort); - - // the next catch up is enabled - raftMember.getLastCatchUpResponseTime().remove(node); - return !abort; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/SnapshotCatchUpTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/SnapshotCatchUpTask.java deleted file mode 100644 index b11f8861ab99..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/catchup/SnapshotCatchUpTask.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.catchup; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.LeaderUnknownException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client; -import org.apache.iotdb.cluster.rpc.thrift.SendSnapshotRequest; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.handlers.caller.SnapshotCatchUpHandler; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.cluster.utils.ClientUtils; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.ByteBuffer; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * SnapshotCatchUpTask first sends the snapshot to the stale node then sends the logs to the node. - */ -public class SnapshotCatchUpTask extends LogCatchUpTask implements Callable { - - private static final Logger logger = LoggerFactory.getLogger(SnapshotCatchUpTask.class); - - // sending a snapshot may take longer than normal communications - private static final long SEND_SNAPSHOT_WAIT_MS = - ClusterDescriptor.getInstance().getConfig().getCatchUpTimeoutMS(); - private Snapshot snapshot; - - SnapshotCatchUpTask( - List logs, Snapshot snapshot, Node node, int raftId, RaftMember raftMember) { - super(logs, node, raftId, raftMember); - this.snapshot = snapshot; - } - - private void doSnapshotCatchUp() throws TException, InterruptedException, LeaderUnknownException { - SendSnapshotRequest request = new SendSnapshotRequest(); - if (raftMember.getHeader() != null) { - request.setHeader(raftMember.getHeader()); - } - logger.info("Start to send snapshot to {}", node); - ByteBuffer data = snapshot.serialize(); - if (logger.isInfoEnabled()) { - logger.info("Do snapshot catch up with size {}", data.array().length); - } - request.setSnapshotBytes(data); - - synchronized (raftMember.getTerm()) { - // make sure this node is still a leader - if (raftMember.getCharacter() != NodeCharacter.LEADER) { - throw new LeaderUnknownException(raftMember.getAllNodes()); - } - } - - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - abort = !sendSnapshotAsync(request); - } else { - abort = !sendSnapshotSync(request); - } - } - - @SuppressWarnings("java:S2274") // enable timeout - private boolean sendSnapshotAsync(SendSnapshotRequest request) - throws TException, InterruptedException { - AtomicBoolean succeed = new AtomicBoolean(false); - SnapshotCatchUpHandler handler = new SnapshotCatchUpHandler(succeed, node, snapshot); - AsyncClient client = raftMember.getAsyncClient(node); - if (client == null) { - logger.info("{}: client null for node {}", raftMember.getThisNode(), node); - abort = true; - return false; - } - - logger.info( - "{}: the snapshot request size={}", - raftMember.getName(), - request.getSnapshotBytes().length); - synchronized (succeed) { - client.sendSnapshot(request, handler); - raftMember.getLastCatchUpResponseTime().put(node, System.currentTimeMillis()); - succeed.wait(SEND_SNAPSHOT_WAIT_MS); - } - if (logger.isInfoEnabled()) { - logger.info("send snapshot to node {} 
success {}", raftMember.getThisNode(), succeed.get()); - } - return succeed.get(); - } - - private boolean sendSnapshotSync(SendSnapshotRequest request) throws TException { - logger.info( - "{}: sending a snapshot request size={} to {}", - raftMember.getName(), - request.getSnapshotBytes().length, - node); - Client client = raftMember.getSyncClient(node); - if (client == null) { - return false; - } - try { - try { - client.sendSnapshot(request); - logger.info("{}: snapshot is sent to {}", raftMember.getName(), node); - return true; - } catch (TException e) { - client.getInputProtocol().getTransport().close(); - throw e; - } - } finally { - ClientUtils.putBackSyncClient(client); - } - } - - @Override - public Boolean call() throws InterruptedException, TException, LeaderUnknownException { - doSnapshotCatchUp(); - if (abort) { - logger.warn("{}: Snapshot catch up {} failed", raftMember.getName(), node); - raftMember.getLastCatchUpResponseTime().remove(node); - return false; - } - logger.info( - "{}: Snapshot catch up {} finished, begin to catch up log", raftMember.getName(), node); - doLogCatchUp(); - if (!abort) { - logger.info("{}: Catch up {} finished", raftMember.getName(), node); - } else { - logger.warn("{}: Log catch up {} failed", raftMember.getName(), node); - } - // the next catch up is enabled - raftMember.getLastCatchUpResponseTime().remove(node); - return !abort; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/AddNodeLog.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/AddNodeLog.java deleted file mode 100644 index 83c72a864876..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/AddNodeLog.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.logtypes; - -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.NodeSerializeUtils; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Objects; - -/** AddNodeLog records the operation of adding a node into this cluster. 
*/ -public class AddNodeLog extends Log { - - private ByteBuffer partitionTable; - - private Node newNode; - - private long metaLogIndex; - - public AddNodeLog(ByteBuffer partitionTable, Node newNode) { - this.partitionTable = partitionTable; - this.newNode = newNode; - } - - public AddNodeLog() {} - - public long getMetaLogIndex() { - return metaLogIndex; - } - - public void setMetaLogIndex(long metaLogIndex) { - this.metaLogIndex = metaLogIndex; - } - - public void setPartitionTable(ByteBuffer partitionTable) { - this.partitionTable = partitionTable; - } - - public void setNewNode(Node newNode) { - this.newNode = newNode; - } - - public Node getNewNode() { - return newNode; - } - - public ByteBuffer getPartitionTable() { - partitionTable.rewind(); - return partitionTable; - } - - @Override - public ByteBuffer serialize() { - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - try (DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream)) { - dataOutputStream.writeByte(Types.ADD_NODE.ordinal()); - dataOutputStream.writeLong(getCurrLogIndex()); - dataOutputStream.writeLong(getCurrLogTerm()); - dataOutputStream.writeLong(getMetaLogIndex()); - - NodeSerializeUtils.serialize(newNode, dataOutputStream); - - dataOutputStream.writeInt(partitionTable.array().length); - dataOutputStream.write(partitionTable.array()); - } catch (IOException e) { - // ignored - } - return ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - } - - @Override - public void deserialize(ByteBuffer buffer) { - - // marker is previously read, remaining fields: - // curr index(long), curr term(long) - // ipLength(int), inBytes(byte[]), port(int), identifier(int), dataPort(int) - setCurrLogIndex(buffer.getLong()); - setCurrLogTerm(buffer.getLong()); - setMetaLogIndex(buffer.getLong()); - - newNode = new Node(); - NodeSerializeUtils.deserialize(newNode, buffer); - - int len = buffer.getInt(); - byte[] data = new byte[len]; - System.arraycopy(buffer.array(), buffer.position(), data, 0, len); - partitionTable = ByteBuffer.wrap(data); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - if (!super.equals(o)) { - return false; - } - AddNodeLog that = (AddNodeLog) o; - return Objects.equals(newNode, that.newNode) - && Objects.equals(partitionTable, that.partitionTable); - } - - @Override - public String toString() { - return "AddNodeLog{" + "newNode=" + newNode.toString() + '}'; - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), newNode, partitionTable); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/CloseFileLog.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/CloseFileLog.java deleted file mode 100644 index b9a4a87490d6..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/CloseFileLog.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.logtypes; - -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.commons.utils.SerializeUtils; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Objects; - -import static org.apache.iotdb.cluster.log.Log.Types.CLOSE_FILE; - -public class CloseFileLog extends Log { - - private String storageGroupName; - private boolean isSeq; - private long partitionId; - - public CloseFileLog() {} - - public CloseFileLog(String storageGroupName, long partitionId, boolean isSeq) { - this.storageGroupName = storageGroupName; - this.isSeq = isSeq; - this.partitionId = partitionId; - } - - @Override - public ByteBuffer serialize() { - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(DEFAULT_BUFFER_SIZE); - try (DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream)) { - dataOutputStream.writeByte((byte) CLOSE_FILE.ordinal()); - - dataOutputStream.writeLong(getCurrLogIndex()); - dataOutputStream.writeLong(getCurrLogTerm()); - - SerializeUtils.serialize(storageGroupName, dataOutputStream); - dataOutputStream.writeBoolean(isSeq); - dataOutputStream.writeLong(partitionId); - - } catch (IOException e) { - // unreachable - } - - return ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - } - - @Override - public void deserialize(ByteBuffer buffer) { - - setCurrLogIndex(buffer.getLong()); - setCurrLogTerm(buffer.getLong()); - - storageGroupName = SerializeUtils.deserializeString(buffer); - isSeq = buffer.get() == 1; - partitionId = buffer.getLong(); - } - - public boolean isSeq() { - return isSeq; - } - - public String getStorageGroupName() { - return storageGroupName; - } - - public long getPartitionId() { - return partitionId; - } - - @Override - public String toString() { - return "CloseFileLog{" - + "storageGroupName='" - + storageGroupName - + '\'' - + ", isSeq=" - + isSeq - + ", partitionId=" - + partitionId - + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - if (!super.equals(o)) { - return false; - } - CloseFileLog that = (CloseFileLog) o; - return isSeq == that.isSeq - && Objects.equals(storageGroupName, that.storageGroupName) - && partitionId == that.partitionId; - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), storageGroupName, partitionId, isSeq); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/EmptyContentLog.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/EmptyContentLog.java deleted file mode 100644 index 1e785d5f74fb..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/EmptyContentLog.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.logtypes; - -import org.apache.iotdb.cluster.log.Log; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; - -import static org.apache.iotdb.cluster.log.Log.Types.EMPTY_CONTENT; - -public class EmptyContentLog extends Log { - - public EmptyContentLog() {} - - public EmptyContentLog(long index, long term) { - this.setCurrLogIndex(index); - this.setCurrLogTerm(term); - } - - @Override - public ByteBuffer serialize() { - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(DEFAULT_BUFFER_SIZE); - try (DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream)) { - dataOutputStream.writeByte((byte) EMPTY_CONTENT.ordinal()); - dataOutputStream.writeLong(getCurrLogIndex()); - dataOutputStream.writeLong(getCurrLogTerm()); - } catch (IOException e) { - // unreachable - } - return ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - } - - @Override - public void deserialize(ByteBuffer buffer) { - setCurrLogIndex(buffer.getLong()); - setCurrLogTerm(buffer.getLong()); - } - - @Override - public String toString() { - return "term:" + getCurrLogTerm() + ",index:" + getCurrLogIndex(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/LargeTestLog.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/LargeTestLog.java deleted file mode 100644 index 32e4a1c2fd8d..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/LargeTestLog.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log.logtypes; - -import org.apache.iotdb.cluster.log.Log; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Objects; - -import static org.apache.iotdb.cluster.log.Log.Types.TEST_LARGE_CONTENT; - -public class LargeTestLog extends Log { - private ByteBuffer data; - - public LargeTestLog() { - data = ByteBuffer.wrap(new byte[8192]); - } - - @Override - public ByteBuffer serialize() { - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(DEFAULT_BUFFER_SIZE); - try (DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream)) { - dataOutputStream.writeByte((byte) TEST_LARGE_CONTENT.ordinal()); - dataOutputStream.writeLong(getCurrLogIndex()); - dataOutputStream.writeLong(getCurrLogTerm()); - dataOutputStream.write(data.array()); - } catch (IOException e) { - // unreachable - } - return ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - } - - @Override - public void deserialize(ByteBuffer buffer) { - setCurrLogIndex(buffer.getLong()); - setCurrLogTerm(buffer.getLong()); - data.put(buffer); - } - - @Override - public boolean equals(Object obj) { - if (!(obj instanceof LargeTestLog)) { - return false; - } - LargeTestLog obj1 = (LargeTestLog) obj; - return getCurrLogIndex() == obj1.getCurrLogIndex() && getCurrLogTerm() == obj1.getCurrLogTerm(); - } - - @Override - public int hashCode() { - return Objects.hash(getCurrLogIndex(), getCurrLogTerm()); - } - - @Override - public String toString() { - return "LargeTestLog{" + getCurrLogIndex() + "-" + getCurrLogTerm() + "}"; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/PhysicalPlanLog.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/PhysicalPlanLog.java deleted file mode 100644 index b0ed72e90bbd..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/PhysicalPlanLog.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.logtypes; - -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Objects; - -import static org.apache.iotdb.cluster.log.Log.Types.PHYSICAL_PLAN; - -/** PhysicalPlanLog contains a non-partitioned physical plan like set storage group. 
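The log types in this package share one wire convention: serialize() writes a one-byte type marker followed by the current index and term (plus type-specific fields), while deserialize() expects the marker to have already been consumed by the caller. A hedged sketch of a reader that follows this layout; the marker constants and the dispatcher are made up for illustration and are not the project's actual parser:

import java.nio.ByteBuffer;

public class LogMarkerDispatchSketch {

  // marker values here are invented for the example
  static final byte ADD_NODE = 0;
  static final byte CLOSE_FILE = 2;

  static String describe(ByteBuffer buffer) {
    byte marker = buffer.get();    // the marker is read once, up front
    long index = buffer.getLong(); // every log type then starts with index and term
    long term = buffer.getLong();
    switch (marker) {
      case ADD_NODE:
        return "AddNodeLog index=" + index + " term=" + term;
      case CLOSE_FILE:
        return "CloseFileLog index=" + index + " term=" + term;
      default:
        return "unknown marker " + marker;
    }
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.allocate(17);
    buf.put(CLOSE_FILE).putLong(42L).putLong(7L);
    buf.flip();
    System.out.println(describe(buf)); // CloseFileLog index=42 term=7
  }
}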
*/ -public class PhysicalPlanLog extends Log { - - private static final Logger logger = LoggerFactory.getLogger(PhysicalPlanLog.class); - private PhysicalPlan plan; - - public PhysicalPlanLog() {} - - public PhysicalPlanLog(PhysicalPlan plan) { - this.plan = plan; - } - - @Override - public ByteBuffer serialize() { - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(DEFAULT_BUFFER_SIZE); - try (DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream)) { - dataOutputStream.writeByte((byte) PHYSICAL_PLAN.ordinal()); - - dataOutputStream.writeLong(getCurrLogIndex()); - dataOutputStream.writeLong(getCurrLogTerm()); - - plan.serialize(dataOutputStream); - } catch (IOException e) { - // unreachable - } - - return ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - } - - @Override - public void deserialize(ByteBuffer buffer) { - setCurrLogIndex(buffer.getLong()); - setCurrLogTerm(buffer.getLong()); - - try { - plan = PhysicalPlan.Factory.create(buffer); - } catch (IOException | IllegalPathException e) { - logger.error( - "Cannot parse a physical {}:{} plan {}", - getCurrLogIndex(), - getCurrLogTerm(), - buffer.array().length, - e); - } - } - - public PhysicalPlan getPlan() { - return plan; - } - - public void setPlan(PhysicalPlan plan) { - this.plan = plan; - } - - @Override - public String toString() { - return plan + ",term:" + getCurrLogTerm() + ",index:" + getCurrLogIndex(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - if (!super.equals(o)) { - return false; - } - PhysicalPlanLog that = (PhysicalPlanLog) o; - return Objects.equals(plan, that.plan); - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), plan); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/RemoveNodeLog.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/RemoveNodeLog.java deleted file mode 100644 index ea06cfef9fcc..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/logtypes/RemoveNodeLog.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log.logtypes; - -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.NodeSerializeUtils; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Objects; - -public class RemoveNodeLog extends Log { - - private ByteBuffer partitionTable; - - private Node removedNode; - - private long metaLogIndex; - - public RemoveNodeLog(ByteBuffer partitionTable, Node removedNode) { - this.partitionTable = partitionTable; - this.removedNode = removedNode; - } - - public RemoveNodeLog() {} - - public long getMetaLogIndex() { - return metaLogIndex; - } - - public void setMetaLogIndex(long metaLogIndex) { - this.metaLogIndex = metaLogIndex; - } - - public ByteBuffer getPartitionTable() { - partitionTable.rewind(); - return partitionTable; - } - - public void setPartitionTable(ByteBuffer partitionTable) { - this.partitionTable = partitionTable; - } - - @Override - public ByteBuffer serialize() { - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - try (DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream)) { - dataOutputStream.writeByte(Types.REMOVE_NODE.ordinal()); - dataOutputStream.writeLong(getCurrLogIndex()); - dataOutputStream.writeLong(getCurrLogTerm()); - dataOutputStream.writeLong(getMetaLogIndex()); - - NodeSerializeUtils.serialize(removedNode, dataOutputStream); - - dataOutputStream.writeInt(partitionTable.array().length); - dataOutputStream.write(partitionTable.array()); - } catch (IOException e) { - // ignored - } - return ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - } - - @Override - public void deserialize(ByteBuffer buffer) { - setCurrLogIndex(buffer.getLong()); - setCurrLogTerm(buffer.getLong()); - setMetaLogIndex(buffer.getLong()); - - removedNode = new Node(); - NodeSerializeUtils.deserialize(removedNode, buffer); - - int len = buffer.getInt(); - byte[] data = new byte[len]; - System.arraycopy(buffer.array(), buffer.position(), data, 0, len); - partitionTable = ByteBuffer.wrap(data); - } - - public Node getRemovedNode() { - return removedNode; - } - - public void setRemovedNode(Node removedNode) { - this.removedNode = removedNode; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - if (!super.equals(o)) { - return false; - } - RemoveNodeLog that = (RemoveNodeLog) o; - return Objects.equals(removedNode, that.removedNode) - && Objects.equals(partitionTable, that.partitionTable); - } - - @Override - public String toString() { - return "RemoveNodeLog{" + "removedNode=" + removedNode.toString() + '}'; - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), removedNode, partitionTable); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/CommittedEntryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/CommittedEntryManager.java deleted file mode 100644 index c1ccabce9da1..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/CommittedEntryManager.java +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.manage; - -import org.apache.iotdb.cluster.exception.EntryCompactedException; -import org.apache.iotdb.cluster.exception.EntryUnavailableException; -import org.apache.iotdb.cluster.exception.TruncateCommittedEntryException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.logtypes.EmptyContentLog; -import org.apache.iotdb.cluster.log.manage.serializable.LogManagerMeta; -import org.apache.iotdb.commons.utils.TestOnly; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -public class CommittedEntryManager { - - private static final Logger logger = LoggerFactory.getLogger(CommittedEntryManager.class); - - // memory cache for logs which have been persisted in disk. - private List entries; - - private long entryTotalMemSize; - - /** - * Note that it is better to use applyingSnapshot to update dummy entry immediately after this - * instance is created. - */ - CommittedEntryManager(int maxNumOfLogInMem) { - entries = Collections.synchronizedList(new ArrayList<>(maxNumOfLogInMem)); - entries.add(new EmptyContentLog(-1, -1)); - entryTotalMemSize = 0; - } - - CommittedEntryManager(int maxNumOfLogInMem, LogManagerMeta meta) { - entries = Collections.synchronizedList(new ArrayList<>(maxNumOfLogInMem)); - entries.add( - new EmptyContentLog( - meta.getMaxHaveAppliedCommitIndex() == -1 - ? -1 - : meta.getMaxHaveAppliedCommitIndex() - 1, - meta.getLastLogTerm())); - entryTotalMemSize = 0; - } - - /** - * Overwrite the contents of this object with those of the given snapshot. Note that this function - * is only used if you want to override all the contents, otherwise please use - * compactEntries(snapshot.lastIndex()). - * - * @param snapshot snapshot - */ - void applyingSnapshot(Snapshot snapshot) { - long localIndex = getDummyIndex(); - long snapIndex = snapshot.getLastLogIndex(); - if (localIndex >= snapIndex) { - logger.info("requested snapshot is older than the existing snapshot"); - return; - } - entries.clear(); - entries.add(new EmptyContentLog(snapshot.getLastLogIndex(), snapshot.getLastLogTerm())); - } - - /** - * Return the last entry's index which have been compacted. - * - * @return dummyIndex - */ - Long getDummyIndex() { - return entries.get(0).getCurrLogIndex(); - } - - /** - * Return the first entry's index which have not been compacted. - * - * @return firstIndex - */ - Long getFirstIndex() { - return getDummyIndex() + 1; - } - - /** - * Return the last entry's index which have been committed and persisted. 
- * - * @return getLastIndex - */ - Long getLastIndex() { - return getDummyIndex() + entries.size() - 1; - } - - /** - * Return the entries's size - * - * @return entries's size - */ - int getTotalSize() { - // the first one is a sentry - return entries.size() - 1; - } - - /** - * Return the entry's term for given index. Note that the called should ensure index <= - * entries[entries.size()-1].index. - * - * @param index request entry index - * @return -1 if index > entries[entries.size()-1].index, throw EntryCompactedException if index < - * dummyIndex, or return the entry's term for given index - * @throws EntryCompactedException - */ - public long maybeTerm(long index) throws EntryCompactedException { - Log log = getEntry(index); - if (log == null) { - return -1; - } - return log.getCurrLogTerm(); - } - - /** - * Pack entries from low through high - 1, just like slice (entries[low:high]). dummyIndex < low - * <= high. Note that caller must ensure low <= high. - * - * @param low request index low bound - * @param high request index upper bound - */ - public List getEntries(long low, long high) { - if (low > high) { - logger.debug("invalid getEntries: parameter: {} > {}", low, high); - return Collections.emptyList(); - } - long dummyIndex = getDummyIndex(); - if (low <= dummyIndex) { - logger.debug( - "entries low ({}) is out of bound dummyIndex ({}), adjust parameter 'low' to {}", - low, - dummyIndex, - dummyIndex); - low = dummyIndex + 1; - } - long lastIndex = getLastIndex(); - if (high > lastIndex + 1) { - logger.debug( - "entries high ({}) is out of bound lastIndex ({}), adjust parameter 'high' to {}", - high, - lastIndex, - lastIndex); - high = lastIndex + 1; - } - return entries.subList((int) (low - dummyIndex), (int) (high - dummyIndex)); - } - - /** - * Return the entry's log for given index. Note that the called should ensure index <= - * entries[entries.size()-1].index. - * - * @param index request entry index - * @return null if index > entries[entries.size()-1].index, throw EntryCompactedException if index - * < dummyIndex, or return the entry's log for given index - * @throws EntryCompactedException - */ - Log getEntry(long index) throws EntryCompactedException { - long dummyIndex = getDummyIndex(); - if (index < dummyIndex) { - logger.debug( - "invalid committedEntryManager getEntry: parameter: index({}) < compactIndex({})", - index, - dummyIndex); - throw new EntryCompactedException(index, dummyIndex); - } - if ((int) (index - dummyIndex) >= entries.size()) { - if (logger.isDebugEnabled()) { - logger.debug( - "invalid committedEntryManager getEntry : parameter: index({}) > lastIndex({})", - index, - getLastIndex()); - } - return null; - } - return entries.get((int) (index - dummyIndex)); - } - - /** - * Discards all log entries prior to compactIndex. 
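CommittedEntryManager keeps a sentinel "dummy" entry at position 0 that holds the last compacted index, so a raft index maps to array offset (index - dummyIndex) and compaction simply moves the sentinel forward and drops the prefix. A minimal standalone sketch of that arithmetic on a plain list of indices (names and values are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class DummyIndexSketch {

  public static void main(String[] args) {
    // dummy entry holds the last compacted index, here 4; real entries are 5..9
    List<Long> entries = new ArrayList<>(Arrays.asList(4L, 5L, 6L, 7L, 8L, 9L));

    long dummyIndex = entries.get(0);
    long firstIndex = dummyIndex + 1;                 // 5, first index still in memory
    long lastIndex = dummyIndex + entries.size() - 1; // 9

    // fetching raft index 7 means reading array offset 7 - dummyIndex = 3
    long offset = 7 - dummyIndex;
    System.out.println("first=" + firstIndex + " last=" + lastIndex
        + " entries[" + offset + "]=" + entries.get((int) offset));

    // compacting up to index 6 keeps a new dummy (6) plus entries 7..9
    long compactIndex = 6;
    int cut = (int) (compactIndex - dummyIndex);
    entries.set(0, entries.get(cut));                 // new sentinel takes index 6
    entries.subList(1, cut + 1).clear();
    System.out.println("after compaction: " + entries); // [6, 7, 8, 9]
  }
}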
- * - * @param compactIndex request compactIndex - * @throws EntryUnavailableException - */ - void compactEntries(long compactIndex) throws EntryUnavailableException { - long dummyIndex = getDummyIndex(); - if (compactIndex < dummyIndex) { - logger.info( - "entries before request index ({}) have been compacted, and the compactIndex is ({})", - compactIndex, - dummyIndex); - return; - } - if (compactIndex > getLastIndex()) { - logger.info("compact ({}) is out of bound lastIndex ({})", compactIndex, getLastIndex()); - throw new EntryUnavailableException(compactIndex, getLastIndex()); - } - int index = (int) (compactIndex - dummyIndex); - for (int i = 1; i <= index; i++) { - entryTotalMemSize -= entries.get(i).getByteSize(); - } - // The following two lines of code should be tightly linked, - // because the check apply thread will read the entry also, and there will be concurrency - // problems, - // but please rest assured that we have done concurrency security check in the check apply - // thread. - // They are put together just to reduce the probability of concurrency. - entries.set( - 0, - new EmptyContentLog( - entries.get(index).getCurrLogIndex(), entries.get(index).getCurrLogTerm())); - entries.subList(1, index + 1).clear(); - } - - /** - * Append committed entries. This method will truncate conflict entries if it finds - * inconsistencies. - * - * @param appendingEntries request entries - * @throws TruncateCommittedEntryException - */ - public void append(List appendingEntries) throws TruncateCommittedEntryException { - if (appendingEntries.isEmpty()) { - return; - } - long offset = appendingEntries.get(0).getCurrLogIndex() - getDummyIndex(); - if (entries.size() - offset == 0) { - for (int i = 0; i < appendingEntries.size(); i++) { - entryTotalMemSize += appendingEntries.get(i).getByteSize(); - } - entries.addAll(appendingEntries); - } else if (entries.size() - offset > 0) { - throw new TruncateCommittedEntryException( - appendingEntries.get(0).getCurrLogIndex(), getLastIndex()); - } else { - logger.error( - "missing log entry [last: {}, append at: {}]", - getLastIndex(), - appendingEntries.get(0).getCurrLogIndex()); - } - } - - @TestOnly - CommittedEntryManager(List entries) { - this.entries = entries; - } - - @TestOnly - List getAllEntries() { - return entries; - } - - public long getEntryTotalMemSize() { - return entryTotalMemSize; - } - - public void setEntryTotalMemSize(long entryTotalMemSize) { - this.entryTotalMemSize = entryTotalMemSize; - } - - /** - * check how many logs could be reserved in memory. - * - * @param maxMemSize the max memory size for old committed log - * @return max num to reserve old committed log - */ - public int maxLogNumShouldReserve(long maxMemSize) { - long totalSize = 0; - for (int i = entries.size() - 1; i >= 1; i--) { - if (totalSize + entries.get(i).getByteSize() > maxMemSize) { - return entries.size() - 1 - i; - } - totalSize += entries.get(i).getByteSize(); - } - return entries.size() - 1; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/FilePartitionedSnapshotLogManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/FilePartitionedSnapshotLogManager.java deleted file mode 100644 index 3f25525ad60c..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/FilePartitionedSnapshotLogManager.java +++ /dev/null @@ -1,259 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.manage; - -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.exception.EntryCompactedException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogApplier; -import org.apache.iotdb.cluster.log.logtypes.AddNodeLog; -import org.apache.iotdb.cluster.log.logtypes.RemoveNodeLog; -import org.apache.iotdb.cluster.log.snapshot.FileSnapshot; -import org.apache.iotdb.cluster.log.snapshot.FileSnapshot.Factory; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.engine.storagegroup.TsFileResource; -import org.apache.iotdb.tsfile.utils.Pair; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Collection; -import java.util.Comparator; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -/** - * Different from PartitionedSnapshotLogManager, FilePartitionedSnapshotLogManager does not store - * the committed in memory after snapshots, it considers the logs are contained in the TsFiles so it - * will record every TsFiles in the slot instead. - * - *

FilePartitionedSnapshotLogManager is used for dataGroup - */ -public class FilePartitionedSnapshotLogManager extends PartitionedSnapshotLogManager { - - private static final Logger logger = - LoggerFactory.getLogger(FilePartitionedSnapshotLogManager.class); - - public FilePartitionedSnapshotLogManager( - LogApplier logApplier, - PartitionTable partitionTable, - Node header, - Node thisNode, - DataGroupMember dataGroupMember) { - super(logApplier, partitionTable, header, thisNode, Factory.INSTANCE, dataGroupMember); - } - - /** send FlushPlan to all nodes in one dataGroup */ - private void syncFlushAllProcessor(List requiredSlots, boolean needLeader) { - logger.info("{}: Start flush all storage group processor in one data group", getName()); - Map>> storageGroupPartitions = - StorageEngine.getInstance().getWorkingStorageGroupPartitions(); - if (storageGroupPartitions.size() == 0) { - logger.info("{}: no need to flush processor", getName()); - return; - } - dataGroupMember.flushFileWhenDoSnapshot(storageGroupPartitions, requiredSlots, needLeader); - } - - @Override - @SuppressWarnings("java:S1135") // ignore todos - public void takeSnapshot() throws IOException { - takeSnapshotForSpecificSlots( - ((SlotPartitionTable) partitionTable).getNodeSlots(dataGroupMember.getHeader()), true); - } - - @Override - public void takeSnapshotForSpecificSlots(List requiredSlots, boolean needLeader) - throws IOException { - try { - logger.info("{}: Taking snapshots, flushing IoTDB", getName()); - // record current commit index and prevent further logs from being applied, so the - // underlying state machine will not change during the snapshotting - setBlockAppliedCommitIndex(getCommitLogIndex()); - // wait until all logs before BlockAppliedCommitIndex are applied - super.takeSnapshot(); - // flush data to disk so that the disk files will represent a complete state - syncFlushAllProcessor(requiredSlots, needLeader); - logger.info("{}: Taking snapshots, IoTDB is flushed", getName()); - // TODO-cluster https://issues.apache.org/jira/browse/IOTDB-820 - synchronized (this) { - collectTimeseriesSchemas(requiredSlots); - snapshotLastLogIndex = getBlockAppliedCommitIndex(); - snapshotLastLogTerm = getTerm(snapshotLastLogIndex); - collectTsFilesAndFillTimeseriesSchemas(requiredSlots); - logger.info("{}: Snapshot is taken", getName()); - } - } catch (EntryCompactedException e) { - logger.error("failed to do snapshot.", e); - } finally { - // now further logs can be applied - super.resetBlockAppliedCommitIndex(); - } - } - - /** - * IMPORTANT, separate the collection timeseries schema from tsfile to avoid the following - * situations: If the tsfile is empty at this time (only the metadata is registered, but the - * tsfile has not been written yet), then the timeseries schema snapshot can still be generated - * and sent to the followers. 
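As an aside, the takeSnapshotForSpecificSlots flow shown here follows a fixed recipe: record the current commit index as a fence, wait until everything below the fence has been applied, flush so disk files represent a complete state, collect files and schemas under a lock, and always lift the fence in a finally clause. The sketch below only restates that sequence with hypothetical flush/collect placeholders and made-up indexes; it is not the IoTDB implementation.

```java
import java.util.concurrent.TimeUnit;

/** Minimal sketch of the "block applies, flush, collect, unblock" snapshot pattern. */
public class SnapshotFlowSketch {

  private volatile long commitIndex = 100;            // hypothetical commit position
  private volatile long maxAppliedIndex = 90;         // hypothetical applied position
  private volatile long blockAppliedCommitIndex = -1; // -1 means "no fence"

  public void takeSnapshot() throws InterruptedException {
    // 1. Freeze the state machine at the current commit index.
    blockAppliedCommitIndex = commitIndex;
    try {
      // 2. Wait until everything up to the frozen index has been applied.
      while (maxAppliedIndex < blockAppliedCommitIndex) {
        TimeUnit.MILLISECONDS.sleep(10);
      }
      // 3. Flush in-memory data so disk files represent a complete state.
      // 4. Collect files/schemas under the lock so the recorded index matches the data.
      synchronized (this) {
        System.out.println("snapshot taken at index " + blockAppliedCommitIndex);
      }
    } finally {
      // 5. Always lift the fence so newly committed logs can be applied again.
      blockAppliedCommitIndex = -1;
    }
  }

  public static void main(String[] args) throws InterruptedException {
    SnapshotFlowSketch sketch = new SnapshotFlowSketch();
    // Simulated applier that keeps applying logs up to commitIndex.
    Thread applier = new Thread(() -> {
      while (sketch.maxAppliedIndex < sketch.commitIndex) {
        sketch.maxAppliedIndex++;
        try { TimeUnit.MILLISECONDS.sleep(1); } catch (InterruptedException e) { return; }
      }
    });
    applier.start();
    sketch.takeSnapshot();
    applier.join();
  }
}
```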
- * - * @throws IOException - */ - private void collectTsFilesAndFillTimeseriesSchemas(List requiredSlots) - throws IOException { - // 1.collect tsfile - collectTsFiles(requiredSlots); - - // 2.register the measurement - boolean slotExistsInPartition; - HashSet slots = null; - if (dataGroupMember.getMetaGroupMember() != null) { - // if header node in raft group has removed, the result may be null - List nodeSlots = - ((SlotPartitionTable) dataGroupMember.getMetaGroupMember().getPartitionTable()) - .getNodeSlots(dataGroupMember.getHeader()); - // the method of 'HashSet(Collection c)' throws NPE,so we need check this part - if (nodeSlots != null) { - slots = new HashSet<>(nodeSlots); - } - } - - for (Map.Entry> entry : slotTimeseries.entrySet()) { - int slotNum = entry.getKey(); - slotExistsInPartition = slots == null || slots.contains(slotNum); - - if (slotExistsInPartition) { - FileSnapshot snapshot = slotSnapshots.computeIfAbsent(slotNum, s -> new FileSnapshot()); - if (snapshot.getTimeseriesSchemas().isEmpty()) { - snapshot.setTimeseriesSchemas(entry.getValue()); - } - } - } - } - - private void collectTsFiles(List requiredSlots) throws IOException { - slotSnapshots.clear(); - Map>> allClosedStorageGroupTsFile = - StorageEngine.getInstance().getAllClosedStorageGroupTsFile(); - List createdHardlinks = new LinkedList<>(); - // group the TsFiles by their slots - for (Entry>> entry : - allClosedStorageGroupTsFile.entrySet()) { - PartialPath storageGroupName = entry.getKey(); - Map> storageGroupsFiles = entry.getValue(); - for (Entry> storageGroupFiles : storageGroupsFiles.entrySet()) { - Long partitionNum = storageGroupFiles.getKey(); - List resourceList = storageGroupFiles.getValue(); - if (!collectTsFiles( - partitionNum, resourceList, storageGroupName, createdHardlinks, requiredSlots)) { - // some file is deleted during the collecting, clean created hardlinks and restart - // from the beginning - for (TsFileResource createdHardlink : createdHardlinks) { - createdHardlink.remove(); - } - collectTsFiles(requiredSlots); - return; - } - } - } - } - - /** - * Create hardlinks for files in one partition and add them into the corresponding snapshot. - * - * @param partitionNum - * @param resourceList - * @param storageGroupName - * @param createdHardlinks - * @return true if all hardlinks are created successfully or false if some of them failed to - * create - * @throws IOException - */ - private boolean collectTsFiles( - Long partitionNum, - List resourceList, - PartialPath storageGroupName, - List createdHardlinks, - List requiredSlots) - throws IOException { - int slotNum = - SlotPartitionTable.getSlotStrategy() - .calculateSlotByPartitionNum( - storageGroupName.getFullPath(), partitionNum, ClusterConstant.SLOT_NUM); - if (!requiredSlots.contains(slotNum)) { - return true; - } - FileSnapshot snapshot = slotSnapshots.computeIfAbsent(slotNum, s -> new FileSnapshot()); - for (TsFileResource tsFileResource : resourceList) { - TsFileResource hardlink = tsFileResource.createHardlink(); - if (hardlink == null) { - return false; - } - createdHardlinks.add(hardlink); - logger.debug("{}: File {} is put into snapshot #{}", getName(), tsFileResource, slotNum); - snapshot.addFile(hardlink, thisNode, isPlanIndexRangeUnique(tsFileResource, resourceList)); - } - snapshot.getDataFiles().sort(Comparator.comparingLong(TsFileResource::getMaxPlanIndex)); - return true; - } - - /** - * Check if the plan index of 'resource' overlaps any one in 'others' from the same time - * partition. 
For example, we have plan {1,2,3,4,5,6}, plan 1 and 6 are written into an unsequnce - * file Unseq1, and {2,3} and {4,5} are written to sequence files Seq1 and Seq2 respectively - * (notice the numbers are just indexes, not timestamps, so they can be written anywhere if - * properly constructed). So Unseq1 both overlaps Seq1 and Seq2. If Unseq1 merges with Seq1 and - * generated Seq1' (ranges [1, 6]), it will also overlap with Seq2. But if Seq1' further merge - * with Seq2, its range remains to be [1,6], and we cannot find any other files that overlap with - * it, so we can conclude with confidence that the file contains all plans within [1,6]. - * - * @param resource - * @param others - * @return - */ - private boolean isPlanIndexRangeUnique(TsFileResource resource, List others) { - for (TsFileResource other : others) { - if (other != resource && other.isPlanIndexOverlap(resource)) { - return false; - } - } - return true; - } - - @Override - public long append(Log entry) { - long res = super.append(entry); - // For data group, it's necessary to apply remove/add log immediately after append - if (entry instanceof AddNodeLog || entry instanceof RemoveNodeLog) { - applyEntry(entry); - } - return res; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/MetaSingleSnapshotLogManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/MetaSingleSnapshotLogManager.java deleted file mode 100644 index d9bc9e8f9fb7..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/MetaSingleSnapshotLogManager.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.manage; - -import org.apache.iotdb.cluster.log.LogApplier; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.manage.serializable.SyncLogDequeSerializer; -import org.apache.iotdb.cluster.log.snapshot.MetaSimpleSnapshot; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.auth.AuthException; -import org.apache.iotdb.commons.auth.authorizer.BasicAuthorizer; -import org.apache.iotdb.commons.auth.authorizer.IAuthorizer; -import org.apache.iotdb.commons.auth.entity.Role; -import org.apache.iotdb.commons.auth.entity.User; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.metadata.template.Template; -import org.apache.iotdb.db.metadata.template.TemplateManager; -import org.apache.iotdb.db.service.IoTDB; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Map; - -/** MetaSingleSnapshotLogManager provides a MetaSimpleSnapshot as snapshot. 
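The plan-index reasoning in the isPlanIndexRangeUnique comment (an unsequence file whose plan-index range spans several sequence files overlaps all of them, and only a file with no overlapping neighbor is guaranteed to contain every plan in its range) can be made concrete with a small interval model. PlanRange below is a simplified stand-in for TsFileResource, used only to reproduce the Unseq1/Seq1/Seq2 example; it is not the real API.

```java
import java.util.Arrays;
import java.util.List;

/** Simplified model of the plan-index overlap check. */
public class PlanIndexOverlapExample {

  /** Stand-in for TsFileResource: only the min/max plan index matter here. */
  static final class PlanRange {
    final String name;
    final long minPlanIndex;
    final long maxPlanIndex;

    PlanRange(String name, long minPlanIndex, long maxPlanIndex) {
      this.name = name;
      this.minPlanIndex = minPlanIndex;
      this.maxPlanIndex = maxPlanIndex;
    }

    boolean overlaps(PlanRange other) {
      return this.minPlanIndex <= other.maxPlanIndex && other.minPlanIndex <= this.maxPlanIndex;
    }
  }

  /** True only if no other file's plan-index range overlaps this one. */
  static boolean isPlanIndexRangeUnique(PlanRange resource, List<PlanRange> others) {
    for (PlanRange other : others) {
      if (other != resource && other.overlaps(resource)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    PlanRange unseq1 = new PlanRange("Unseq1", 1, 6); // holds plans 1 and 6
    PlanRange seq1 = new PlanRange("Seq1", 2, 3);     // holds plans 2 and 3
    PlanRange seq2 = new PlanRange("Seq2", 4, 5);     // holds plans 4 and 5
    List<PlanRange> all = Arrays.asList(unseq1, seq1, seq2);

    // Unseq1 overlaps both sequence files, so its plan-index range is not unique.
    System.out.println(isPlanIndexRangeUnique(unseq1, all)); // false
    // After everything is merged into one file covering [1,6], nothing overlaps it.
    PlanRange merged = new PlanRange("merged", 1, 6);
    System.out.println(isPlanIndexRangeUnique(merged, Arrays.asList(merged))); // true
  }
}
```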
*/ -public class MetaSingleSnapshotLogManager extends RaftLogManager { - - private static final Logger logger = LoggerFactory.getLogger(MetaSingleSnapshotLogManager.class); - private Map storageGroupTTLMap; - private Map userMap; - private Map roleMap; - private Map templateMap; - private MetaGroupMember metaGroupMember; - private long commitIndex; - private long term; - - public MetaSingleSnapshotLogManager(LogApplier logApplier, MetaGroupMember metaGroupMember) { - super(new SyncLogDequeSerializer(0), logApplier, metaGroupMember.getName()); - this.metaGroupMember = metaGroupMember; - } - - @Override - @SuppressWarnings("java:S1135") // ignore todos - public void takeSnapshot() throws IOException { - // TODO-cluster https://issues.apache.org/jira/browse/IOTDB-820 - super.takeSnapshot(); - synchronized (this) { - storageGroupTTLMap = IoTDB.schemaProcessor.getStorageGroupsTTL(); - try { - IAuthorizer authorizer = BasicAuthorizer.getInstance(); - userMap = authorizer.getAllUsers(); - roleMap = authorizer.getAllRoles(); - templateMap = TemplateManager.getInstance().getTemplateMap(); - commitIndex = getCommitLogIndex(); - term = getCommitLogTerm(); - } catch (AuthException e) { - logger.error("get user or role info failed", e); - } - } - } - - @Override - public Snapshot getSnapshot(long minIndex) { - MetaSimpleSnapshot snapshot = - new MetaSimpleSnapshot( - storageGroupTTLMap, - userMap, - roleMap, - templateMap, - metaGroupMember.getPartitionTable().serialize()); - snapshot.setLastLogIndex(commitIndex); - snapshot.setLastLogTerm(term); - return snapshot; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/PartitionedSnapshotLogManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/PartitionedSnapshotLogManager.java deleted file mode 100644 index 64ad0e17c865..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/PartitionedSnapshotLogManager.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log.manage; - -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.log.LogApplier; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.manage.serializable.SyncLogDequeSerializer; -import org.apache.iotdb.cluster.log.snapshot.PartitionedSnapshot; -import org.apache.iotdb.cluster.log.snapshot.SnapshotFactory; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.db.service.metrics.Metric; -import org.apache.iotdb.db.service.metrics.MetricsService; -import org.apache.iotdb.db.service.metrics.Tag; -import org.apache.iotdb.metrics.config.MetricConfigDescriptor; -import org.apache.iotdb.metrics.utils.MetricLevel; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; - -/** - * PartitionedSnapshotLogManager provides a PartitionedSnapshot as snapshot, dividing each log to a - * sub-snapshot according to its slot and stores timeseries schemas of each slot. - */ -public abstract class PartitionedSnapshotLogManager extends RaftLogManager { - - private static final Logger logger = LoggerFactory.getLogger(PartitionedSnapshotLogManager.class); - - Map slotSnapshots = new HashMap<>(); - private SnapshotFactory factory; - Map> slotTimeseries = new HashMap<>(); - long snapshotLastLogIndex; - long snapshotLastLogTerm; - PartitionTable partitionTable; - Node thisNode; - DataGroupMember dataGroupMember; - - protected PartitionedSnapshotLogManager( - LogApplier logApplier, - PartitionTable partitionTable, - Node header, - Node thisNode, - SnapshotFactory factory, - DataGroupMember dataGroupMember) { - super( - new SyncLogDequeSerializer(header.nodeIdentifier), - logApplier, - Integer.toString(header.getNodeIdentifier())); - this.partitionTable = partitionTable; - this.factory = factory; - this.thisNode = thisNode; - this.dataGroupMember = dataGroupMember; - - if (MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) { - MetricsService.getInstance() - .getMetricManager() - .getOrCreateAutoGauge( - Metric.CLUSTER_UNCOMMITTED_LOG.toString(), - MetricLevel.IMPORTANT, - getUnCommittedEntryManager().getAllEntries(), - List::size, - Tag.NAME.toString(), - thisNode.internalIp + "_" + dataGroupMember.getName()); - } - } - - public void takeSnapshotForSpecificSlots(List requiredSlots, boolean needLeader) - throws IOException {} - - @Override - public Snapshot getSnapshot(long minIndex) { - // copy snapshots - synchronized (slotSnapshots) { - PartitionedSnapshot partitionedSnapshot = new PartitionedSnapshot<>(factory); - for (Entry entry : slotSnapshots.entrySet()) { - partitionedSnapshot.putSnapshot(entry.getKey(), entry.getValue()); - } - partitionedSnapshot.setLastLogIndex(snapshotLastLogIndex); - partitionedSnapshot.setLastLogTerm(snapshotLastLogTerm); - partitionedSnapshot.truncateBefore(minIndex); - return partitionedSnapshot; - } - } - - void collectTimeseriesSchemas(List requiredSlots) { - 
slotTimeseries.clear(); - List allSgPaths = IoTDB.schemaProcessor.getAllStorageGroupPaths(); - - Set requiredSlotsSet = new HashSet(requiredSlots); - for (PartialPath sgPath : allSgPaths) { - String storageGroupName = sgPath.getFullPath(); - int slot = - SlotPartitionTable.getSlotStrategy() - .calculateSlotByTime(storageGroupName, 0, ClusterConstant.SLOT_NUM); - - if (!requiredSlotsSet.contains(slot)) { - continue; - } - Collection schemas = - slotTimeseries.computeIfAbsent(slot, s -> new HashSet<>()); - // IoTDB.schemaProcessor.collectTimeseriesSchema(sgPath, schemas); - logger.debug("{}: {} timeseries are snapshot in slot {}", getName(), schemas.size(), slot); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/RaftLogManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/RaftLogManager.java deleted file mode 100644 index 3a675afa4fba..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/RaftLogManager.java +++ /dev/null @@ -1,1035 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
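collectTimeseriesSchemas routes every storage group to a slot through SlotPartitionTable.getSlotStrategy().calculateSlotByTime(...) and keeps only the slots this member is responsible for. The exact slot strategy is not part of this diff; the hash-based calculation below is only an assumed illustration of how a name and time partition can be mapped onto a fixed slot space and then filtered against the required slots.

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/** Assumed illustration of mapping storage groups onto a fixed slot space. */
public class SlotFilterSketch {

  static final int SLOT_NUM = 10_000; // hypothetical total number of slots

  /** Hash the storage group name and time partition into [0, SLOT_NUM). */
  static int calculateSlot(String storageGroupName, long timePartition) {
    int hash = (storageGroupName + "-" + timePartition).hashCode();
    return Math.floorMod(hash, SLOT_NUM);
  }

  public static void main(String[] args) {
    List<String> storageGroups = Arrays.asList("root.sg1", "root.sg2", "root.sg3");
    Set<Integer> requiredSlots = new HashSet<>(Arrays.asList(
        calculateSlot("root.sg1", 0), calculateSlot("root.sg3", 0)));

    // Keep only the storage groups whose slot this member is responsible for,
    // mirroring the requiredSlotsSet filter in collectTimeseriesSchemas.
    for (String sg : storageGroups) {
      int slot = calculateSlot(sg, 0);
      if (requiredSlots.contains(slot)) {
        System.out.println(sg + " -> slot " + slot + " (collected)");
      } else {
        System.out.println(sg + " -> slot " + slot + " (skipped)");
      }
    }
  }
}
```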
- */ - -package org.apache.iotdb.cluster.log.manage; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.EntryCompactedException; -import org.apache.iotdb.cluster.exception.EntryUnavailableException; -import org.apache.iotdb.cluster.exception.GetEntriesWrongParametersException; -import org.apache.iotdb.cluster.exception.LogExecutionException; -import org.apache.iotdb.cluster.exception.TruncateCommittedEntryException; -import org.apache.iotdb.cluster.log.HardState; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogApplier; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.StableEntryManager; -import org.apache.iotdb.cluster.log.manage.serializable.LogManagerMeta; -import org.apache.iotdb.cluster.server.monitor.Timer.Statistic; -import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.tsfile.utils.RamUsageEstimator; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; - -public abstract class RaftLogManager { - - private static final Logger logger = LoggerFactory.getLogger(RaftLogManager.class); - - /** manage uncommitted entries */ - private UnCommittedEntryManager unCommittedEntryManager; - - /** manage committed entries in memory as a cache */ - private CommittedEntryManager committedEntryManager; - - /** manage committed entries in disk for safety */ - private StableEntryManager stableEntryManager; - - private long commitIndex; - - /** - * The committed logs whose index is smaller than this are all have been applied, for example, - * suppose there are 5 committed logs, whose log index is 1,2,3,4,5; if the applied sequence is - * 1,3,2,5,4, then the maxHaveAppliedCommitIndex according is 1,1,3,3,5. This attributed is only - * used for asyncLogApplier - */ - private volatile long maxHaveAppliedCommitIndex; - - private final Object changeApplyCommitIndexCond = new Object(); - - /** - * The committed log whose index is larger than blockAppliedCommitIndex will be blocked. if - * blockAppliedCommitIndex < 0(default is -1), will not block any operation. - */ - protected volatile long blockAppliedCommitIndex; - - protected LogApplier logApplier; - - /** to distinguish managers of different members */ - private String name; - - private ScheduledExecutorService deleteLogExecutorService; - private ScheduledFuture deleteLogFuture; - - private ExecutorService checkLogApplierExecutorService; - private Future checkLogApplierFuture; - - /** minimum number of committed logs in memory */ - private int minNumOfLogsInMem = - ClusterDescriptor.getInstance().getConfig().getMinNumOfLogsInMem(); - - /** maximum number of committed logs in memory */ - private int maxNumOfLogsInMem = - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem(); - - private long maxLogMemSize = - ClusterDescriptor.getInstance().getConfig().getMaxMemorySizeForRaftLog(); - - /** - * Each time new logs are appended, this condition will be notified so logs that have larger - * indices but arrived earlier can proceed. 
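The maxHaveAppliedCommitIndex comment above (with applied order 1,3,2,5,4 the value advances as 1,1,3,3,5) boils down to tracking the largest contiguously applied prefix of the committed log. The sketch below reproduces exactly that example; it is an illustration of the bookkeeping, not the asyncLogApplier itself.

```java
import java.util.HashSet;
import java.util.Set;

/** Illustration of how maxHaveAppliedCommitIndex tracks the contiguously applied prefix. */
public class AppliedIndexTrackerExample {

  private final Set<Long> applied = new HashSet<>();
  private long maxHaveAppliedCommitIndex = 0; // nothing applied yet

  /** Record one applied log index and advance the contiguous prefix as far as possible. */
  long onApplied(long index) {
    applied.add(index);
    while (applied.contains(maxHaveAppliedCommitIndex + 1)) {
      maxHaveAppliedCommitIndex++;
    }
    return maxHaveAppliedCommitIndex;
  }

  public static void main(String[] args) {
    AppliedIndexTrackerExample tracker = new AppliedIndexTrackerExample();
    long[] applyOrder = {1, 3, 2, 5, 4};
    for (long index : applyOrder) {
      // Prints 1, 1, 3, 3, 5 — matching the example in the field's comment.
      System.out.println("applied " + index + " -> maxHaveAppliedCommitIndex = "
          + tracker.onApplied(index));
    }
  }
}
```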
- */ - private final Object[] logUpdateConditions = new Object[1024]; - - protected List blockedUnappliedLogList; - - protected RaftLogManager(StableEntryManager stableEntryManager, LogApplier applier, String name) { - this.logApplier = applier; - this.name = name; - LogManagerMeta meta = stableEntryManager.getMeta(); - this.setCommittedEntryManager(new CommittedEntryManager(maxNumOfLogsInMem, meta)); - this.setStableEntryManager(stableEntryManager); - try { - this.getCommittedEntryManager().append(stableEntryManager.getAllEntriesAfterAppliedIndex()); - } catch (TruncateCommittedEntryException e) { - logger.error("{}: Unexpected error:", name, e); - } - long first = getCommittedEntryManager().getDummyIndex(); - long last = getCommittedEntryManager().getLastIndex(); - this.setUnCommittedEntryManager(new UnCommittedEntryManager(last + 1)); - this.getUnCommittedEntryManager() - .truncateAndAppend(stableEntryManager.getAllEntriesAfterCommittedIndex()); - - /** must have applied entry [compactIndex,last] to state machine */ - this.commitIndex = last; - - /** - * due to the log operation is idempotent, so we can just reapply the log from the first index - * of committed logs - */ - this.maxHaveAppliedCommitIndex = first; - - this.blockAppliedCommitIndex = -1; - - this.blockedUnappliedLogList = new CopyOnWriteArrayList<>(); - - this.deleteLogExecutorService = - IoTDBThreadPoolFactory.newScheduledThreadPoolWithDaemon(1, "raft-log-delete-" + name); - - this.checkLogApplierExecutorService = - IoTDBThreadPoolFactory.newSingleThreadExecutorWithDaemon("check-log-applier-" + name); - - /** deletion check period of the submitted log */ - int logDeleteCheckIntervalSecond = - ClusterDescriptor.getInstance().getConfig().getLogDeleteCheckIntervalSecond(); - - if (logDeleteCheckIntervalSecond > 0) { - this.deleteLogFuture = - deleteLogExecutorService.scheduleAtFixedRate( - this::checkDeleteLog, - logDeleteCheckIntervalSecond, - logDeleteCheckIntervalSecond, - TimeUnit.SECONDS); - } - - this.checkLogApplierFuture = checkLogApplierExecutorService.submit(this::checkAppliedLogIndex); - - /** flush log to file periodically */ - if (ClusterDescriptor.getInstance().getConfig().isEnableRaftLogPersistence()) { - this.applyAllCommittedLogWhenStartUp(); - } - - for (int i = 0; i < logUpdateConditions.length; i++) { - logUpdateConditions[i] = new Object(); - } - } - - public Snapshot getSnapshot() { - return getSnapshot(-1); - } - - public abstract Snapshot getSnapshot(long minLogIndex); - - /** - * IMPORTANT!!! - * - *

The subclass's takeSnapshot() must call this method to ensure that all logs have been - * applied before taking a snapshot - * - *

- * - * @throws IOException timeout exception - */ - public void takeSnapshot() throws IOException { - if (commitIndex <= 0) { - return; - } - long startTime = System.currentTimeMillis(); - if (blockAppliedCommitIndex < 0) { - return; - } - logger.info( - "{}: before take snapshot, blockAppliedCommitIndex={}, maxHaveAppliedCommitIndex={}, commitIndex={}", - name, - blockAppliedCommitIndex, - maxHaveAppliedCommitIndex, - commitIndex); - while (blockAppliedCommitIndex > maxHaveAppliedCommitIndex) { - long waitTime = System.currentTimeMillis() - startTime; - if (waitTime > ClusterDescriptor.getInstance().getConfig().getCatchUpTimeoutMS()) { - logger.error( - "{}: wait all log applied time out, time cost={}, blockAppliedCommitIndex={}, maxHaveAppliedCommitIndex={},commitIndex={}", - name, - waitTime, - blockAppliedCommitIndex, - maxHaveAppliedCommitIndex, - commitIndex); - throw new IOException("wait all log applied time out"); - } - } - } - - /** - * Update the raftNode's hardState(currentTerm,voteFor) and flush to disk. - * - * @param state - */ - public void updateHardState(HardState state) { - getStableEntryManager().setHardStateAndFlush(state); - } - - /** - * Return the raftNode's hardState(currentTerm,voteFor). - * - * @return state - */ - public HardState getHardState() { - return getStableEntryManager().getHardState(); - } - - /** - * Return the raftNode's commitIndex. - * - * @return commitIndex - */ - public long getCommitLogIndex() { - return commitIndex; - } - - /** - * Return the first entry's index which have not been compacted. - * - * @return firstIndex - */ - public long getFirstIndex() { - return getCommittedEntryManager().getFirstIndex(); - } - - /** - * Return the last entry's index which have been added into log module. - * - * @return lastIndex - */ - public long getLastLogIndex() { - long last = getUnCommittedEntryManager().maybeLastIndex(); - if (last != -1) { - return last; - } - return getCommittedEntryManager().getLastIndex(); - } - - /** - * Returns the term for given index. - * - * @param index request entry index - * @return throw EntryCompactedException if index < dummyIndex, -1 if index > lastIndex or the - * entry is compacted, otherwise return the entry's term for given index - * @throws EntryCompactedException - */ - public long getTerm(long index) throws EntryCompactedException { - long dummyIndex = getFirstIndex() - 1; - if (index < dummyIndex) { - // search in disk - if (ClusterDescriptor.getInstance().getConfig().isEnableRaftLogPersistence()) { - List logsInDisk = getStableEntryManager().getLogs(index, index); - if (logsInDisk.isEmpty()) { - return -1; - } else { - return logsInDisk.get(0).getCurrLogTerm(); - } - } - return -1; - } - - long lastIndex = getLastLogIndex(); - if (index > lastIndex) { - return -1; - } - - if (index >= getUnCommittedEntryManager().getFirstUnCommittedIndex()) { - long term = getUnCommittedEntryManager().maybeTerm(index); - if (term != -1) { - return term; - } - } - - // search in memory - return getCommittedEntryManager().maybeTerm(index); - } - - /** - * Return the last entry's term. If it goes wrong, there must be an unexpected exception. - * - * @return last entry's term - */ - public long getLastLogTerm() { - long term = -1; - try { - term = getTerm(getLastLogIndex()); - } catch (Exception e) { - logger.error("{}: unexpected error when getting the last term : {}", name, e.getMessage()); - } - return term; - } - - /** - * Return the commitIndex's term. If it goes wrong, there must be an unexpected exception. 
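getTerm() resolves a term in three places: indexes already compacted out of memory are looked up in the persisted log (when raft log persistence is enabled), recent indexes are asked from the uncommitted entries first, and everything else comes from the committed in-memory cache. The sketch below only restates that dispatch order with hypothetical lookup helpers and fixed boundaries; it is not the original implementation.

```java
import java.util.Optional;

/** Sketch of the three-tier term lookup order used by getTerm(). */
public class TermLookupSketch {

  // Hypothetical boundaries of the in-memory window.
  private final long dummyIndex = 50;            // everything below is compacted
  private final long firstUncommittedIndex = 90; // start of uncommitted entries
  private final long lastIndex = 100;            // last known log index
  private final boolean persistenceEnabled = true;

  long getTerm(long index) {
    if (index < dummyIndex) {
      // 1. Compacted range: only the persisted log (if enabled) can answer.
      return persistenceEnabled ? lookupOnDisk(index).orElse(-1L) : -1L;
    }
    if (index > lastIndex) {
      return -1L; // beyond anything we have
    }
    if (index >= firstUncommittedIndex) {
      // 2. Recent range: ask the uncommitted entries first.
      Optional<Long> term = lookupUncommitted(index);
      if (term.isPresent()) {
        return term.get();
      }
    }
    // 3. Otherwise the committed in-memory cache holds it.
    return lookupCommitted(index).orElse(-1L);
  }

  // Hypothetical lookups; each would consult the corresponding manager.
  private Optional<Long> lookupOnDisk(long index) { return Optional.of(3L); }
  private Optional<Long> lookupUncommitted(long index) { return Optional.of(7L); }
  private Optional<Long> lookupCommitted(long index) { return Optional.of(5L); }

  public static void main(String[] args) {
    TermLookupSketch log = new TermLookupSketch();
    System.out.println(log.getTerm(10));  // compacted -> disk
    System.out.println(log.getTerm(95));  // recent -> uncommitted
    System.out.println(log.getTerm(70));  // in-memory committed cache
    System.out.println(log.getTerm(200)); // out of range -> -1
  }
}
```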
- * - * @return commitIndex's term - */ - public long getCommitLogTerm() { - long term = -1; - try { - term = getTerm(getCommitLogIndex()); - } catch (Exception e) { - logger.error("{}: unexpected error when getting the last term : {}", name, e.getMessage()); - } - return term; - } - - /** - * Used by follower node to support leader's complicated log replication rpc parameters and try to - * commit entries. - * - * @param lastIndex leader's matchIndex for this follower node - * @param lastTerm the entry's term which index is leader's matchIndex for this follower node - * @param leaderCommit leader's commitIndex - * @param entries entries sent from the leader node Note that the leader must ensure - * entries[0].index = lastIndex + 1 - * @return -1 if the entries cannot be appended, otherwise the last index of new entries - */ - public long maybeAppend(long lastIndex, long lastTerm, long leaderCommit, List entries) { - if (matchTerm(lastTerm, lastIndex)) { - long newLastIndex = lastIndex + entries.size(); - long ci = findConflict(entries); - if (ci <= commitIndex) { - if (ci != -1) { - logger.error( - "{}: entry {} conflict with committed entry [commitIndex({})]", - name, - ci, - commitIndex); - } else { - if (logger.isDebugEnabled() && !entries.isEmpty()) { - logger.debug( - "{}: Appending entries [{} and other {} logs] all exist locally", - name, - entries.get(0), - entries.size() - 1); - } - } - - } else { - long offset = lastIndex + 1; - append(entries.subList((int) (ci - offset), entries.size())); - } - try { - commitTo(Math.min(leaderCommit, newLastIndex)); - } catch (LogExecutionException e) { - // exceptions are ignored on follower side - } - return newLastIndex; - } - return -1; - } - - /** - * Used by follower node to support leader's complicated log replication rpc parameters and try to - * commit entry. - * - * @param lastIndex leader's matchIndex for this follower node - * @param lastTerm the entry's term which index is leader's matchIndex for this follower node - * @param leaderCommit leader's commitIndex - * @param entry entry sent from the leader node - * @return -1 if the entries cannot be appended, otherwise the last index of new entries - */ - public long maybeAppend(long lastIndex, long lastTerm, long leaderCommit, Log entry) { - if (matchTerm(lastTerm, lastIndex)) { - long newLastIndex = lastIndex + 1; - if (entry.getCurrLogIndex() <= commitIndex) { - logger.debug( - "{}: entry {} conflict with committed entry [commitIndex({})]", - name, - entry.getCurrLogIndex(), - commitIndex); - } else { - append(entry); - } - try { - commitTo(Math.min(leaderCommit, newLastIndex)); - } catch (LogExecutionException e) { - // exceptions are ignored on follower side - } - return newLastIndex; - } - return -1; - } - - /** - * Used by leader node or MaybeAppend to directly append to unCommittedEntryManager. Note that the - * caller should ensure entries[0].index > committed. 
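Both maybeAppend variants follow the same recipe: verify that the leader's (lastIndex, lastTerm) matches the local log, skip entries at or below the commit index, append the rest while truncating conflicts, and finally advance the commit index to min(leaderCommit, newLastIndex). The simplified sketch below captures that decision flow over a plain list of (index, term) pairs; it is a reduction of the logic, not the original class.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/** Simplified follower-side append: the decision flow of maybeAppend(). */
public class FollowerAppendSketch {

  static final class Entry {
    final long index;
    final long term;
    Entry(long index, long term) { this.index = index; this.term = term; }
  }

  private final List<Entry> log = new ArrayList<>(Arrays.asList(
      new Entry(1, 1), new Entry(2, 1), new Entry(3, 2)));
  private long commitIndex = 1;

  /** Returns -1 if the previous entry does not match, otherwise the new last index. */
  long maybeAppend(long prevIndex, long prevTerm, long leaderCommit, List<Entry> entries) {
    if (!matchTerm(prevIndex, prevTerm)) {
      return -1; // consistency check failed, leader must retry with an earlier prevIndex
    }
    for (Entry e : entries) {
      if (e.index <= commitIndex) {
        continue; // never touch committed entries
      }
      // Truncate any conflicting suffix, then append.
      log.removeIf(existing -> existing.index >= e.index);
      log.add(e);
    }
    long newLastIndex = prevIndex + entries.size();
    commitIndex = Math.max(commitIndex, Math.min(leaderCommit, newLastIndex));
    return newLastIndex;
  }

  private boolean matchTerm(long index, long term) {
    if (index == 0) {
      return true; // empty-log case
    }
    return log.stream().anyMatch(e -> e.index == index && e.term == term);
  }

  public static void main(String[] args) {
    FollowerAppendSketch follower = new FollowerAppendSketch();
    // Leader sends entry 4 of term 3 after (index 3, term 2); leaderCommit is 3.
    long last = follower.maybeAppend(3, 2, 3, Arrays.asList(new Entry(4, 3)));
    System.out.println("new last index = " + last + ", commitIndex = " + follower.commitIndex);
  }
}
```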
- * - * @param entries appendingEntries - * @return the newly generated lastIndex - */ - public long append(List entries) { - if (entries.isEmpty()) { - return getLastLogIndex(); - } - long after = entries.get(0).getCurrLogIndex(); - if (after <= commitIndex) { - logger.error("{}: after({}) is out of range [commitIndex({})]", name, after, commitIndex); - return -1; - } - getUnCommittedEntryManager().truncateAndAppend(entries); - Object logUpdateCondition = - getLogUpdateCondition(entries.get(entries.size() - 1).getCurrLogIndex()); - synchronized (logUpdateCondition) { - logUpdateCondition.notifyAll(); - } - return getLastLogIndex(); - } - - /** - * Used by leader node to directly append to unCommittedEntryManager. Note that the caller should - * ensure entry.index > committed. - * - * @param entry appendingEntry - * @return the newly generated lastIndex - */ - public long append(Log entry) { - long after = entry.getCurrLogIndex(); - if (after <= commitIndex) { - logger.error("{}: after({}) is out of range [commitIndex({})]", name, after, commitIndex); - return -1; - } - getUnCommittedEntryManager().truncateAndAppend(entry); - Object logUpdateCondition = getLogUpdateCondition(entry.getCurrLogIndex()); - synchronized (logUpdateCondition) { - logUpdateCondition.notifyAll(); - } - return getLastLogIndex(); - } - - /** - * Used by leader node to try to commit entries. - * - * @param leaderCommit leader's commitIndex - * @param term the entry's term which index is leaderCommit in leader's log module - * @return true or false - */ - public synchronized boolean maybeCommit(long leaderCommit, long term) { - if (leaderCommit > commitIndex && matchTerm(term, leaderCommit)) { - try { - commitTo(leaderCommit); - } catch (LogExecutionException e) { - // exceptions are ignored on follower side - } - return true; - } - return false; - } - - /** - * Overwrites the contents of this object with those of the given snapshot. - * - * @param snapshot leader's snapshot - */ - public void applySnapshot(Snapshot snapshot) { - logger.info( - "{}: log module starts to restore snapshot [index: {}, term: {}]", - name, - snapshot.getLastLogIndex(), - snapshot.getLastLogTerm()); - try { - getCommittedEntryManager().compactEntries(snapshot.getLastLogIndex()); - getStableEntryManager().removeCompactedEntries(snapshot.getLastLogIndex()); - } catch (EntryUnavailableException e) { - getCommittedEntryManager().applyingSnapshot(snapshot); - getUnCommittedEntryManager().applyingSnapshot(snapshot); - } - if (this.commitIndex < snapshot.getLastLogIndex()) { - this.commitIndex = snapshot.getLastLogIndex(); - } - - // as the follower receives a snapshot, the logs persisted is not complete, so remove them - getStableEntryManager().clearAllLogs(commitIndex); - - synchronized (changeApplyCommitIndexCond) { - if (this.maxHaveAppliedCommitIndex < snapshot.getLastLogIndex()) { - this.maxHaveAppliedCommitIndex = snapshot.getLastLogIndex(); - } - } - } - - /** - * Determines if the given (lastTerm, lastIndex) log is more up-to-date by comparing the index and - * term of the last entries in the existing logs. If the logs have last entries with different - * terms, then the log with the later term is more up-to-date. If the logs end with the same term, - * then whichever log has the larger lastIndex is more up-to-date. If the logs are the same, the - * given log is up-to-date. 
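The up-to-date rule described for isLogUpToDate (compare last terms first, then last indexes on a tie) is the standard Raft election safety check. A minimal restatement as a standalone helper:

```java
/** The election "up-to-date" comparison, as a standalone helper. */
public class LogUpToDateCheck {

  /** True if a candidate with (lastTerm, lastIndex) is at least as up-to-date as this log. */
  static boolean isLogUpToDate(long candidateLastTerm, long candidateLastIndex,
                               long myLastTerm, long myLastIndex) {
    return candidateLastTerm > myLastTerm
        || (candidateLastTerm == myLastTerm && candidateLastIndex >= myLastIndex);
  }

  public static void main(String[] args) {
    // A higher term wins even with a shorter log.
    System.out.println(isLogUpToDate(5, 10, 4, 99)); // true
    // Same term: the longer (or equally long) log wins.
    System.out.println(isLogUpToDate(4, 50, 4, 99)); // false
    System.out.println(isLogUpToDate(4, 99, 4, 99)); // true
  }
}
```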
- * - * @param lastTerm candidate's lastTerm - * @param lastIndex candidate's lastIndex - * @return true or false - */ - public boolean isLogUpToDate(long lastTerm, long lastIndex) { - return lastTerm > getLastLogTerm() - || (lastTerm == getLastLogTerm() && lastIndex >= getLastLogIndex()); - } - - /** - * Pack entries from low through high - 1, just like slice (entries[low:high]). firstIndex <= low - * <= high <= lastIndex. - * - * @param low request index low bound - * @param high request index upper bound - */ - public List getEntries(long low, long high) { - if (low >= high) { - return Collections.emptyList(); - } - List entries = new ArrayList<>(); - long offset = getUnCommittedEntryManager().getFirstUnCommittedIndex(); - if (low < offset) { - entries.addAll(getCommittedEntryManager().getEntries(low, Math.min(high, offset))); - } - if (high > offset) { - entries.addAll(getUnCommittedEntryManager().getEntries(Math.max(low, offset), high)); - } - return entries; - } - - /** - * Used by MaybeCommit or MaybeAppend or follower to commit newly committed entries. - * - * @param newCommitIndex request commitIndex - */ - public void commitTo(long newCommitIndex) throws LogExecutionException { - if (commitIndex >= newCommitIndex) { - return; - } - long startTime = Statistic.RAFT_SENDER_COMMIT_GET_LOGS.getOperationStartTime(); - long lo = getUnCommittedEntryManager().getFirstUnCommittedIndex(); - long hi = newCommitIndex + 1; - List entries = new ArrayList<>(getUnCommittedEntryManager().getEntries(lo, hi)); - Statistic.RAFT_SENDER_COMMIT_GET_LOGS.calOperationCostTimeFromStart(startTime); - - if (entries.isEmpty()) { - return; - } - - long commitLogIndex = getCommitLogIndex(); - long firstLogIndex = entries.get(0).getCurrLogIndex(); - if (commitLogIndex >= firstLogIndex) { - logger.warn( - "Committing logs that has already been committed: {} >= {}", - commitLogIndex, - firstLogIndex); - entries - .subList(0, (int) (getCommitLogIndex() - entries.get(0).getCurrLogIndex() + 1)) - .clear(); - } - - boolean needToCompactLog = false; - int numToReserveForNew = minNumOfLogsInMem; - if (committedEntryManager.getTotalSize() + entries.size() > maxNumOfLogsInMem) { - needToCompactLog = true; - numToReserveForNew = maxNumOfLogsInMem - entries.size(); - } - - long newEntryMemSize = 0; - for (Log entry : entries) { - if (entry.getByteSize() == 0) { - logger.debug( - "{} should not go here, must be send to the follower, " - + "so the log has been serialized exclude single node mode", - entry); - entry.setByteSize((int) RamUsageEstimator.sizeOf(entry)); - } - newEntryMemSize += entry.getByteSize(); - } - int sizeToReserveForNew = minNumOfLogsInMem; - if (newEntryMemSize + committedEntryManager.getEntryTotalMemSize() > maxLogMemSize) { - needToCompactLog = true; - sizeToReserveForNew = - committedEntryManager.maxLogNumShouldReserve(maxLogMemSize - newEntryMemSize); - } - - if (needToCompactLog) { - int numForNew = Math.min(numToReserveForNew, sizeToReserveForNew); - int sizeToReserveForConfig = minNumOfLogsInMem; - startTime = Statistic.RAFT_SENDER_COMMIT_DELETE_EXCEEDING_LOGS.getOperationStartTime(); - synchronized (this) { - innerDeleteLog(Math.min(sizeToReserveForConfig, numForNew)); - } - Statistic.RAFT_SENDER_COMMIT_DELETE_EXCEEDING_LOGS.calOperationCostTimeFromStart(startTime); - } - - startTime = Statistic.RAFT_SENDER_COMMIT_APPEND_AND_STABLE_LOGS.getOperationStartTime(); - try { - // Operations here are so simple that the execution could be thought - // success or fail together approximately. 
- // TODO: make it real atomic - getCommittedEntryManager().append(entries); - Log lastLog = entries.get(entries.size() - 1); - getUnCommittedEntryManager().stableTo(lastLog.getCurrLogIndex()); - commitIndex = lastLog.getCurrLogIndex(); - - if (ClusterDescriptor.getInstance().getConfig().isEnableRaftLogPersistence()) { - // Cluster could continue provide service when exception is thrown here - getStableEntryManager().append(entries, maxHaveAppliedCommitIndex); - } - } catch (TruncateCommittedEntryException e) { - // fatal error, node won't recover from the error anymore - // TODO: let node quit the raft group once encounter the error - logger.error("{}: Unexpected error:", name, e); - } catch (IOException e) { - // The exception will block the raft service continue accept log. - // TODO: Notify user that the persisted logs before these entries(include) are corrupted. - // TODO: An idea is that we can degrade the service by disable raft log persistent for - // TODO: the group. It needs fine-grained control for the config of Raft log persistence. - logger.error("{}: persistent raft log error:", name, e); - throw new LogExecutionException(e); - } finally { - Statistic.RAFT_SENDER_COMMIT_APPEND_AND_STABLE_LOGS.calOperationCostTimeFromStart(startTime); - } - - startTime = Statistic.RAFT_SENDER_COMMIT_APPLY_LOGS.getOperationStartTime(); - applyEntries(entries); - Statistic.RAFT_SENDER_COMMIT_APPLY_LOGS.calOperationCostTimeFromStart(startTime); - - long unappliedLogSize = commitLogIndex - maxHaveAppliedCommitIndex; - if (unappliedLogSize > ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()) { - logger.debug( - "There are too many unapplied logs [{}], wait for a while to avoid memory overflow", - unappliedLogSize); - try { - Thread.sleep( - unappliedLogSize - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - } - - /** - * Returns whether the index and term passed in match. - * - * @param term request entry term - * @param index request entry index - * @return true or false - */ - public boolean matchTerm(long term, long index) { - long t; - try { - t = getTerm(index); - } catch (Exception e) { - return false; - } - return t == term; - } - - /** - * Used by commitTo to apply newly committed entries - * - * @param entries applying entries - */ - void applyEntries(List entries) { - for (Log entry : entries) { - applyEntry(entry); - } - } - - public void applyEntry(Log entry) { - // For add/remove logs in data groups, this log will be applied immediately when it is - // appended to the raft log. - // In this case, it will apply a log that has been applied. - if (entry.isApplied()) { - return; - } - if (blockAppliedCommitIndex > 0 && entry.getCurrLogIndex() > blockAppliedCommitIndex) { - blockedUnappliedLogList.add(entry); - return; - } - try { - logApplier.apply(entry); - } catch (Exception e) { - entry.setException(e); - entry.setApplied(true); - } - } - - /** - * Check whether the parameters passed in satisfy the following properties. firstIndex <= low <= - * high. 
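The tail of commitTo() applies simple backpressure: when the gap between the commit index and maxHaveAppliedCommitIndex exceeds the configured maximum number of in-memory logs, the committing thread sleeps roughly in proportion to the excess. The sketch below isolates that idea; the threshold value is made up, while the sleep formula mirrors the code above.

```java
/** Isolated sketch of the unapplied-log backpressure at the end of commitTo(). */
public class CommitBackpressureSketch {

  static final long MAX_NUM_OF_LOGS_IN_MEM = 1000; // hypothetical config value

  /** Sleep roughly in proportion to how far the applier lags behind the commit index. */
  static void throttle(long commitIndex, long maxHaveAppliedCommitIndex)
      throws InterruptedException {
    long unapplied = commitIndex - maxHaveAppliedCommitIndex;
    if (unapplied > MAX_NUM_OF_LOGS_IN_MEM) {
      long sleepMs = unapplied - MAX_NUM_OF_LOGS_IN_MEM;
      System.out.println("applier lags by " + unapplied + " logs, sleeping " + sleepMs + " ms");
      Thread.sleep(sleepMs);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    throttle(5000, 4800); // within the limit, no sleep
    throttle(5000, 3900); // 1100 unapplied logs, sleeps ~100 ms
  }
}
```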
- * - * @param low request index low bound - * @param high request index upper bound - * @throws EntryCompactedException - * @throws GetEntriesWrongParametersException - */ - void checkBound(long low, long high) - throws EntryCompactedException, GetEntriesWrongParametersException { - if (low > high) { - logger.error("{}: invalid getEntries: parameter: {} > {}", name, low, high); - throw new GetEntriesWrongParametersException(low, high); - } - long first = getFirstIndex(); - if (low < first) { - logger.error( - "{}: CheckBound out of index: parameter: {} , lower bound: {} ", name, low, high); - throw new EntryCompactedException(low, first); - } - } - - /** - * findConflict finds the index of the conflict. It returns the first pair of conflicting entries - * between the existing entries and the given entries, if there are any. If there is no - * conflicting entries, and the existing entries contains all the given entries, -1 will be - * returned. If there is no conflicting entries, but the given entries contains new entries, the - * index of the first new entry will be returned. An entry is considered to be conflicting if it - * has the same index but a different term. The index of the given entries MUST be continuously - * increasing. - * - * @param entries request entries - * @return -1 or conflictIndex - */ - long findConflict(List entries) { - for (Log entry : entries) { - if (!matchTerm(entry.getCurrLogTerm(), entry.getCurrLogIndex())) { - if (entry.getCurrLogIndex() <= getLastLogIndex()) { - logger.info("found conflict at index {}", entry.getCurrLogIndex()); - } - return entry.getCurrLogIndex(); - } - } - return -1; - } - - @TestOnly - protected RaftLogManager( - CommittedEntryManager committedEntryManager, - StableEntryManager stableEntryManager, - LogApplier applier) { - this.setCommittedEntryManager(committedEntryManager); - this.setStableEntryManager(stableEntryManager); - this.logApplier = applier; - long first = committedEntryManager.getFirstIndex(); - long last = committedEntryManager.getLastIndex(); - this.setUnCommittedEntryManager(new UnCommittedEntryManager(last + 1)); - this.commitIndex = last; - this.maxHaveAppliedCommitIndex = first; - this.blockAppliedCommitIndex = -1; - this.blockedUnappliedLogList = new CopyOnWriteArrayList<>(); - this.checkLogApplierExecutorService = - IoTDBThreadPoolFactory.newSingleThreadExecutorWithDaemon("check-log-applier-" + name); - this.checkLogApplierFuture = checkLogApplierExecutorService.submit(this::checkAppliedLogIndex); - for (int i = 0; i < logUpdateConditions.length; i++) { - logUpdateConditions[i] = new Object(); - } - } - - @TestOnly - void setMinNumOfLogsInMem(int minNumOfLogsInMem) { - this.minNumOfLogsInMem = minNumOfLogsInMem; - } - - @TestOnly - public void setMaxHaveAppliedCommitIndex(long maxHaveAppliedCommitIndex) { - this.checkLogApplierExecutorService.shutdownNow(); - try { - this.checkLogApplierExecutorService.awaitTermination(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - this.maxHaveAppliedCommitIndex = maxHaveAppliedCommitIndex; - } - - public void close() { - getStableEntryManager().close(); - if (deleteLogExecutorService != null) { - deleteLogExecutorService.shutdownNow(); - if (deleteLogFuture != null) { - deleteLogFuture.cancel(true); - } - - try { - deleteLogExecutorService.awaitTermination(20, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("Close delete log thread interrupted"); - } - 
deleteLogExecutorService = null; - } - - if (checkLogApplierExecutorService != null) { - checkLogApplierExecutorService.shutdownNow(); - checkLogApplierFuture.cancel(true); - try { - checkLogApplierExecutorService.awaitTermination(20, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("Close check log applier thread interrupted"); - } - checkLogApplierExecutorService = null; - } - - if (logApplier != null) { - logApplier.close(); - } - } - - UnCommittedEntryManager getUnCommittedEntryManager() { - return unCommittedEntryManager; - } - - private void setUnCommittedEntryManager(UnCommittedEntryManager unCommittedEntryManager) { - this.unCommittedEntryManager = unCommittedEntryManager; - } - - CommittedEntryManager getCommittedEntryManager() { - return committedEntryManager; - } - - private void setCommittedEntryManager(CommittedEntryManager committedEntryManager) { - this.committedEntryManager = committedEntryManager; - } - - public StableEntryManager getStableEntryManager() { - return stableEntryManager; - } - - private void setStableEntryManager(StableEntryManager stableEntryManager) { - this.stableEntryManager = stableEntryManager; - } - - public long getMaxHaveAppliedCommitIndex() { - return maxHaveAppliedCommitIndex; - } - - /** check whether delete the committed log */ - void checkDeleteLog() { - try { - synchronized (this) { - if (committedEntryManager.getTotalSize() <= minNumOfLogsInMem) { - return; - } - innerDeleteLog(minNumOfLogsInMem); - } - } catch (Exception e) { - logger.error("{}, error occurred when checking delete log", name, e); - } - } - - private void innerDeleteLog(int sizeToReserve) { - long removeSize = (long) committedEntryManager.getTotalSize() - sizeToReserve; - if (removeSize <= 0) { - return; - } - - long compactIndex = - Math.min(committedEntryManager.getDummyIndex() + removeSize, maxHaveAppliedCommitIndex - 1); - try { - logger.debug( - "{}: Before compaction index {}-{}, compactIndex {}, removeSize {}, committedLogSize " - + "{}, maxAppliedLog {}", - name, - getFirstIndex(), - getLastLogIndex(), - compactIndex, - removeSize, - committedEntryManager.getTotalSize(), - maxHaveAppliedCommitIndex); - getCommittedEntryManager().compactEntries(compactIndex); - if (ClusterDescriptor.getInstance().getConfig().isEnableRaftLogPersistence()) { - getStableEntryManager().removeCompactedEntries(compactIndex); - } - logger.debug( - "{}: After compaction index {}-{}, committedLogSize {}", - name, - getFirstIndex(), - getLastLogIndex(), - committedEntryManager.getTotalSize()); - } catch (EntryUnavailableException e) { - logger.error("{}: regular compact log entries failed, error={}", name, e.getMessage()); - } - } - - public Object getLogUpdateCondition(long logIndex) { - return logUpdateConditions[(int) (logIndex % logUpdateConditions.length)]; - } - - void applyAllCommittedLogWhenStartUp() { - long lo = maxHaveAppliedCommitIndex; - long hi = getCommittedEntryManager().getLastIndex() + 1; - if (lo >= hi) { - logger.info( - "{}: the maxHaveAppliedCommitIndex={}, lastIndex={}, no need to reapply", - name, - maxHaveAppliedCommitIndex, - hi); - return; - } - - List entries = new ArrayList<>(getCommittedEntryManager().getEntries(lo, hi)); - applyEntries(entries); - } - - public void checkAppliedLogIndex() { - while (!Thread.currentThread().isInterrupted()) { - try { - doCheckAppliedLogIndex(); - } catch (Exception e) { - logger.error("{}, an exception occurred when checking the applied log index", name, e); - } - } - 
logger.info( - "{}, the check-log-applier thread {} is interrupted", - name, - Thread.currentThread().getName()); - } - - void doCheckAppliedLogIndex() { - long nextToCheckIndex = maxHaveAppliedCommitIndex + 1; - try { - if (nextToCheckIndex > commitIndex - || nextToCheckIndex > getCommittedEntryManager().getLastIndex() - || (blockAppliedCommitIndex > 0 && blockAppliedCommitIndex < nextToCheckIndex)) { - // avoid spinning - Thread.sleep(5); - return; - } - Log log = getCommittedEntryManager().getEntry(nextToCheckIndex); - if (log == null || log.getCurrLogIndex() != nextToCheckIndex) { - logger.warn( - "{}, get log error when checking the applied log index, log={}, nextToCheckIndex={}", - name, - log, - nextToCheckIndex); - return; - } - synchronized (log) { - while (!log.isApplied() && maxHaveAppliedCommitIndex < log.getCurrLogIndex()) { - // wait until the log is applied or a newer snapshot is installed - log.wait(5); - } - } - synchronized (changeApplyCommitIndexCond) { - // maxHaveAppliedCommitIndex may change if a snapshot is applied concurrently - maxHaveAppliedCommitIndex = Math.max(maxHaveAppliedCommitIndex, nextToCheckIndex); - } - logger.debug( - "{}: log={} is applied, nextToCheckIndex={}, commitIndex={}, maxHaveAppliedCommitIndex={}", - name, - log, - nextToCheckIndex, - commitIndex, - maxHaveAppliedCommitIndex); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.info("{}: do check applied log index is interrupt", name); - } catch (EntryCompactedException e) { - synchronized (changeApplyCommitIndexCond) { - // maxHaveAppliedCommitIndex may change if a snapshot is applied concurrently - maxHaveAppliedCommitIndex = Math.max(maxHaveAppliedCommitIndex, nextToCheckIndex); - } - logger.debug( - "{}: compacted log is assumed applied, nextToCheckIndex={}, commitIndex={}, " - + "maxHaveAppliedCommitIndex={}", - name, - nextToCheckIndex, - commitIndex, - maxHaveAppliedCommitIndex); - } - } - - /** - * Clear the fence that blocks the application of new logs, and continue to apply the cached - * unapplied logs. - */ - public void resetBlockAppliedCommitIndex() { - this.blockAppliedCommitIndex = -1; - this.reapplyBlockedLogs(); - } - - /** - * Set a fence to prevent newer logs, which have larger indexes than `blockAppliedCommitIndex`, - * from being applied, so the underlying state machine may remain unchanged for a while for - * snapshots. New committed logs will be cached until `resetBlockAppliedCommitIndex()` is called. - * - * @param blockAppliedCommitIndex - */ - public void setBlockAppliedCommitIndex(long blockAppliedCommitIndex) { - this.blockAppliedCommitIndex = blockAppliedCommitIndex; - } - - /** Apply the committed logs that were previously blocked by `blockAppliedCommitIndex` if any. 
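setBlockAppliedCommitIndex(), resetBlockAppliedCommitIndex() and blockedUnappliedLogList form a small fence protocol: while a snapshot is in progress, committed logs above the fence are parked instead of applied, and they are replayed once the fence is lifted. A compact sketch of that protocol, with a toy state machine instead of the real applier:

```java
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

/** Sketch of the apply fence used while a snapshot is being taken. */
public class ApplyFenceSketch {

  private volatile long blockAppliedCommitIndex = -1; // -1 means no fence
  private final List<Long> blockedLogs = new CopyOnWriteArrayList<>();
  private final List<Long> appliedLogs = new CopyOnWriteArrayList<>(); // toy "state machine"

  /** Apply a committed log, or park it if it lies above the fence. */
  void applyEntry(long logIndex) {
    if (blockAppliedCommitIndex > 0 && logIndex > blockAppliedCommitIndex) {
      blockedLogs.add(logIndex);
      return;
    }
    appliedLogs.add(logIndex);
  }

  void setBlockAppliedCommitIndex(long index) {
    blockAppliedCommitIndex = index;
  }

  /** Lift the fence and replay everything that was parked while it was up. */
  void resetBlockAppliedCommitIndex() {
    blockAppliedCommitIndex = -1;
    for (long logIndex : blockedLogs) {
      applyEntry(logIndex);
    }
    blockedLogs.clear();
  }

  public static void main(String[] args) {
    ApplyFenceSketch fence = new ApplyFenceSketch();
    fence.applyEntry(1);
    fence.setBlockAppliedCommitIndex(1);  // snapshot at index 1 starts
    fence.applyEntry(2);                  // parked
    fence.applyEntry(3);                  // parked
    fence.resetBlockAppliedCommitIndex(); // snapshot done, 2 and 3 are replayed
    System.out.println("applied: " + fence.appliedLogs); // [1, 2, 3]
  }
}
```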
*/ - private void reapplyBlockedLogs() { - if (!blockedUnappliedLogList.isEmpty()) { - applyEntries(blockedUnappliedLogList); - logger.info("{}: reapply {} number of logs", name, blockedUnappliedLogList.size()); - } - blockedUnappliedLogList.clear(); - } - - public String getName() { - return name; - } - - public long getBlockAppliedCommitIndex() { - return blockAppliedCommitIndex; - } - - public RaftLogManager(LogApplier logApplier) { - this.logApplier = logApplier; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/UnCommittedEntryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/UnCommittedEntryManager.java deleted file mode 100644 index 826e0762a6e7..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/UnCommittedEntryManager.java +++ /dev/null @@ -1,276 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.manage; - -import org.apache.iotdb.cluster.exception.EntryUnavailableException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.commons.utils.TestOnly; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -public class UnCommittedEntryManager { - - private static final Logger logger = LoggerFactory.getLogger(UnCommittedEntryManager.class); - // all entries that have not been committed. - private List entries; - // the first uncommitted entry index. - private long offset; - - UnCommittedEntryManager(long offset) { - this.offset = offset; - this.entries = Collections.synchronizedList(new ArrayList<>()); - } - - /** - * Return the first uncommitted index. - * - * @return offset - */ - long getFirstUnCommittedIndex() { - return offset; - } - - /** - * Return last entry's index if this instance has at least one uncommitted entry. - * - * @return -1 if entries are empty, or last entry's index - */ - long maybeLastIndex() { - int entryNum = entries.size(); - while (entryNum != 0) { - try { - return entries.get(entryNum - 1).getCurrLogIndex(); - } catch (IndexOutOfBoundsException e) { - // the exception is thrown when there is a concurrent deletion, which is rare, so a retry - // is usually enough and it is not likely that we will get stuck here - entryNum = entries.size(); - } - } - return -1; - } - - /** - * Return the entry's term for given index. Note that the called should ensure index >= offset. 
- * - * @param index request entry index - * @return -1 if index < offset, or index > last or entries is empty, or return the entry's term - * for given index - * @throws EntryUnavailableException - */ - @SuppressWarnings("java:S1135") // ignore todos - long maybeTerm(long index) { - while (true) { - int entryPos = (int) (index - offset); - if (entryPos < 0) { - logger.debug( - "invalid unCommittedEntryManager maybeTerm : parameter: index({}) < offset({})", - index, - index - entryPos); - return -1; - } - long last = maybeLastIndex(); - if (last == -1 || index > last) { - return -1; - } - - Log log; - // TODO-Cluster: improve concurrent safety - // the following condition only holds when there are concurrent log deletions, which are - // rare, so we are confident that we will not be stuck here forever - try { - log = entries.get(entryPos); - if (log.getCurrLogIndex() == index) { - return log.getCurrLogTerm(); - } - } catch (IndexOutOfBoundsException e) { - // continue - } - } - } - - /** - * Remove useless prefix entries as long as these entries has been committed and persisted. This - * method is called after persisting newly committed entries or applying a snapshot. - * - * @param index request entry's index - */ - void stableTo(long index) { - if (index < offset + entries.size() && index >= offset) { - entries.subList(0, (int) (index + 1 - offset)).clear(); - offset = index + 1; - } - } - - /** - * Update offset and clear entries because leader's snapshot is more up-to-date. This method is - * only called for applying snapshot from leader. - * - * @param snapshot leader's snapshot - */ - void applyingSnapshot(Snapshot snapshot) { - this.offset = snapshot.getLastLogIndex() + 1; - this.entries.clear(); - } - - /** - * TruncateAndAppend uncommitted entries. This method will truncate conflict entries if it finds - * inconsistencies. Note that the caller should ensure appendingEntries[0].index <= - * entries[entries.size()-1].index + 1. Note that the caller should ensure not to truncate entries - * which have been committed. - * - * @param appendingEntries request entries - */ - void truncateAndAppend(List appendingEntries) { - if (appendingEntries.isEmpty()) { - return; - } - Log firstAppendingEntry = appendingEntries.get(0); - Log lastAppendingEntry = appendingEntries.get(appendingEntries.size() - 1); - if (maybeTerm(firstAppendingEntry.getCurrLogIndex()) == firstAppendingEntry.getCurrLogTerm() - && maybeTerm(lastAppendingEntry.getCurrLogIndex()) == lastAppendingEntry.getCurrLogTerm()) { - // skip existing entry - return; - } - - long after = appendingEntries.get(0).getCurrLogIndex(); - long len = after - offset; - if (len < 0) { - // the logs are being truncated to before our current offset portion, which is committed - // entries - logger.error("The logs which first index is {} are going to truncate committed logs", after); - } else if (len == entries.size()) { - // after is the next index in the entries - // directly append - entries.addAll(appendingEntries); - } else { - // clear conflict entries - // then append - logger.info("truncate the entries after index {}", after); - int truncateIndex = (int) (after - offset); - if (truncateIndex < entries.size()) { - entries.subList(truncateIndex, entries.size()).clear(); - } - entries.addAll(appendingEntries); - } - } - - /** - * TruncateAndAppend uncommitted entry. This method will truncate conflict entries if it finds - * inconsistencies. Note that the caller should ensure not to truncate entries which have been - * committed. 
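truncateAndAppend() distinguishes three cases by the offset of the first incoming entry: it refuses to touch committed entries, appends directly when the entry continues the list, and otherwise truncates the conflicting suffix before appending. The worked example below shows the truncation case on a plain list of (index, term) pairs; it is a simplification of the method, not the original class.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/** Worked example of the truncate-and-append behaviour for uncommitted entries. */
public class TruncateAndAppendExample {

  static final class Entry {
    final long index;
    final long term;
    Entry(long index, long term) { this.index = index; this.term = term; }
    @Override public String toString() { return index + "/" + term; }
  }

  private final long offset = 5; // first uncommitted index
  private final List<Entry> entries = new ArrayList<>(Arrays.asList(
      new Entry(5, 2), new Entry(6, 2), new Entry(7, 2)));

  void truncateAndAppend(List<Entry> appending) {
    if (appending.isEmpty()) {
      return;
    }
    long after = appending.get(0).index;
    long len = after - offset;
    if (len < 0) {
      System.out.println("refusing to truncate committed entries at " + after);
    } else if (len == entries.size()) {
      entries.addAll(appending); // plain continuation, just append
    } else {
      // Conflict: drop everything from 'after' on, then append the new suffix.
      entries.subList((int) len, entries.size()).clear();
      entries.addAll(appending);
    }
  }

  public static void main(String[] args) {
    TruncateAndAppendExample manager = new TruncateAndAppendExample();
    // A new leader overwrites indexes 6 and 7 with term-3 entries and adds index 8.
    manager.truncateAndAppend(Arrays.asList(new Entry(6, 3), new Entry(7, 3), new Entry(8, 3)));
    System.out.println(manager.entries); // [5/2, 6/3, 7/3, 8/3]
  }
}
```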
- * - * @param appendingEntry request entry - */ - void truncateAndAppend(Log appendingEntry) { - if (maybeTerm(appendingEntry.getCurrLogIndex()) == appendingEntry.getCurrLogTerm()) { - // skip existing entry - return; - } - - long after = appendingEntry.getCurrLogIndex(); - long len = after - offset; - if (len < 0) { - // the logs are being truncated to before our current offset portion, which is committed - // entries - logger.error("The logs which first index is {} are going to truncate committed logs", after); - } else if (len == entries.size()) { - // after is the next index in the entries - // directly append - entries.add(appendingEntry); - } else { - // clear conflict entries - // then append - logger.info( - "truncate the entries after index {}, append a new entry {}", after, appendingEntry); - int truncateIndex = (int) (after - offset); - if (truncateIndex < entries.size()) { - entries.subList(truncateIndex, entries.size()).clear(); - } - entries.add(appendingEntry); - } - } - - /** - * Pack entries from low through high - 1, just like slice (entries[low:high]). offset <= low <= - * high. Note that caller must ensure low <= high. - * - * @param low request index low bound - * @param high request index upper bound - */ - public List getEntries(long low, long high) { - if (low > high) { - if (logger.isDebugEnabled()) { - logger.debug( - "invalid unCommittedEntryManager getEntries: parameter: low({}) > high({})", low, high); - } - return Collections.emptyList(); - } - long upper = offset + entries.size(); - if (low > upper) { - // don't throw a exception to support - // getEntries(low, Integer.MAX_VALUE) if low is larger than lastIndex. - logger.info( - "unCommittedEntryManager getEntries[{},{}) out of bound : [{},{}] , return empty ArrayList", - low, - high, - offset, - upper); - return Collections.emptyList(); - } - if (low < offset) { - logger.debug( - "unCommittedEntryManager getEntries[{},{}) out of bound : [{},{}]", - low, - high, - offset, - upper); - low = offset; - } - if (high > upper) { - logger.info( - "unCommittedEntryManager getEntries[{},{}) out of bound : [{},{}] , adjust parameter 'high' to {}", - low, - high, - offset, - upper, - upper); - // don't throw a exception to support getEntries(low, Integer.MAX_VALUE). - high = upper; - } - return entries.subList((int) (low - offset), (int) (high - offset)); - } - - @TestOnly - UnCommittedEntryManager(long offset, List entries) { - this.offset = offset; - this.entries = entries; - } - - @TestOnly - List getAllEntries() { - return entries; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/LogManagerMeta.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/LogManagerMeta.java deleted file mode 100644 index 409aabfd5817..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/LogManagerMeta.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
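The LogManagerMeta class that follows persists exactly five long fields, so its serialized form is a fixed 40-byte record. A minimal round-trip sketch with plain java.nio; the field order matches the class, but the MetaCodec helper itself is illustrative:

import java.nio.ByteBuffer;

final class MetaCodec {
    static ByteBuffer encode(long commitLogTerm, long commitLogIndex,
                             long lastLogIndex, long lastLogTerm,
                             long maxHaveAppliedCommitIndex) {
        ByteBuffer buf = ByteBuffer.allocate(Long.BYTES * 5);
        buf.putLong(commitLogTerm)
           .putLong(commitLogIndex)
           .putLong(lastLogIndex)
           .putLong(lastLogTerm)
           .putLong(maxHaveAppliedCommitIndex);
        buf.flip(); // ready to be read back or written to a stream
        return buf;
    }

    static long[] decode(ByteBuffer buf) {
        // Order must match encode(): term, commitIndex, lastIndex, lastTerm, maxApplied.
        return new long[] {
            buf.getLong(), buf.getLong(), buf.getLong(), buf.getLong(), buf.getLong()
        };
    }
}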
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.log.manage.serializable; - -import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; - -import java.nio.ByteBuffer; - -public class LogManagerMeta { - - private long commitLogTerm = -1; - private long commitLogIndex = -1; - private long lastLogIndex = -1; - private long lastLogTerm = -1; - private long maxHaveAppliedCommitIndex = -1; - - public static LogManagerMeta deserialize(ByteBuffer buffer) { - LogManagerMeta res = new LogManagerMeta(); - res.commitLogTerm = ReadWriteIOUtils.readLong(buffer); - res.commitLogIndex = ReadWriteIOUtils.readLong(buffer); - res.lastLogIndex = ReadWriteIOUtils.readLong(buffer); - res.lastLogTerm = ReadWriteIOUtils.readLong(buffer); - res.maxHaveAppliedCommitIndex = ReadWriteIOUtils.readLong(buffer); - - return res; - } - - public long getCommitLogIndex() { - return commitLogIndex; - } - - void setCommitLogIndex(long commitLogIndex) { - this.commitLogIndex = commitLogIndex; - } - - public ByteBuffer serialize() { - // 5 is the number of attributes in class LogManagerMeta - ByteBuffer byteBuffer = ByteBuffer.allocate(Long.BYTES * 5); - byteBuffer.putLong(commitLogTerm); - byteBuffer.putLong(commitLogIndex); - byteBuffer.putLong(lastLogIndex); - byteBuffer.putLong(lastLogTerm); - byteBuffer.putLong(maxHaveAppliedCommitIndex); - - byteBuffer.flip(); - return byteBuffer; - } - - @Override - public String toString() { - return "LogManagerMeta{" - + " commitLogTerm=" - + commitLogTerm - + ", commitLogIndex=" - + commitLogIndex - + ", lastLogIndex=" - + lastLogIndex - + ", lastLogTerm=" - + lastLogTerm - + ", maxHaveAppliedCommitIndex=" - + maxHaveAppliedCommitIndex - + "}"; - } - - public long getLastLogIndex() { - return lastLogIndex; - } - - public void setLastLogIndex(long lastLogIndex) { - this.lastLogIndex = lastLogIndex; - } - - public long getLastLogTerm() { - return lastLogTerm; - } - - public void setLastLogTerm(long lastLogTerm) { - this.lastLogTerm = lastLogTerm; - } - - public void setCommitLogTerm(long commitLogTerm) { - this.commitLogTerm = commitLogTerm; - } - - public long getMaxHaveAppliedCommitIndex() { - return maxHaveAppliedCommitIndex; - } - - public void setMaxHaveAppliedCommitIndex(long maxHaveAppliedCommitIndex) { - this.maxHaveAppliedCommitIndex = maxHaveAppliedCommitIndex; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (!(o instanceof LogManagerMeta)) { - return false; - } - - LogManagerMeta that = (LogManagerMeta) o; - - return new EqualsBuilder() - .append(commitLogIndex, that.commitLogIndex) - .append(lastLogIndex, that.lastLogIndex) - .append(lastLogTerm, that.lastLogTerm) - .append(commitLogTerm, that.commitLogTerm) - .append(maxHaveAppliedCommitIndex, that.maxHaveAppliedCommitIndex) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(commitLogIndex) - .append(lastLogIndex) - .append(lastLogTerm) - .append(commitLogTerm) - .append(maxHaveAppliedCommitIndex) - 
.toHashCode(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java deleted file mode 100644 index beacdf5c237c..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializer.java +++ /dev/null @@ -1,1407 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.log.manage.serializable; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.UnknownLogTypeException; -import org.apache.iotdb.cluster.log.HardState; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogParser; -import org.apache.iotdb.cluster.log.StableEntryManager; -import org.apache.iotdb.commons.file.SystemFileFactory; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.engine.version.SimpleFileVersionController; -import org.apache.iotdb.db.engine.version.VersionController; -import org.apache.iotdb.tsfile.utils.BytesUtils; -import org.apache.iotdb.tsfile.utils.Pair; -import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.concurrent.BasicThreadFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.BufferedInputStream; -import java.io.File; -import java.io.FileFilter; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.BufferOverflowException; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedByInterruptException; -import java.nio.file.Files; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Date; -import java.util.List; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -import static org.apache.iotdb.commons.conf.IoTDBConstant.FILE_NAME_SEPARATOR; - -public class SyncLogDequeSerializer implements StableEntryManager { - - private static final Logger logger = LoggerFactory.getLogger(SyncLogDequeSerializer.class); - private static final String LOG_DATA_FILE_SUFFIX = "data"; - private static final String LOG_INDEX_FILE_SUFFIX = "idx"; - - /** the log data files */ - private List logDataFileList; - - /** the log index files */ - private 
List logIndexFileList; - - private LogParser parser = LogParser.getINSTANCE(); - private File metaFile; - private FileOutputStream currentLogDataOutputStream; - private FileOutputStream currentLogIndexOutputStream; - private LogManagerMeta meta; - private HardState state; - - /** min version of available log */ - private long minAvailableVersion = 0; - - /** max version of available log */ - private long maxAvailableVersion = Long.MAX_VALUE; - - private String logDir; - - private VersionController versionController; - - private ByteBuffer logDataBuffer = - ByteBuffer.allocate(ClusterDescriptor.getInstance().getConfig().getRaftLogBufferSize()); - private ByteBuffer logIndexBuffer = - ByteBuffer.allocate(ClusterDescriptor.getInstance().getConfig().getRaftLogBufferSize()); - - private long offsetOfTheCurrentLogDataOutputStream = 0; - - private static final int MAX_NUMBER_OF_LOGS_PER_FETCH_ON_DISK = - ClusterDescriptor.getInstance().getConfig().getMaxNumberOfLogsPerFetchOnDisk(); - - private static final String LOG_META = "logMeta"; - private static final String LOG_META_TMP = "logMeta.tmp"; - - /** - * file name pattern: - * - *

for log data file: ${startLogIndex}-${endLogIndex}-{version}-data - * - *

for log index file: ${startLogIndex}-${endLogIndex}-{version}-idx - */ - private static final int FILE_NAME_PART_LENGTH = 4; - - private int maxRaftLogIndexSizeInMemory = - ClusterDescriptor.getInstance().getConfig().getMaxRaftLogIndexSizeInMemory(); - - private int maxRaftLogPersistDataSizePerFile = - ClusterDescriptor.getInstance().getConfig().getMaxRaftLogPersistDataSizePerFile(); - - private int maxNumberOfPersistRaftLogFiles = - ClusterDescriptor.getInstance().getConfig().getMaxNumberOfPersistRaftLogFiles(); - - private int maxPersistRaftLogNumberOnDisk = - ClusterDescriptor.getInstance().getConfig().getMaxPersistRaftLogNumberOnDisk(); - - private ScheduledExecutorService persistLogDeleteExecutorService; - private ScheduledFuture persistLogDeleteLogFuture; - - /** - * indicate the first raft log's index of {@link SyncLogDequeSerializer#logIndexOffsetList}, for - * example, if firstLogIndex=1000, then the offset of the log index 1000 equals - * logIndexOffsetList[0], the offset of the log index 1001 equals logIndexOffsetList[1], and so - * on. - */ - private long firstLogIndex = 0; - - /** - * the offset of the log's index, for example, the first value is the offset of index - * ${firstLogIndex}, the second value is the offset of index ${firstLogIndex+1} - */ - private List logIndexOffsetList; - - private static final int LOG_DELETE_CHECK_INTERVAL_SECOND = 5; - - /** the lock uses when change the log data files or log index files */ - private final Lock lock = new ReentrantLock(); - - private volatile boolean isClosed = false; - - private void initCommonProperties() { - this.logDataFileList = new ArrayList<>(); - this.logIndexFileList = new ArrayList<>(); - this.logIndexOffsetList = new ArrayList<>(maxRaftLogIndexSizeInMemory); - try { - versionController = new SimpleFileVersionController(logDir); - } catch (IOException e) { - logger.error("log serializer build version controller failed", e); - } - this.persistLogDeleteExecutorService = - new ScheduledThreadPoolExecutor( - 1, - new BasicThreadFactory.Builder() - .namingPattern("persist-log-delete-" + logDir) - .daemon(true) - .build()); - - this.persistLogDeleteLogFuture = - persistLogDeleteExecutorService.scheduleAtFixedRate( - this::checkDeletePersistRaftLog, - LOG_DELETE_CHECK_INTERVAL_SECOND, - LOG_DELETE_CHECK_INTERVAL_SECOND, - TimeUnit.SECONDS); - } - - /** - * for log tools - * - * @param logPath log dir path - */ - public SyncLogDequeSerializer(String logPath) { - logDir = logPath + File.separator; - initCommonProperties(); - initMetaAndLogFiles(); - } - - /** - * log in disk is [size of log1 | log1 buffer] [size of log2 | log2 buffer] - * - *
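Given that layout, the data file is a plain sequence of length-prefixed records and the idx file a parallel sequence of 8-byte offsets into it. A hedged sketch of what appending one serialized entry does to the two buffers; the field and class names here are illustrative, not the serializer's own:

import java.nio.ByteBuffer;

final class RaftLogAppendSketch {
    private final ByteBuffer dataBuffer = ByteBuffer.allocate(16 * 1024 * 1024);
    private final ByteBuffer indexBuffer = ByteBuffer.allocate(16 * 1024 * 1024);
    private long nextOffsetInDataFile = 0; // running byte offset in the log data file

    void append(ByteBuffer serializedLog) {
        int payloadSize = serializedLog.remaining();
        dataBuffer.putInt(payloadSize);              // length prefix
        dataBuffer.put(serializedLog);               // payload
        indexBuffer.putLong(nextOffsetInDataFile);   // where this record starts in the data file
        nextOffsetInDataFile += Integer.BYTES + payloadSize;
    }
}

The index entry points at the length prefix, so a reader can seek to it and immediately read [int size][payload].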

build serializer with node id - */ - public SyncLogDequeSerializer(int nodeIdentifier) { - logDir = getLogDir(nodeIdentifier); - initCommonProperties(); - initMetaAndLogFiles(); - } - - public static String getLogDir(int nodeIdentifier) { - String systemDir = IoTDBDescriptor.getInstance().getConfig().getSystemDir(); - return systemDir - + File.separator - + "raftLog" - + File.separator - + nodeIdentifier - + File.separator; - } - - @TestOnly - String getLogDir() { - return logDir; - } - - @TestOnly - File getMetaFile() { - return metaFile; - } - - /** for log tools */ - @Override - public LogManagerMeta getMeta() { - return meta; - } - - /** Recover all the logs in disk. This function will be called once this instance is created. */ - @Override - public List getAllEntriesAfterAppliedIndex() { - logger.debug( - "getAllEntriesBeforeAppliedIndex, maxHaveAppliedCommitIndex={}, commitLogIndex={}", - meta.getMaxHaveAppliedCommitIndex(), - meta.getCommitLogIndex()); - if (meta.getMaxHaveAppliedCommitIndex() >= meta.getCommitLogIndex()) { - return Collections.emptyList(); - } - return getLogs(meta.getMaxHaveAppliedCommitIndex(), meta.getCommitLogIndex()); - } - - /** - * When raft log files flushed,meta would not be flushed synchronously.So data has flushed to disk - * is uncommitted for persistent LogManagerMeta(meta's info is stale).We need to recover these - * already persistent logs. - * - *

For example, if commitIndex is 5 in the persisted LogManagerMeta but the log files have actually been - * flushed up to index 7, then when the cluster restarts we need to recover logs 6 and 7. - * - *
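In other words, the recovery range is simply (persisted commitLogIndex, last index actually on disk]. A tiny sketch of that bound check; the method name is illustrative:

// Returns the inclusive [from, to] range of log indexes that were flushed to disk but are
// not yet covered by the persisted meta, or null when nothing needs recovery.
static long[] uncoveredRange(long persistedCommitIndex, long lastIndexOnDisk) {
    if (persistedCommitIndex >= lastIndexOnDisk) {
        return null; // the meta already covers everything on disk
    }
    return new long[] {persistedCommitIndex + 1, lastIndexOnDisk};
}

With the example above, uncoveredRange(5, 7) yields [6, 7].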

Maybe,we can extract getAllEntriesAfterAppliedIndex and getAllEntriesAfterCommittedIndex - * into getAllEntriesByIndex,but now there are too many test cases using it. - */ - @Override - public List getAllEntriesAfterCommittedIndex() { - long lastIndex = firstLogIndex + logIndexOffsetList.size() - 1; - logger.debug( - "getAllEntriesAfterCommittedIndex, firstUnCommitIndex={}, lastIndexBeforeStart={}", - meta.getCommitLogIndex() + 1, - lastIndex); - if (meta.getCommitLogIndex() >= lastIndex) { - return Collections.emptyList(); - } - return getLogs(meta.getCommitLogIndex() + 1, lastIndex); - } - - @Override - public void append(List entries, long maxHaveAppliedCommitIndex) throws IOException { - lock.lock(); - try { - putLogs(entries); - Log entry = entries.get(entries.size() - 1); - meta.setCommitLogIndex(entry.getCurrLogIndex()); - meta.setCommitLogTerm(entry.getCurrLogTerm()); - meta.setLastLogIndex(entry.getCurrLogIndex()); - meta.setLastLogTerm(entry.getCurrLogTerm()); - meta.setMaxHaveAppliedCommitIndex(maxHaveAppliedCommitIndex); - logger.debug( - "maxHaveAppliedCommitIndex={}, commitLogIndex={},lastLogIndex={}", - maxHaveAppliedCommitIndex, - meta.getCommitLogIndex(), - meta.getLastLogIndex()); - } catch (BufferOverflowException e) { - throw new IOException( - "Log cannot fit into buffer, please increase raft_log_buffer_size;" - + "otherwise, please increase the JVM memory", - e); - } finally { - lock.unlock(); - } - } - - /** - * Put each log in entries to local buffer. If the buffer overflows, flush the buffer to the disk, - * and try to push the log again. - * - * @param entries logs to put to buffer - */ - private void putLogs(List entries) { - for (Log log : entries) { - logDataBuffer.mark(); - logIndexBuffer.mark(); - ByteBuffer logData = log.serialize(); - int size = logData.capacity() + Integer.BYTES; - try { - logDataBuffer.putInt(logData.capacity()); - logDataBuffer.put(logData); - logIndexBuffer.putLong(offsetOfTheCurrentLogDataOutputStream); - logIndexOffsetList.add(offsetOfTheCurrentLogDataOutputStream); - offsetOfTheCurrentLogDataOutputStream += size; - } catch (BufferOverflowException e) { - logger.info("Raft log buffer overflow!"); - logDataBuffer.reset(); - logIndexBuffer.reset(); - flushLogBuffer(); - checkCloseCurrentFile(log.getCurrLogIndex() - 1); - logDataBuffer.putInt(logData.capacity()); - logDataBuffer.put(logData); - logIndexBuffer.putLong(offsetOfTheCurrentLogDataOutputStream); - logIndexOffsetList.add(offsetOfTheCurrentLogDataOutputStream); - offsetOfTheCurrentLogDataOutputStream += size; - } - } - } - - private void checkCloseCurrentFile(long commitIndex) { - if (offsetOfTheCurrentLogDataOutputStream > maxRaftLogPersistDataSizePerFile) { - try { - closeCurrentFile(commitIndex); - serializeMeta(meta); - createNewLogFile(logDir, commitIndex + 1); - } catch (IOException e) { - logger.error("check close current file failed", e); - } - } - } - - private void closeCurrentFile(long commitIndex) throws IOException { - if (currentLogDataOutputStream != null) { - currentLogDataOutputStream.close(); - logger.info("{}: Closed a log data file {}", this, getCurrentLogDataFile()); - currentLogDataOutputStream = null; - - File currentLogDataFile = getCurrentLogDataFile(); - String newDataFileName = - currentLogDataFile - .getName() - .replaceAll(String.valueOf(Long.MAX_VALUE), String.valueOf(commitIndex)); - File newCurrentLogDatFile = - SystemFileFactory.INSTANCE.getFile( - currentLogDataFile.getParent() + File.separator + newDataFileName); - if 
(!currentLogDataFile.renameTo(newCurrentLogDatFile)) { - logger.error( - "rename log data file={} to {} failed", - currentLogDataFile.getAbsoluteFile(), - newCurrentLogDatFile); - } - logDataFileList.set(logDataFileList.size() - 1, newCurrentLogDatFile); - - logger.debug( - "rename data file={} to file={}", - currentLogDataFile.getAbsoluteFile(), - newCurrentLogDatFile.getAbsoluteFile()); - } - - if (currentLogIndexOutputStream != null) { - currentLogIndexOutputStream.close(); - logger.info("{}: Closed a log index file {}", this, getCurrentLogIndexFile()); - currentLogIndexOutputStream = null; - - File currentLogIndexFile = getCurrentLogIndexFile(); - String newIndexFileName = - currentLogIndexFile - .getName() - .replaceAll(String.valueOf(Long.MAX_VALUE), String.valueOf(commitIndex)); - File newCurrentLogIndexFile = - SystemFileFactory.INSTANCE.getFile( - currentLogIndexFile.getParent() + File.separator + newIndexFileName); - if (!currentLogIndexFile.renameTo(newCurrentLogIndexFile)) { - logger.error("rename log index file={} failed", currentLogIndexFile.getAbsoluteFile()); - } - logger.debug( - "rename index file={} to file={}", - currentLogIndexFile.getAbsoluteFile(), - newCurrentLogIndexFile.getAbsoluteFile()); - - logIndexFileList.set(logIndexFileList.size() - 1, newCurrentLogIndexFile); - } - - offsetOfTheCurrentLogDataOutputStream = 0; - } - - @Override - public void flushLogBuffer() { - if (isClosed || logDataBuffer.position() == 0) { - return; - } - lock.lock(); - try { - // write into disk - try { - checkStream(); - // 1. write to the log data file - ReadWriteIOUtils.writeWithoutSize( - logDataBuffer, 0, logDataBuffer.position(), currentLogDataOutputStream); - ReadWriteIOUtils.writeWithoutSize( - logIndexBuffer, 0, logIndexBuffer.position(), currentLogIndexOutputStream); - if (ClusterDescriptor.getInstance().getConfig().getFlushRaftLogThreshold() == 0) { - currentLogDataOutputStream.getChannel().force(true); - currentLogIndexOutputStream.getChannel().force(true); - } - } catch (IOException e) { - logger.error("Error in logs serialization: ", e); - return; - } - logDataBuffer.clear(); - logIndexBuffer.clear(); - logger.debug("End flushing log buffer."); - } finally { - lock.unlock(); - } - } - - private void forceFlushLogBufferWithoutCloseFile() { - if (isClosed) { - return; - } - lock.lock(); - flushLogBuffer(); - serializeMeta(meta); - try { - if (currentLogDataOutputStream != null) { - currentLogDataOutputStream.getChannel().force(true); - } - if (currentLogIndexOutputStream != null) { - currentLogIndexOutputStream.getChannel().force(true); - } - } catch (ClosedByInterruptException e) { - // ignore - } catch (IOException e) { - logger.error("Error when force flushing logs serialization: ", e); - } finally { - lock.unlock(); - } - } - - /** flush the log buffer and check if the file needs to be closed */ - @Override - public void forceFlushLogBuffer() { - lock.lock(); - try { - forceFlushLogBufferWithoutCloseFile(); - checkCloseCurrentFile(meta.getCommitLogIndex()); - } finally { - lock.unlock(); - } - } - - @Override - public void setHardStateAndFlush(HardState state) { - this.state = state; - serializeMeta(meta); - } - - @Override - public HardState getHardState() { - return state; - } - - @Override - public void removeCompactedEntries(long index) { - // do nothing - } - - private void initMetaAndLogFiles() { - recoverMetaFile(); - recoverMeta(); - this.firstLogIndex = meta.getCommitLogIndex() + 1; - try { - recoverLogFiles(); - // add init log file - if 
(logDataFileList.isEmpty()) { - createNewLogFile(metaFile.getParentFile().getPath(), meta.getCommitLogIndex() + 1); - } - - } catch (IOException e) { - logger.error("Error in init log file: ", e); - } - } - - /** The file name rules are as follows: ${startLogIndex}-${endLogIndex}-${version}.data */ - private void recoverLogFiles() { - // 1. first we should recover the log index file - recoverLogFiles(LOG_INDEX_FILE_SUFFIX); - - // 2. recover the log data file - recoverLogFiles(LOG_DATA_FILE_SUFFIX); - - // sort by name before recover - logDataFileList.sort(this::comparePersistLogFileName); - logIndexFileList.sort(this::comparePersistLogFileName); - - // 3. recover the last log file in case of abnormal exit - recoverTheLastLogFile(); - } - - private void recoverLogFiles(String logFileType) { - FileFilter logFilter = - pathname -> { - String s = pathname.getName(); - return s.endsWith(logFileType); - }; - - List logFiles = Arrays.asList(metaFile.getParentFile().listFiles(logFilter)); - logger.info("Find log type ={} log files {}", logFileType, logFiles); - - for (File file : logFiles) { - if (checkLogFile(file, logFileType)) { - switch (logFileType) { - case LOG_DATA_FILE_SUFFIX: - logDataFileList.add(file); - break; - case LOG_INDEX_FILE_SUFFIX: - logIndexFileList.add(file); - break; - default: - logger.error("unknown file type={}", logFileType); - } - } - } - } - - /** - * Check that the file is legal or not - * - * @param file file needs to be check - * @param fileType {@link SyncLogDequeSerializer#LOG_DATA_FILE_SUFFIX} or {@link - * SyncLogDequeSerializer#LOG_INDEX_FILE_SUFFIX} - * @return true if the file legal otherwise false - */ - private boolean checkLogFile(File file, String fileType) { - if (file.length() == 0 || !file.getName().endsWith(fileType)) { - try { - if (file.exists() && !file.isDirectory() && file.length() == 0) { - Files.delete(file.toPath()); - } - } catch (IOException e) { - logger.warn("Cannot delete empty log file {}", file, e); - } - return false; - } - - long fileVersion = getFileVersion(file); - // this means system down between save meta and data - if (fileVersion <= minAvailableVersion || fileVersion >= maxAvailableVersion) { - try { - Files.delete(file.toPath()); - } catch (IOException e) { - logger.warn("Cannot delete outdated log file {}", file); - } - return false; - } - - String[] splits = file.getName().split(FILE_NAME_SEPARATOR); - // start index should be smaller than end index - if (Long.parseLong(splits[0]) > Long.parseLong(splits[1])) { - try { - Files.delete(file.toPath()); - } catch (IOException e) { - logger.warn("Cannot delete incorrect log file {}", file); - } - return false; - } - return true; - } - - private void recoverTheLastLogFile() { - if (logIndexFileList.isEmpty()) { - logger.info("no log index file to recover"); - return; - } - - File lastIndexFile = logIndexFileList.get(logIndexFileList.size() - 1); - long endIndex = Long.parseLong(lastIndexFile.getName().split(FILE_NAME_SEPARATOR)[1]); - boolean success = true; - if (endIndex != Long.MAX_VALUE) { - logger.info("last log index file={} no need to recover", lastIndexFile.getAbsoluteFile()); - } else { - success = recoverTheLastLogIndexFile(lastIndexFile); - } - - if (!success) { - logger.error( - "recover log index file failed, clear all logs in disk, {}", - lastIndexFile.getAbsoluteFile()); - forceDeleteAllLogFiles(); - clearFirstLogIndex(); - return; - } - - File lastDataFile = logDataFileList.get(logDataFileList.size() - 1); - endIndex = 
Long.parseLong(lastDataFile.getName().split(FILE_NAME_SEPARATOR)[1]); - if (endIndex != Long.MAX_VALUE) { - logger.info("last log data file={} no need to recover", lastDataFile.getAbsoluteFile()); - return; - } - - success = recoverTheLastLogDataFile(logDataFileList.get(logDataFileList.size() - 1)); - if (!success) { - logger.error( - "recover log data file failed, clear all logs in disk,{}", - lastDataFile.getAbsoluteFile()); - forceDeleteAllLogFiles(); - clearFirstLogIndex(); - } - } - - private boolean recoverTheLastLogDataFile(File file) { - String[] splits = file.getName().split(FILE_NAME_SEPARATOR); - long startIndex = Long.parseLong(splits[0]); - Pair> fileStartAndEndIndex = getLogIndexFile(startIndex); - if (fileStartAndEndIndex.right.left == startIndex) { - long endIndex = fileStartAndEndIndex.right.right; - String newDataFileName = - file.getName().replaceAll(String.valueOf(Long.MAX_VALUE), String.valueOf(endIndex)); - File newLogDataFile = - SystemFileFactory.INSTANCE.getFile(file.getParent() + File.separator + newDataFileName); - if (!file.renameTo(newLogDataFile)) { - logger.error("rename log data file={} failed when recover", file.getAbsoluteFile()); - } - logDataFileList.remove(logDataFileList.size() - 1); - logDataFileList.add(newLogDataFile); - return true; - } - return false; - } - - private boolean recoverTheLastLogIndexFile(File file) { - logger.debug("start to recover the last log index file={}", file.getAbsoluteFile()); - String[] splits = file.getName().split(FILE_NAME_SEPARATOR); - long startIndex = Long.parseLong(splits[0]); - int longLength = 8; - byte[] bytes = new byte[longLength]; - - int totalCount = 0; - long offset = 0; - try (FileInputStream fileInputStream = new FileInputStream(file); - BufferedInputStream bufferedInputStream = new BufferedInputStream(fileInputStream)) { - firstLogIndex = startIndex; - while (bufferedInputStream.read(bytes) != -1) { - offset = BytesUtils.bytesToLong(bytes); - logIndexOffsetList.add(offset); - totalCount++; - } - } catch (IOException e) { - logger.error("recover log index file failed,", e); - } - long endIndex = startIndex + totalCount - 1; - logger.debug( - "recover log index file={}, startIndex={}, endIndex={}", - file.getAbsoluteFile(), - startIndex, - endIndex); - - if (endIndex < meta.getCommitLogIndex()) { - logger.error( - "due to the last abnormal exit, part of the raft logs are lost. 
" - + "The commit index saved by the meta shall prevail, and all logs will be deleted" - + "meta commitLogIndex={}, endIndex={}", - meta.getCommitLogIndex(), - endIndex); - return false; - } - if (endIndex >= startIndex) { - String newIndexFileName = - file.getName().replaceAll(String.valueOf(Long.MAX_VALUE), String.valueOf(endIndex)); - File newLogIndexFile = - SystemFileFactory.INSTANCE.getFile(file.getParent() + File.separator + newIndexFileName); - if (!file.renameTo(newLogIndexFile)) { - logger.error("rename log index file={} failed when recover", file.getAbsoluteFile()); - } - logIndexFileList.set(logIndexFileList.size() - 1, newLogIndexFile); - } else { - logger.error("recover log index file failed,{}", file.getAbsoluteFile()); - return false; - } - return true; - } - - private void clearFirstLogIndex() { - firstLogIndex = meta.getCommitLogIndex() + 1; - logIndexOffsetList.clear(); - } - - private void recoverMetaFile() { - metaFile = SystemFileFactory.INSTANCE.getFile(logDir + LOG_META); - - // build dir - if (!metaFile.getParentFile().exists()) { - metaFile.getParentFile().mkdirs(); - } - - File tempMetaFile = SystemFileFactory.INSTANCE.getFile(logDir + LOG_META_TMP); - // if we have temp file - if (tempMetaFile.exists()) { - recoverMetaFileFromTemp(tempMetaFile); - } else if (!metaFile.exists()) { - createNewMetaFile(); - } - } - - private void recoverMetaFileFromTemp(File tempMetaFile) { - // if temp file is empty, just return - if (tempMetaFile.length() == 0) { - try { - Files.delete(tempMetaFile.toPath()); - } catch (IOException e) { - logger.warn("Cannot delete file {}", tempMetaFile); - } - } - // else use temp file rather than meta file - else { - try { - Files.deleteIfExists(metaFile.toPath()); - } catch (IOException e) { - logger.warn("Cannot delete file {}", metaFile); - } - if (!tempMetaFile.renameTo(metaFile)) { - logger.warn("Failed to rename log meta file"); - } - } - } - - private void createNewMetaFile() { - try { - if (!metaFile.createNewFile()) { - logger.warn("Cannot create log meta file"); - } - } catch (IOException e) { - logger.error("Cannot create new log meta file ", e); - } - } - - private void checkStream() throws FileNotFoundException { - if (currentLogDataOutputStream == null) { - currentLogDataOutputStream = new FileOutputStream(getCurrentLogDataFile(), true); - logger.info("{}: Opened a new log data file: {}", this, getCurrentLogDataFile()); - } - - if (currentLogIndexOutputStream == null) { - currentLogIndexOutputStream = new FileOutputStream(getCurrentLogIndexFile(), true); - logger.info("{}: Opened a new index data file: {}", this, getCurrentLogIndexFile()); - } - } - - /** for unclosed file, the file name is ${startIndex}-${Long.MAX_VALUE}-{version} */ - private void createNewLogFile(String dirName, long startLogIndex) throws IOException { - lock.lock(); - try { - long nextVersion = versionController.nextVersion(); - long endLogIndex = Long.MAX_VALUE; - - String fileNamePrefix = - dirName - + File.separator - + startLogIndex - + FILE_NAME_SEPARATOR - + endLogIndex - + FILE_NAME_SEPARATOR - + nextVersion - + FILE_NAME_SEPARATOR; - File logDataFile = SystemFileFactory.INSTANCE.getFile(fileNamePrefix + LOG_DATA_FILE_SUFFIX); - File logIndexFile = - SystemFileFactory.INSTANCE.getFile(fileNamePrefix + LOG_INDEX_FILE_SUFFIX); - - if (!logDataFile.createNewFile()) { - logger.warn("Cannot create new log data file {}", logDataFile); - } - - if (!logIndexFile.createNewFile()) { - logger.warn("Cannot create new log index file {}", logDataFile); - } - 
logDataFileList.add(logDataFile); - logIndexFileList.add(logIndexFile); - } finally { - lock.unlock(); - } - } - - private File getCurrentLogDataFile() { - return logDataFileList.get(logDataFileList.size() - 1); - } - - private File getCurrentLogIndexFile() { - return logIndexFileList.get(logIndexFileList.size() - 1); - } - - private void recoverMeta() { - if (meta != null) { - return; - } - - if (metaFile.exists() && metaFile.length() > 0) { - if (logger.isInfoEnabled()) { - SimpleDateFormat format = new SimpleDateFormat(); - logger.info( - "MetaFile {} exists, last modified: {}", - metaFile.getPath(), - format.format(new Date(metaFile.lastModified()))); - } - try (FileInputStream fileInputStream = new FileInputStream(metaFile); - BufferedInputStream bufferedInputStream = new BufferedInputStream(fileInputStream)) { - minAvailableVersion = ReadWriteIOUtils.readLong(bufferedInputStream); - maxAvailableVersion = ReadWriteIOUtils.readLong(bufferedInputStream); - meta = - LogManagerMeta.deserialize( - ByteBuffer.wrap( - ReadWriteIOUtils.readBytesWithSelfDescriptionLength(bufferedInputStream))); - state = - HardState.deserialize( - ByteBuffer.wrap( - ReadWriteIOUtils.readBytesWithSelfDescriptionLength(bufferedInputStream))); - } catch (IOException e) { - logger.error("Cannot recover log meta: ", e); - meta = new LogManagerMeta(); - state = new HardState(); - } - } else { - meta = new LogManagerMeta(); - state = new HardState(); - } - logger.info( - "Recovered log meta: {}, availableVersion: [{},{}], state: {}", - meta, - minAvailableVersion, - maxAvailableVersion, - state); - } - - private void serializeMeta(LogManagerMeta meta) { - File tempMetaFile = SystemFileFactory.INSTANCE.getFile(logDir + LOG_META_TMP); - tempMetaFile.getParentFile().mkdirs(); - logger.trace("Serializing log meta into {}", tempMetaFile.getPath()); - try (FileOutputStream tempMetaFileOutputStream = new FileOutputStream(tempMetaFile)) { - ReadWriteIOUtils.write(minAvailableVersion, tempMetaFileOutputStream); - ReadWriteIOUtils.write(maxAvailableVersion, tempMetaFileOutputStream); - ReadWriteIOUtils.write(meta.serialize(), tempMetaFileOutputStream); - ReadWriteIOUtils.write(state.serialize(), tempMetaFileOutputStream); - - } catch (IOException e) { - logger.error("Error in serializing log meta: ", e); - } - // rename - try { - Files.deleteIfExists(metaFile.toPath()); - } catch (IOException e) { - logger.warn("Cannot delete old log meta file {}", metaFile, e); - } - if (!tempMetaFile.renameTo(metaFile)) { - logger.warn("Cannot rename new log meta file {}", tempMetaFile); - } - - // rebuild meta stream - this.meta = meta; - logger.trace("Serialized log meta into {}", tempMetaFile.getPath()); - } - - @Override - public void close() { - logger.info("{} is closing", this); - lock.lock(); - forceFlushLogBuffer(); - try { - closeCurrentFile(meta.getCommitLogIndex()); - if (persistLogDeleteExecutorService != null) { - persistLogDeleteExecutorService.shutdownNow(); - persistLogDeleteLogFuture.cancel(true); - persistLogDeleteExecutorService.awaitTermination(20, TimeUnit.SECONDS); - persistLogDeleteExecutorService = null; - } - } catch (IOException e) { - logger.error("Error in log serialization: ", e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("Close persist log delete thread interrupted"); - } finally { - logger.info("{} is closed", this); - isClosed = true; - lock.unlock(); - } - } - - @Override - public void clearAllLogs(long commitIndex) { - lock.lock(); - try { - // 1. 
delete - forceFlushLogBuffer(); - closeCurrentFile(meta.getCommitLogIndex()); - forceDeleteAllLogFiles(); - deleteMetaFile(); - - logDataFileList.clear(); - logIndexFileList.clear(); - - // 2. init - if (!logIndexOffsetList.isEmpty()) { - this.firstLogIndex = Math.max(commitIndex + 1, firstLogIndex + logIndexOffsetList.size()); - } else { - this.firstLogIndex = commitIndex + 1; - } - this.logIndexOffsetList.clear(); - recoverMetaFile(); - meta = new LogManagerMeta(); - createNewLogFile(logDir, firstLogIndex); - logger.info("{}, clean all logs success, the new firstLogIndex={}", this, firstLogIndex); - } catch (IOException e) { - logger.error("clear all logs failed,", e); - } finally { - lock.unlock(); - } - } - - private void deleteMetaFile() { - lock.lock(); - try { - File tmpMetaFile = SystemFileFactory.INSTANCE.getFile(logDir + LOG_META_TMP); - Files.deleteIfExists(tmpMetaFile.toPath()); - File localMetaFile = SystemFileFactory.INSTANCE.getFile(logDir + LOG_META); - Files.deleteIfExists(localMetaFile.toPath()); - } catch (IOException e) { - logger.error("{}: delete meta log files failed", this, e); - } finally { - lock.unlock(); - } - } - - /** - * get file version from file The file name structure is as follows: - * {startLogIndex}-{endLogIndex}-{version}-data) - * - * @param file file - * @return version from file - */ - private long getFileVersion(File file) { - return Long.parseLong(file.getName().split(FILE_NAME_SEPARATOR)[2]); - } - - public void checkDeletePersistRaftLog() { - // 1. check the log index offset list size - lock.lock(); - try { - if (logIndexOffsetList.size() > maxRaftLogIndexSizeInMemory) { - int compactIndex = logIndexOffsetList.size() - maxRaftLogIndexSizeInMemory; - logIndexOffsetList.subList(0, compactIndex).clear(); - firstLogIndex += compactIndex; - } - } finally { - lock.unlock(); - } - - // 2. check the persist log file number - lock.lock(); - try { - while (logDataFileList.size() > maxNumberOfPersistRaftLogFiles) { - deleteTheFirstLogDataAndIndexFile(); - } - } finally { - lock.unlock(); - } - - // 3. 
check the persist log index number - lock.lock(); - try { - while (logDataFileList.size() > 1) { - File firstFile = logDataFileList.get(0); - String[] splits = firstFile.getName().split(FILE_NAME_SEPARATOR); - if (meta.getCommitLogIndex() - Long.parseLong(splits[1]) > maxPersistRaftLogNumberOnDisk) { - deleteTheFirstLogDataAndIndexFile(); - } else { - return; - } - } - } finally { - lock.unlock(); - } - } - - private void forceDeleteAllLogDataFiles() { - FileFilter logFilter = - pathname -> { - String s = pathname.getName(); - return s.endsWith(LOG_DATA_FILE_SUFFIX); - }; - List logFiles = Arrays.asList(metaFile.getParentFile().listFiles(logFilter)); - logger.info("get log data files {} when forcing delete all logs", logFiles); - for (File logFile : logFiles) { - try { - FileUtils.forceDelete(logFile); - } catch (IOException e) { - logger.error("forcing delete log data file={} failed", logFile.getAbsoluteFile(), e); - } - } - logDataFileList.clear(); - } - - private void forceDeleteAllLogIndexFiles() { - FileFilter logIndexFilter = - pathname -> { - String s = pathname.getName(); - return s.endsWith(LOG_INDEX_FILE_SUFFIX); - }; - - List logIndexFiles = Arrays.asList(metaFile.getParentFile().listFiles(logIndexFilter)); - logger.info("get log index files {} when forcing delete all logs", logIndexFiles); - for (File logFile : logIndexFiles) { - try { - FileUtils.forceDelete(logFile); - } catch (IOException e) { - logger.error("forcing delete log index file={} failed", logFile.getAbsoluteFile(), e); - } - } - logIndexFileList.clear(); - } - - private void forceDeleteAllLogFiles() { - while (!logDataFileList.isEmpty()) { - boolean success = deleteTheFirstLogDataAndIndexFile(); - if (!success) { - forceDeleteAllLogDataFiles(); - forceDeleteAllLogIndexFiles(); - } - } - } - - @SuppressWarnings("ConstantConditions") - private boolean deleteTheFirstLogDataAndIndexFile() { - if (logDataFileList.isEmpty()) { - return true; - } - - File logDataFile = null; - File logIndexFile = null; - - lock.lock(); - try { - logDataFile = logDataFileList.get(0); - logIndexFile = logIndexFileList.get(0); - if (logDataFile == null || logIndexFile == null) { - logger.error("the log data or index file is null, some error occurred"); - return false; - } - Files.delete(logDataFile.toPath()); - Files.delete(logIndexFile.toPath()); - logDataFileList.remove(0); - logIndexFileList.remove(0); - logger.debug( - "delete date file={}, index file={}", - logDataFile.getAbsoluteFile(), - logIndexFile.getAbsoluteFile()); - } catch (IOException e) { - logger.error( - "delete file failed, data file={}, index file={}", - logDataFile.getAbsoluteFile(), - logIndexFile.getAbsoluteFile()); - return false; - } finally { - lock.unlock(); - } - return true; - } - - /** - * The file name structure is as follows: {startLogIndex}-{endLogIndex}-{version}-data) - * - * @param file1 File to compare - * @param file2 File to compare - */ - private int comparePersistLogFileName(File file1, File file2) { - String[] items1 = file1.getName().split(FILE_NAME_SEPARATOR); - String[] items2 = file2.getName().split(FILE_NAME_SEPARATOR); - if (items1.length != FILE_NAME_PART_LENGTH || items2.length != FILE_NAME_PART_LENGTH) { - logger.error( - "file1={}, file2={} name should be in the following format: startLogIndex-endLogIndex-version-data", - file1.getAbsoluteFile(), - file2.getAbsoluteFile()); - } - long startLogIndex1 = Long.parseLong(items1[0]); - long startLogIndex2 = Long.parseLong(items2[0]); - int res = Long.compare(startLogIndex1, startLogIndex2); - 
if (res == 0) { - return Long.compare(Long.parseLong(items1[1]), Long.parseLong(items2[1])); - } - return res; - } - - /** - * @param startIndex the log start index - * @param endIndex the log end index - * @return the raft log which index between [startIndex, endIndex] or empty if not found - */ - @Override - public List getLogs(long startIndex, long endIndex) { - if (startIndex > endIndex) { - logger.error( - "startIndex={} should be less than or equal to endIndex={}", startIndex, endIndex); - return Collections.emptyList(); - } - if (startIndex < 0 || endIndex < 0) { - logger.error( - "startIndex={} and endIndex={} should be larger than zero", startIndex, endIndex); - return Collections.emptyList(); - } - - long newEndIndex = endIndex; - if (endIndex - startIndex > MAX_NUMBER_OF_LOGS_PER_FETCH_ON_DISK) { - newEndIndex = startIndex + MAX_NUMBER_OF_LOGS_PER_FETCH_ON_DISK; - } - logger.debug( - "intend to get logs between[{}, {}], actually get logs between[{},{}]", - startIndex, - endIndex, - startIndex, - newEndIndex); - - // maybe the logs will be deleted during checkDeletePersistRaftLog or clearAllLogs, - // use lock for two reasons: - // 1.if the log file to read is the last log file, we need to get write lock to flush logBuffer, - // 2.prevent these log files from being deleted - lock.lock(); - try { - List>> logDataFileAndOffsetList = - getLogDataFileAndOffset(startIndex, newEndIndex); - if (logDataFileAndOffsetList.isEmpty()) { - return Collections.emptyList(); - } - - List result = new ArrayList<>(); - for (Pair> pair : logDataFileAndOffsetList) { - result.addAll(getLogsFromOneLogDataFile(pair.left, pair.right)); - } - - return result; - } finally { - lock.unlock(); - } - } - - /** - * @param logIndex the log's index - * @return The offset of the data file corresponding to the log index, -1 if not found - */ - public long getOffsetAccordingToLogIndex(long logIndex) { - long offset = -1; - - long maxLogIndex = firstLogIndex + logIndexOffsetList.size(); - if (logIndex >= maxLogIndex) { - logger.error( - "given log index={} exceed the max log index={}, firstLogIndex={}", - logIndex, - maxLogIndex, - firstLogIndex); - return -1; - } - // 1. first find in memory - if (logIndex >= firstLogIndex) { - int arrayIndex = (int) (logIndex - firstLogIndex); - if (arrayIndex < logIndexOffsetList.size()) { - offset = logIndexOffsetList.get(arrayIndex); - logger.debug( - "found the offset in memory, logIndex={}, firstLogIndex={}, logIndexOffsetList size={}, offset={}", - logIndex, - firstLogIndex, - logIndexOffsetList.size(), - offset); - return offset; - } - } - - logger.debug( - "can not found the offset in memory, logIndex={}, firstLogIndex={}, logIndexOffsetList size={}", - logIndex, - firstLogIndex, - logIndexOffsetList.size()); - - // 2. 
second read the log index file - Pair> fileWithStartAndEndIndex = getLogIndexFile(logIndex); - if (fileWithStartAndEndIndex == null) { - return -1; - } - File file = fileWithStartAndEndIndex.left; - Pair startAndEndIndex = fileWithStartAndEndIndex.right; - logger.debug( - "start to read the log index file={} for log index={}, file size={}", - file.getAbsoluteFile(), - logIndex, - file.length()); - try (FileInputStream fileInputStream = new FileInputStream(file); - BufferedInputStream bufferedInputStream = new BufferedInputStream(fileInputStream)) { - long bytesNeedToSkip = (logIndex - startAndEndIndex.left) * (Long.BYTES); - long bytesActuallySkip = bufferedInputStream.skip(bytesNeedToSkip); - logger.debug("skip {} bytes when read file={}", bytesActuallySkip, file.getAbsoluteFile()); - if (bytesNeedToSkip != bytesActuallySkip) { - logger.error( - "read file={} failed, should skip={}, actually skip={}", - file.getAbsoluteFile(), - bytesNeedToSkip, - bytesActuallySkip); - return -1; - } - offset = ReadWriteIOUtils.readLong(bufferedInputStream); - return offset; - } catch (IOException e) { - logger.error("can not read the log index file={}", file.getAbsoluteFile(), e); - return -1; - } - } - - /** - * @param startIndex the log start index - * @param endIndex the log end index - * @return first value-> the log data file, second value-> the left value is the start offset of - * the file, the right is the end offset of the file - */ - private List>> getLogDataFileAndOffset( - long startIndex, long endIndex) { - long startIndexInOneFile = startIndex; - long endIndexInOneFile = 0; - List>> fileNameWithStartAndEndOffset = new ArrayList<>(); - // 1. get the start offset with the startIndex - long startOffset = getOffsetAccordingToLogIndex(startIndexInOneFile); - if (startOffset == -1) { - return Collections.emptyList(); - } - Pair> logDataFileWithStartAndEndLogIndex = - getLogDataFile(startIndexInOneFile); - if (logDataFileWithStartAndEndLogIndex == null) { - return Collections.emptyList(); - } - endIndexInOneFile = logDataFileWithStartAndEndLogIndex.right.right; - // 2. judge whether the fileEndLogIndex>=endIndex - while (endIndex > endIndexInOneFile) { - // this means the endIndex's offset can not be found in the file - // logDataFileWithStartAndEndLogIndex.left; and should be find in the next log data file. - // 3. get the file's end offset - long endOffset = getOffsetAccordingToLogIndex(endIndexInOneFile); - fileNameWithStartAndEndOffset.add( - new Pair<>(logDataFileWithStartAndEndLogIndex.left, new Pair<>(startOffset, endOffset))); - - logger.debug( - "get log data offset=[{},{}] according to log index=[{},{}], file={}", - startOffset, - endOffset, - startIndexInOneFile, - endIndexInOneFile, - logDataFileWithStartAndEndLogIndex.left); - // 4. 
search the next file to get the log index of fileEndLogIndex + 1 - startIndexInOneFile = endIndexInOneFile + 1; - startOffset = getOffsetAccordingToLogIndex(startIndexInOneFile); - if (startOffset == -1) { - return Collections.emptyList(); - } - logDataFileWithStartAndEndLogIndex = getLogDataFile(startIndexInOneFile); - if (logDataFileWithStartAndEndLogIndex == null) { - return Collections.emptyList(); - } - endIndexInOneFile = logDataFileWithStartAndEndLogIndex.right.right; - } - // this means the endIndex's offset can not be found in the file - // logDataFileWithStartAndEndLogIndex.left - long endOffset = getOffsetAccordingToLogIndex(endIndex); - fileNameWithStartAndEndOffset.add( - new Pair<>(logDataFileWithStartAndEndLogIndex.left, new Pair<>(startOffset, endOffset))); - logger.debug( - "get log data offset=[{},{}] according to log index=[{},{}], file={}", - startOffset, - endOffset, - startIndexInOneFile, - endIndex, - logDataFileWithStartAndEndLogIndex.left); - return fileNameWithStartAndEndOffset; - } - - /** - * @param startIndex the start log index - * @return the first value of the pair is the log index file which contains the start index; the - * second pair's first value is the file's start log index. the second pair's second value is - * the file's end log index. null if not found - */ - public Pair> getLogIndexFile(long startIndex) { - for (File file : logIndexFileList) { - String[] splits = file.getName().split(FILE_NAME_SEPARATOR); - if (splits.length != FILE_NAME_PART_LENGTH) { - logger.error( - "file={} name should be in the following format: startLogIndex-endLogIndex-version-idx", - file.getAbsoluteFile()); - } - if (Long.parseLong(splits[0]) <= startIndex && startIndex <= Long.parseLong(splits[1])) { - return new Pair<>(file, new Pair<>(Long.parseLong(splits[0]), Long.parseLong(splits[1]))); - } - } - logger.debug("can not found the log index file for startIndex={}", startIndex); - return null; - } - - /** - * @param startIndex the start log index - * @return the first value of the pair is the log data file which contains the start index; the - * second pair's first value is the file's start log index. the second pair's second value is - * the file's end log index. 
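Locating the file that holds a given log index only needs the file name, since every data and index file is named startLogIndex-endLogIndex-version-suffix. An illustrative lookup, assuming the "-" separator shown in the naming pattern above:

import java.io.File;
import java.util.List;

final class LogFileLookupSketch {
    static File findFileCovering(List<File> files, long logIndex) {
        for (File f : files) {
            String[] parts = f.getName().split("-");
            if (parts.length != 4) {
                continue; // unexpected name; the real code logs an error instead
            }
            long start = Long.parseLong(parts[0]);
            long end = Long.parseLong(parts[1]); // Long.MAX_VALUE for a still-open file
            if (start <= logIndex && logIndex <= end) {
                return f;
            }
        }
        return null; // no file covers this index
    }
}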
null if not found - */ - public Pair> getLogDataFile(long startIndex) { - for (File file : logDataFileList) { - String[] splits = file.getName().split(FILE_NAME_SEPARATOR); - if (splits.length != FILE_NAME_PART_LENGTH) { - logger.error( - "file={} name should be in the following format: startLogIndex-endLogIndex-version-data", - file.getAbsoluteFile()); - } - if (Long.parseLong(splits[0]) <= startIndex && startIndex <= Long.parseLong(splits[1])) { - return new Pair<>(file, new Pair<>(Long.parseLong(splits[0]), Long.parseLong(splits[1]))); - } - } - logger.debug("can not found the log data file for startIndex={}", startIndex); - return null; - } - - /** - * @param file the log data file - * @param startAndEndOffset the left value is the start offset of the file, the right is the end - * offset of the file - * @return the logs between start offset and end offset - */ - private List getLogsFromOneLogDataFile(File file, Pair startAndEndOffset) { - List result = new ArrayList<>(); - if (file.getName().equals(getCurrentLogDataFile().getName())) { - forceFlushLogBufferWithoutCloseFile(); - } - try (FileInputStream fileInputStream = new FileInputStream(file); - BufferedInputStream bufferedInputStream = new BufferedInputStream(fileInputStream)) { - long bytesSkip = bufferedInputStream.skip(startAndEndOffset.left); - if (bytesSkip != startAndEndOffset.left) { - logger.error( - "read file={} failed when skip {} bytes, actual skip bytes={}", - file.getAbsoluteFile(), - startAndEndOffset.left, - bytesSkip); - return result; - } - - logger.debug( - "start to read file={} and skip {} bytes, startOffset={}, endOffset={}, fileLength={}", - file.getAbsoluteFile(), - bytesSkip, - startAndEndOffset.left, - startAndEndOffset.right, - file.length()); - - long currentReadOffset = bytesSkip; - // because we want to get all the logs whose offset between [startAndEndOffset.left, - // startAndEndOffset.right] - // which means, the last offset's value should be still read, in other words, - // the first log index of the offset starting with startAndEndOffset.right also needs to be - // read. 
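Reading a slice of a data file is then just: seek to the start offset and keep consuming [int size][payload] records until the cursor passes the end offset. A simplified sketch with DataInputStream; parsing the payload into a Log is omitted and the method name is illustrative:

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

final class LogFileScanSketch {
    // Scans one raft-log data file between two byte offsets, inclusive of the record that
    // starts exactly at endOffset, and returns the raw serialized payloads.
    static List<byte[]> readRecords(String path, long startOffset, long endOffset)
            throws IOException {
        List<byte[]> payloads = new ArrayList<>();
        try (DataInputStream in =
                new DataInputStream(new BufferedInputStream(new FileInputStream(path)))) {
            long skipped = in.skip(startOffset);
            if (skipped != startOffset) {
                throw new IOException("could not seek to offset " + startOffset);
            }
            long cursor = startOffset;
            while (cursor <= endOffset) {
                int size = in.readInt();            // length prefix
                byte[] payload = new byte[size];
                in.readFully(payload);              // the serialized log entry
                payloads.add(payload);
                cursor += Integer.BYTES + size;     // advance past prefix + payload
            }
        }
        return payloads;
    }
}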
- while (currentReadOffset <= startAndEndOffset.right) { - logger.debug( - "read file={}, currentReadOffset={}, end offset={}", - file.getAbsoluteFile(), - currentReadOffset, - startAndEndOffset.right); - int logSize = ReadWriteIOUtils.readInt(bufferedInputStream); - Log log = null; - log = - parser.parse(ByteBuffer.wrap(ReadWriteIOUtils.readBytes(bufferedInputStream, logSize))); - result.add(log); - currentReadOffset = currentReadOffset + Integer.BYTES + logSize; - } - } catch (UnknownLogTypeException e) { - logger.error("Unknown log detected ", e); - } catch (IOException e) { - logger.error("Cannot read log from file={} ", file.getAbsoluteFile(), e); - } - return result; - } - - @TestOnly - public void setLogDataBuffer(ByteBuffer logDataBuffer) { - this.logDataBuffer = logDataBuffer; - } - - @TestOnly - public void setMaxRaftLogPersistDataSizePerFile(int maxRaftLogPersistDataSizePerFile) { - this.maxRaftLogPersistDataSizePerFile = maxRaftLogPersistDataSizePerFile; - } - - @TestOnly - public void setMaxNumberOfPersistRaftLogFiles(int maxNumberOfPersistRaftLogFiles) { - this.maxNumberOfPersistRaftLogFiles = maxNumberOfPersistRaftLogFiles; - } - - @TestOnly - public List getLogDataFileList() { - return logDataFileList; - } - - @TestOnly - public List getLogIndexFileList() { - return logIndexFileList; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/FileSnapshot.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/FileSnapshot.java deleted file mode 100644 index d280bef34652..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/FileSnapshot.java +++ /dev/null @@ -1,641 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.cluster.RemoteTsFileResource; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.PullFileException; -import org.apache.iotdb.cluster.exception.SnapshotInstallationException; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.partition.slot.SlotManager; -import org.apache.iotdb.cluster.partition.slot.SlotManager.SlotStatus; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.handlers.caller.GenericHandler; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.engine.modification.ModificationFile; -import org.apache.iotdb.db.engine.storagegroup.TsFileResource; -import org.apache.iotdb.db.exception.LoadFileException; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.utils.SchemaUtils; -import org.apache.iotdb.tsfile.utils.FilePathUtils; -import org.apache.iotdb.tsfile.utils.Pair; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.BufferedOutputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.nio.file.Files; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Objects; - -/** - * FileSnapshot records the data files in a slot and their md5 (or other verification). When the - * snapshot is used to perform a catch-up, the receiver should: - * - *

1. create a remote snapshot indicating that the slot is being pulled from the remote - * - *

2. traverse the file list and, for each file: - * - *

2.1 if the file exists locally and the md5 is correct, skip it. - * - *

2.2 otherwise pull the file from the remote. - * - *

3. replace the remote snapshot with a FileSnapshot indicating that the slot of this node is - * synchronized with the remote one. - */ -@SuppressWarnings("java:S1135") // ignore todos -public class FileSnapshot extends Snapshot implements TimeseriesSchemaSnapshot { - - private static final Logger logger = LoggerFactory.getLogger(FileSnapshot.class); - - public static final int PULL_FILE_RETRY_INTERVAL_MS = 5000; - private Collection timeseriesSchemas; - private List dataFiles; - - public FileSnapshot() { - dataFiles = new ArrayList<>(); - timeseriesSchemas = new ArrayList<>(); - } - - public void addFile(TsFileResource resource, Node header) throws IOException { - addFile(resource, header, false); - } - - public void addFile(TsFileResource resource, Node header, boolean isRangeUnique) - throws IOException { - RemoteTsFileResource remoteTsFileResource = new RemoteTsFileResource(resource, header); - remoteTsFileResource.setPlanRangeUnique(isRangeUnique); - dataFiles.add(remoteTsFileResource); - } - - @Override - public ByteBuffer serialize() { - logger.info("Start to serialize a snapshot {}", this); - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - - try { - logger.info("Start to serialize {} schemas", timeseriesSchemas.size()); - dataOutputStream.writeInt(timeseriesSchemas.size()); - for (TimeseriesSchema measurementSchema : timeseriesSchemas) { - measurementSchema.serializeTo(dataOutputStream); - } - - logger.info("Start to serialize {} data files", dataFiles.size()); - dataOutputStream.writeInt(dataFiles.size()); - for (RemoteTsFileResource dataFile : dataFiles) { - dataFile.serialize(dataOutputStream); - } - } catch (IOException ignored) { - // unreachable - } - - return ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - } - - @Override - public void deserialize(ByteBuffer buffer) { - int timeseriesNum = buffer.getInt(); - for (int i = 0; i < timeseriesNum; i++) { - timeseriesSchemas.add(TimeseriesSchema.deserializeFrom(buffer)); - } - int fileNum = buffer.getInt(); - for (int i = 0; i < fileNum; i++) { - RemoteTsFileResource resource = new RemoteTsFileResource(); - resource.deserialize(buffer); - dataFiles.add(resource); - } - } - - public List getDataFiles() { - return dataFiles; - } - - @Override - public Collection getTimeseriesSchemas() { - return timeseriesSchemas; - } - - @Override - public void setTimeseriesSchemas(Collection timeseriesSchemas) { - this.timeseriesSchemas = timeseriesSchemas; - } - - @Override - public SnapshotInstaller getDefaultInstaller(RaftMember member) { - return new Installer((DataGroupMember) member); - } - - @Override - public String toString() { - return String.format( - "FileSnapshot{%d files, %d series, index-term: %d-%d}", - dataFiles.size(), timeseriesSchemas.size(), lastLogIndex, lastLogTerm); - } - - public static class Installer implements SnapshotInstaller { - - /** - * When a DataGroupMember pulls data from another node, the data files will be firstly stored in - * the "REMOTE_FILE_TEMP_DIR", and then load file functionality of IoTDB will be used to load - * the files into the IoTDB instance. 
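At the receiver, the per-file decision in step 2 reduces to "reuse the local copy when the checksum matches, otherwise fetch it". A high-level sketch of that loop; the FileFetcher interface and its method names are placeholders, not the actual cluster RPC API:

import java.util.Map;

final class FileCatchUpSketch {
    interface FileFetcher {
        boolean existsLocallyWithSameDigest(String filePath, String expectedDigest);
        void pullFromRemote(String filePath);
    }

    static void installFiles(Map<String, String> fileToDigest, FileFetcher fetcher) {
        for (Map.Entry<String, String> e : fileToDigest.entrySet()) {
            if (fetcher.existsLocallyWithSameDigest(e.getKey(), e.getValue())) {
                continue; // step 2.1: the local copy already matches, skip it
            }
            fetcher.pullFromRemote(e.getKey()); // step 2.2: pull the file from the remote holder
        }
    }
}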
- */ - private static final String REMOTE_FILE_TEMP_DIR = - IoTDBDescriptor.getInstance().getConfig().getSystemDir() + File.separator + "remote"; - - private static final Logger logger = LoggerFactory.getLogger(Installer.class); - private DataGroupMember dataGroupMember; - private SlotManager slotManager; - private String name; - - Installer(DataGroupMember dataGroupMember) { - this.dataGroupMember = dataGroupMember; - this.slotManager = dataGroupMember.getSlotManager(); - this.name = dataGroupMember.getName(); - } - - @Override - public void install(FileSnapshot snapshot, int slot, boolean isDataMigration) - throws SnapshotInstallationException { - try { - logger.info("Starting to install a snapshot {} into slot[{}]", snapshot, slot); - installFileSnapshotSchema(snapshot); - logger.info("Schemas in snapshot are registered"); - if (isDataMigration) { - SlotStatus status = slotManager.getStatus(slot); - if (status == SlotStatus.PULLING) { - // as the schemas are set, writes can proceed - slotManager.setToPullingWritable(slot); - logger.debug("{}: slot {} is now pulling writable", name, slot); - } - } - installFileSnapshotFiles(snapshot, slot, isDataMigration); - } catch (PullFileException e) { - throw new SnapshotInstallationException(e); - } - } - - @Override - public void install(Map snapshotMap, boolean isDataMigration) - throws SnapshotInstallationException { - logger.info("Starting to install snapshots {}", snapshotMap); - installSnapshot(snapshotMap, isDataMigration); - } - - private void installSnapshot(Map snapshotMap, boolean isDataMigration) - throws SnapshotInstallationException { - // In data migration, meta group member other than new node does not need to synchronize the - // leader, because data migration must be carried out after meta group applied add/remove node - // log. 
- dataGroupMember - .getMetaGroupMember() - .syncLocalApply( - dataGroupMember.getMetaGroupMember().getPartitionTable().getLastMetaLogIndex() - 1, - false); - for (Entry integerSnapshotEntry : snapshotMap.entrySet()) { - Integer slot = integerSnapshotEntry.getKey(); - FileSnapshot snapshot = integerSnapshotEntry.getValue(); - installFileSnapshotSchema(snapshot); - if (isDataMigration) { - SlotStatus status = slotManager.getStatus(slot); - if (status == SlotStatus.PULLING) { - // as schemas are set, writes can proceed - slotManager.setToPullingWritable(slot, false); - logger.debug("{}: slot {} is now pulling writable", name, slot); - } - } - } - if (isDataMigration) { - slotManager.save(); - } - - for (Entry integerSnapshotEntry : snapshotMap.entrySet()) { - Integer slot = integerSnapshotEntry.getKey(); - FileSnapshot snapshot = integerSnapshotEntry.getValue(); - try { - installFileSnapshotFiles(snapshot, slot, isDataMigration); - } catch (PullFileException e) { - throw new SnapshotInstallationException(e); - } - } - slotManager.save(); - } - - private void installFileSnapshotSchema(FileSnapshot snapshot) { - // load metadata in the snapshot - for (TimeseriesSchema schema : snapshot.getTimeseriesSchemas()) { - // notice: the measurement in the schema is the full path here - SchemaUtils.registerTimeseries(schema); - } - } - - private void installFileSnapshotFiles(FileSnapshot snapshot, int slot, boolean isDataMigration) - throws PullFileException { - List remoteTsFileResources = snapshot.getDataFiles(); - // pull file - for (int i = 0, remoteTsFileResourcesSize = remoteTsFileResources.size(); - i < remoteTsFileResourcesSize; - i++) { - RemoteTsFileResource resource = remoteTsFileResources.get(i); - logger.info( - "Pulling {}/{} files, current: {}", i + 1, remoteTsFileResources.size(), resource); - try { - if (isDataMigration) { - // This means that the minimum plan index and maximum plan index of some files are the - // same, - // so the logic of judging index coincidence needs to remove the case of equal - resource.setMinPlanIndex(dataGroupMember.getLogManager().getLastLogIndex()); - resource.setMaxPlanIndex(dataGroupMember.getLogManager().getLastLogIndex()); - loadRemoteFile(resource); - } else { - if (!isFileAlreadyPulled(resource)) { - loadRemoteFile(resource); - } else { - // notify the snapshot provider to remove the hardlink - removeRemoteHardLink(resource); - } - } - } catch (IllegalPathException e) { - throw new PullFileException(resource.getTsFilePath(), resource.getSource(), e); - } - } - - // all files are loaded, the slot can be queried without accessing the previous holder - slotManager.setToNull(slot, !isDataMigration); - logger.info("{}: slot {} is ready", name, slot); - } - - /** - * Check if the file "resource" is a duplication of some local files. As all data file close is - * controlled by the data group leader, the files with the same version should contain identical - * data if without merge. Even with merge, the files that the merged file is from are recorded - * so we can still find out if the data of a file is already replicated in this member. 
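installFileSnapshotFiles() above boils down to a per-file loop: in the non-migration case, a file whose data is already replicated locally is skipped and the provider is asked to drop its hardlink, otherwise the file is pulled from the source node; once every file is handled the slot is marked ready. A stripped-down sketch of that branch is below; RemoteFile and its methods are hypothetical stand-ins, not the RemoteTsFileResource/StorageEngine/pullRemoteFile APIs used in the real code.

import java.util.List;

class SnapshotFilePullSketch {

  // Hypothetical, minimal view of one file advertised by a snapshot provider.
  interface RemoteFile {
    boolean isAlreadyLocal();        // stand-in for the local duplicate check
    void pull() throws Exception;    // stand-in for pulling the file from the source node
    void releaseProviderHardlink();  // stand-in for asking the provider to remove its hardlink
  }

  /** Returns true only if every file in the snapshot ends up available locally. */
  static boolean installFiles(List<RemoteFile> files) {
    for (RemoteFile file : files) {
      try {
        if (file.isAlreadyLocal()) {
          // identical data is already replicated here; just let the provider drop its hardlink
          file.releaseProviderHardlink();
        } else {
          file.pull();
        }
      } catch (Exception e) {
        return false; // the real code wraps the failure into a PullFileException instead
      }
    }
    return true; // the slot can now be served without asking the previous holder
  }
}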
- * - * @param resource - * @return - */ - private boolean isFileAlreadyPulled(RemoteTsFileResource resource) throws IllegalPathException { - Pair sgNameAndTimePartitionIdPair = - FilePathUtils.getLogicalSgNameAndTimePartitionIdPair( - resource.getTsFile().getAbsolutePath()); - return StorageEngine.getInstance() - .isFileAlreadyExist( - resource, - new PartialPath(sgNameAndTimePartitionIdPair.left), - sgNameAndTimePartitionIdPair.right); - } - - private void removeRemoteHardLink(RemoteTsFileResource resource) { - Node sourceNode = resource.getSource(); - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = (AsyncDataClient) dataGroupMember.getAsyncClient(sourceNode); - if (client != null) { - try { - client.removeHardLink( - resource.getTsFile().getAbsolutePath(), new GenericHandler<>(sourceNode, null)); - } catch (TException e) { - logger.error( - "Cannot remove hardlink {} from {}", - resource.getTsFile().getAbsolutePath(), - sourceNode); - } - } - } else { - SyncDataClient client = (SyncDataClient) dataGroupMember.getSyncClient(sourceNode); - if (client == null) { - logger.error( - "Cannot remove hardlink {} from {}, due to can not get client", - resource.getTsFile().getAbsolutePath(), - sourceNode); - return; - } - try { - client.removeHardLink(resource.getTsFile().getAbsolutePath()); - } catch (TException te) { - client.close(); - logger.error( - "Cannot remove hardlink {} from {}", - resource.getTsFile().getAbsolutePath(), - sourceNode); - } finally { - client.returnSelf(); - } - } - } - - /** - * Load a remote file from the header of the data group that the file is in. As different IoTDB - * instances will name the file with the same version differently, we can only pull the file - * from the header currently. - * - * @param resource - */ - private void loadRemoteFile(RemoteTsFileResource resource) throws PullFileException { - Node sourceNode = resource.getSource(); - // pull the file to a temporary directory - File tempFile; - try { - tempFile = pullRemoteFile(resource, sourceNode); - } catch (IOException e) { - throw new PullFileException(resource.toString(), sourceNode, e); - } - if (tempFile != null) { - resource.setFile(tempFile); - try { - // save the resource and load the file into IoTDB - resource.serialize(); - loadRemoteResource(resource); - logger.info("{}: Remote file {} is successfully loaded", name, resource); - return; - } catch (IOException e) { - logger.error("{}: Cannot serialize {}", name, resource, e); - } catch (IllegalPathException e) { - logger.error("Illegal path when loading file {}", resource, e); - } - } - logger.error("{}: Cannot load remote file {} from node {}", name, resource, sourceNode); - throw new PullFileException(resource.toString(), sourceNode); - } - - /** - * When a file is successfully pulled to the local storage, load it into IoTDB with the resource - * and remove the files that is a subset of the new file. Also change the modification file if - * the new file is with one. - * - * @param resource - */ - private void loadRemoteResource(RemoteTsFileResource resource) throws IllegalPathException { - // the new file is stored at: - // remote/// - // you can see FilePathUtils.splitTsFilePath() method for details. 
- PartialPath storageGroupName = - new PartialPath( - FilePathUtils.getLogicalStorageGroupName(resource.getTsFile().getAbsolutePath())); - try { - StorageEngine.getInstance().getProcessor(storageGroupName).loadNewTsFile(resource, true); - if (resource.isPlanRangeUnique()) { - // only when a file has a unique range can we remove other files that over lap with it, - // otherwise we may remove data that is not contained in the file - StorageEngine.getInstance() - .getProcessor(storageGroupName) - .removeFullyOverlapFiles(resource); - } - } catch (StorageEngineException | LoadFileException e) { - logger.error("{}: Cannot load remote file {} into storage group", name, resource, e); - return; - } - resource.setRemote(false); - } - - /** - * Download the remote file of "resource" from "node" to a local temporary directory. If the - * resource has modification file, also download it. - * - * @param resource the TsFile to be downloaded - * @param node where to download the file - * @return the downloaded file or null if the file cannot be downloaded or its MD5 is not right - * @throws IOException - */ - private File pullRemoteFile(RemoteTsFileResource resource, Node node) throws IOException { - logger.info( - "{}: pulling remote file {} from {}, plan index [{}, {}]", - name, - resource, - node, - resource.getMinPlanIndex(), - resource.getMaxPlanIndex()); - // the new file is stored at: - // remote/// - // you can see FilePathUtils.splitTsFilePath() method for details. - String tempFileName = - FilePathUtils.getTsFileNameWithoutHardLink(resource.getTsFile().getAbsolutePath()); - String tempFilePath = - node.getNodeIdentifier() - + File.separator - + FilePathUtils.getTsFilePrefixPath(resource.getTsFile().getAbsolutePath()) - + File.separator - + tempFileName; - File tempFile = new File(REMOTE_FILE_TEMP_DIR, tempFilePath); - tempFile.getParentFile().mkdirs(); - if (pullRemoteFile(resource.getTsFile().getAbsolutePath(), node, tempFile)) { - // TODO-Cluster#353: implement file examination, may be replaced with other algorithm - if (resource.isWithModification()) { - File tempModFile = - new File(REMOTE_FILE_TEMP_DIR, tempFilePath + ModificationFile.FILE_SUFFIX); - pullRemoteFile(resource.getModFile().getFilePath(), node, tempModFile); - } - return tempFile; - } - return null; - } - - /** - * Download the file "remotePath" from "node" and store it to "dest" using up to 64KB chunks. If - * the network is bad, this method will retry upto 5 times before returning a failure. 
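The pullRemoteFile() javadoc above speaks of rejecting a file whose MD5 is not right, but the actual examination is still an open TODO in this code (TODO-Cluster#353). Purely as an illustration of what such a check could look like with plain JDK classes, an MD5 digest of a downloaded file can be computed chunk by chunk like this:

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

final class Md5Check {

  /** Hex-encoded MD5 of a local file, read in 64 KB chunks. */
  static String md5Of(Path file) throws IOException, NoSuchAlgorithmException {
    MessageDigest digest = MessageDigest.getInstance("MD5");
    try (InputStream in = Files.newInputStream(file)) {
      byte[] chunk = new byte[64 * 1024];
      int read;
      while ((read = in.read(chunk)) != -1) {
        digest.update(chunk, 0, read);
      }
    }
    StringBuilder hex = new StringBuilder();
    for (byte b : digest.digest()) {
      hex.append(String.format("%02x", b & 0xff));
    }
    return hex.toString();
  }
}

A receiver would compare such a digest against the one advertised by the provider and delete the temporary file on a mismatch.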
- * - * @param remotePath the file to be downloaded - * @param node where to download the file - * @param dest where to store the file - * @return true if the file is successfully downloaded, false otherwise - * @throws IOException - */ - private boolean pullRemoteFile(String remotePath, Node node, File dest) throws IOException { - int pullFileRetry = 5; - for (int i = 0; i < pullFileRetry; i++) { - try (BufferedOutputStream bufferedOutputStream = - new BufferedOutputStream(new FileOutputStream(dest))) { - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - downloadFileAsync(node, remotePath, bufferedOutputStream); - } else { - downloadFileSync(node, remotePath, bufferedOutputStream); - } - - if (logger.isInfoEnabled()) { - logger.info( - "{}: remote file {} is pulled at {}, length: {}", - name, - remotePath, - dest, - dest.length()); - } - return true; - } catch (TException e) { - logger.warn( - "{}: Cannot pull file {} from {}, wait 5s to retry", name, remotePath, node, e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("{}: Pulling file {} from {} interrupted", name, remotePath, node, e); - return false; - } - - try { - Files.delete(dest.toPath()); - Thread.sleep(PULL_FILE_RETRY_INTERVAL_MS); - } catch (IOException e) { - logger.warn("Cannot delete file when pulling {} from {} failed", remotePath, node); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - logger.warn("{}: Pulling file {} from {} interrupted", name, remotePath, node, ex); - return false; - } - // next try - } - return false; - } - - private void downloadFileAsync(Node node, String remotePath, OutputStream dest) - throws IOException, TException, InterruptedException { - long offset = 0; - // TODO-Cluster: use elaborate downloading techniques - int fetchSize = 64 * 1024; - - while (true) { - AsyncDataClient client = (AsyncDataClient) dataGroupMember.getAsyncClient(node); - if (client == null) { - throw new IOException("No available client for " + node.toString()); - } - ByteBuffer buffer; - buffer = SyncClientAdaptor.readFile(client, remotePath, offset, fetchSize); - int len = writeBuffer(buffer, dest); - if (len == 0) { - break; - } - offset += len; - } - dest.flush(); - } - - private int writeBuffer(ByteBuffer buffer, OutputStream dest) throws IOException { - if (buffer == null || buffer.limit() - buffer.position() == 0) { - return 0; - } - - // notice: the buffer returned by thrift is a slice of a larger buffer which contains - // the whole response, so buffer.position() is not 0 initially and buffer.limit() is - // not the size of the downloaded chunk - dest.write( - buffer.array(), - buffer.position() + buffer.arrayOffset(), - buffer.limit() - buffer.position()); - return buffer.limit() - buffer.position(); - } - - private void downloadFileSync(Node node, String remotePath, OutputStream dest) - throws IOException { - SyncDataClient client = (SyncDataClient) dataGroupMember.getSyncClient(node); - if (client == null) { - throw new IOException("No available client for " + node.toString()); - } - - long offset = 0; - // TODO-Cluster: use elaborate downloading techniques - int fetchSize = 64 * 1024; - - try { - while (true) { - ByteBuffer buffer = client.readFile(remotePath, offset, fetchSize); - int len = writeBuffer(buffer, dest); - if (len == 0) { - break; - } - offset += len; - } - } catch (TException e) { - client.close(); - } finally { - client.returnSelf(); - } - dest.flush(); - } - } - - @Override - public boolean equals(Object 
o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - FileSnapshot snapshot = (FileSnapshot) o; - return Objects.equals(timeseriesSchemas, snapshot.timeseriesSchemas) - && Objects.equals(dataFiles, snapshot.dataFiles); - } - - @Override - public void truncateBefore(long minIndex) { - dataFiles.removeIf( - res -> { - boolean toBeTruncated = res.getMaxPlanIndex() < minIndex; - if (toBeTruncated) { - // also remove the hardlink - res.remove(); - } - return toBeTruncated; - }); - } - - @Override - public int hashCode() { - return Objects.hash(timeseriesSchemas, dataFiles); - } - - public static class Factory implements SnapshotFactory { - - public static final Factory INSTANCE = new Factory(); - - @Override - public FileSnapshot create() { - return new FileSnapshot(); - } - - @Override - public FileSnapshot copy(FileSnapshot origin) { - FileSnapshot fileSnapshot = new FileSnapshot(); - fileSnapshot.setLastLogIndex(origin.lastLogIndex); - fileSnapshot.setLastLogTerm(origin.lastLogTerm); - fileSnapshot.dataFiles = origin.dataFiles == null ? null : new ArrayList<>(origin.dataFiles); - fileSnapshot.timeseriesSchemas = - origin.timeseriesSchemas == null ? null : new ArrayList<>(origin.timeseriesSchemas); - return fileSnapshot; - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/MetaSimpleSnapshot.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/MetaSimpleSnapshot.java deleted file mode 100644 index 0d109a9b23c3..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/MetaSimpleSnapshot.java +++ /dev/null @@ -1,342 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
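truncateBefore() above drops every data file whose max plan index falls before the compaction point and releases its hardlink inside the removeIf predicate. A tiny stand-alone example of that removeIf-with-cleanup pattern (JDK types only; Entry and releaseHardlink() are invented for the sketch):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class TruncateSketch {

  static class Entry {
    final long maxPlanIndex;
    Entry(long maxPlanIndex) { this.maxPlanIndex = maxPlanIndex; }
    void releaseHardlink() { System.out.println("released entry with max index " + maxPlanIndex); }
  }

  static void truncateBefore(List<Entry> entries, long minIndex) {
    entries.removeIf(
        entry -> {
          boolean toBeTruncated = entry.maxPlanIndex < minIndex;
          if (toBeTruncated) {
            entry.releaseHardlink(); // mirrors res.remove() in the deleted code
          }
          return toBeTruncated;
        });
  }

  public static void main(String[] args) {
    List<Entry> entries = new ArrayList<>(Arrays.asList(new Entry(3), new Entry(10)));
    truncateBefore(entries, 5);
    System.out.println(entries.size()); // 1 -- only the entry with max index 10 survives
  }
}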
- */ - -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.commons.auth.AuthException; -import org.apache.iotdb.commons.auth.authorizer.BasicAuthorizer; -import org.apache.iotdb.commons.auth.authorizer.IAuthorizer; -import org.apache.iotdb.commons.auth.entity.Role; -import org.apache.iotdb.commons.auth.entity.User; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.exception.metadata.StorageGroupAlreadySetException; -import org.apache.iotdb.db.metadata.template.Template; -import org.apache.iotdb.db.metadata.template.TemplateManager; -import org.apache.iotdb.db.service.IoTDB; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.locks.Lock; - -/** MetaSimpleSnapshot also records all storage groups. */ -public class MetaSimpleSnapshot extends Snapshot { - - private static final Logger logger = LoggerFactory.getLogger(MetaSimpleSnapshot.class); - private Map storageGroupTTLMap; - private Map userMap; - private Map roleMap; - private Map templateMap; - private ByteBuffer partitionTableBuffer; - - public MetaSimpleSnapshot() { - storageGroupTTLMap = Collections.emptyMap(); - userMap = Collections.emptyMap(); - roleMap = Collections.emptyMap(); - templateMap = Collections.emptyMap(); - partitionTableBuffer = null; - } - - public MetaSimpleSnapshot( - Map storageGroupTTLMap, - Map userMap, - Map roleMap, - Map templateMap, - ByteBuffer partitionTableBuffer) { - this.storageGroupTTLMap = storageGroupTTLMap; - this.userMap = userMap; - this.roleMap = roleMap; - this.templateMap = templateMap; - this.partitionTableBuffer = partitionTableBuffer; - } - - public Map getStorageGroupTTLMap() { - return storageGroupTTLMap; - } - - public Map getUserMap() { - return userMap; - } - - public Map getRoleMap() { - return roleMap; - } - - public Map getTemplateMap() { - return templateMap; - } - - public ByteBuffer getPartitionTableBuffer() { - return partitionTableBuffer; - } - - @Override - public ByteBuffer serialize() { - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(outputStream); - try { - dataOutputStream.writeInt(storageGroupTTLMap.size()); - for (Map.Entry entry : storageGroupTTLMap.entrySet()) { - SerializeUtils.serialize(entry.getKey().getFullPath(), dataOutputStream); - dataOutputStream.writeLong(entry.getValue()); - } - - dataOutputStream.writeInt(userMap.size()); - for (Map.Entry entry : userMap.entrySet()) { - SerializeUtils.serialize(entry.getKey(), dataOutputStream); - logger.debug("A user into snapshot: {}", entry.getValue()); - dataOutputStream.write(entry.getValue().serialize().array()); - } - - dataOutputStream.writeInt(roleMap.size()); - for (Map.Entry entry : roleMap.entrySet()) { - SerializeUtils.serialize(entry.getKey(), dataOutputStream); - logger.debug("A role into snapshot: {}", 
entry.getValue()); - dataOutputStream.write(entry.getValue().serialize().array()); - } - - dataOutputStream.writeInt(templateMap.size()); - for (Map.Entry entry : templateMap.entrySet()) { - SerializeUtils.serialize(entry.getKey(), dataOutputStream); - logger.debug("A template into snapshot: {}", entry.getValue()); - dataOutputStream.write(entry.getValue().serialize().array()); - } - - dataOutputStream.writeLong(lastLogIndex); - dataOutputStream.writeLong(lastLogTerm); - - dataOutputStream.write(partitionTableBuffer.array()); - - } catch (IOException e) { - // unreachable - } - return ByteBuffer.wrap(outputStream.toByteArray()); - } - - @Override - public void deserialize(ByteBuffer buffer) { - int storageGroupTTLMapSize = buffer.getInt(); - storageGroupTTLMap = new HashMap<>(storageGroupTTLMapSize); - for (int i = 0; i < storageGroupTTLMapSize; i++) { - try { - storageGroupTTLMap.put( - new PartialPath(SerializeUtils.deserializeString(buffer)), buffer.getLong()); - } catch (IllegalPathException e) { - // ignore - } - } - - int userMapSize = buffer.getInt(); - userMap = new HashMap<>(userMapSize); - for (int i = 0; i < userMapSize; i++) { - String userName = SerializeUtils.deserializeString(buffer); - User user = new User(); - user.deserialize(buffer); - userMap.put(userName, user); - } - - int roleMapSize = buffer.getInt(); - roleMap = new HashMap<>(roleMapSize); - for (int i = 0; i < roleMapSize; i++) { - String userName = SerializeUtils.deserializeString(buffer); - Role role = new Role(); - role.deserialize(buffer); - roleMap.put(userName, role); - } - - int templateSize = buffer.getInt(); - templateMap = new HashMap<>(templateSize); - for (int i = 0; i < templateSize; i++) { - String templateName = SerializeUtils.deserializeString(buffer); - Template template = new Template(); - template.deserialize(buffer); - templateMap.put(templateName, template); - } - - setLastLogIndex(buffer.getLong()); - setLastLogTerm(buffer.getLong()); - - partitionTableBuffer = buffer; - } - - @Override - public SnapshotInstaller getDefaultInstaller(RaftMember member) { - return new Installer((MetaGroupMember) member); - } - - public static class Installer implements SnapshotInstaller { - - private static final Logger logger = LoggerFactory.getLogger(Installer.class); - private MetaGroupMember metaGroupMember; - - public Installer(MetaGroupMember metaGroupMember) { - this.metaGroupMember = metaGroupMember; - } - - @Override - public void install(MetaSimpleSnapshot snapshot, int slot, boolean isDataMigration) { - installSnapshot(snapshot); - } - - @Override - public void install(Map snapshotMap, boolean isDataMigration) { - throw new UnsupportedOperationException("Method unimplemented"); - } - - /** - * Install a meta snapshot to IoTDB. The snapshot contains: all storage groups, partition table, - * authentication info, and last log term/index in the snapshot. - */ - private void installSnapshot(MetaSimpleSnapshot snapshot) { - Lock lock = metaGroupMember.getSnapshotApplyLock(); - if (lock.tryLock()) { - try { - // 1. register all storage groups - for (Map.Entry entry : snapshot.getStorageGroupTTLMap().entrySet()) { - PartialPath sgPath = entry.getKey(); - try { - IoTDB.schemaProcessor.setStorageGroup(sgPath); - } catch (StorageGroupAlreadySetException e) { - // ignore - } catch (MetadataException e) { - logger.error( - "{}: Cannot add storage group {} in snapshot, errMessage:{}", - metaGroupMember.getName(), - entry.getKey(), - e.getMessage()); - } - - // 2. 
register ttl in the snapshot - try { - IoTDB.schemaProcessor.setTTL(sgPath, entry.getValue()); - StorageEngine.getInstance().setTTL(sgPath, entry.getValue()); - } catch (MetadataException | IOException e) { - logger.error( - "{}: Cannot set ttl in storage group {} , errMessage: {}", - metaGroupMember.getName(), - entry.getKey(), - e.getMessage()); - } - } - - // 3. replace all users and roles - try { - IAuthorizer authorizer = BasicAuthorizer.getInstance(); - installSnapshotUsers(authorizer, snapshot); - installSnapshotRoles(authorizer, snapshot); - } catch (AuthException e) { - logger.error( - "{}: Cannot get authorizer instance, error is: ", metaGroupMember.getName(), e); - } - // 4. accept template map - TemplateManager.getInstance().setTemplateMap(snapshot.templateMap); - - // 5. accept partition table - metaGroupMember.acceptVerifiedPartitionTable(snapshot.getPartitionTableBuffer(), true); - - synchronized (metaGroupMember.getLogManager()) { - metaGroupMember.getLogManager().applySnapshot(snapshot); - } - } finally { - lock.unlock(); - } - } else { - logger.info( - "{}: is under snapshot installation now. This request is omitted. MetaSimpleSnapshot: {}", - metaGroupMember.getName(), - snapshot); - } - } - - private void installSnapshotUsers(IAuthorizer authorizer, MetaSimpleSnapshot snapshot) { - try { - authorizer.replaceAllUsers(snapshot.getUserMap()); - } catch (AuthException e) { - logger.error("{}:replace users failed", metaGroupMember.getName(), e); - } - } - - private void installSnapshotRoles(IAuthorizer authorizer, MetaSimpleSnapshot snapshot) { - try { - authorizer.replaceAllRoles(snapshot.getRoleMap()); - } catch (AuthException e) { - logger.error("{}:replace roles failed", metaGroupMember.getName(), e); - } - } - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - MetaSimpleSnapshot that = (MetaSimpleSnapshot) o; - return Objects.equals(storageGroupTTLMap, that.storageGroupTTLMap) - && Objects.equals(userMap, that.userMap) - && Objects.equals(roleMap, that.roleMap) - && Objects.equals(templateMap, that.templateMap) - && Objects.equals(partitionTableBuffer, that.partitionTableBuffer); - } - - @Override - public int hashCode() { - return Objects.hash(storageGroupTTLMap, userMap, roleMap, templateMap, partitionTableBuffer); - } - - public static class Factory implements SnapshotFactory { - - public static final FileSnapshot.Factory INSTANCE = new FileSnapshot.Factory(); - - @Override - public MetaSimpleSnapshot create() { - return new MetaSimpleSnapshot(); - } - - @Override - public MetaSimpleSnapshot copy(MetaSimpleSnapshot origin) { - MetaSimpleSnapshot metaSimpleSnapshot = create(); - metaSimpleSnapshot.lastLogIndex = origin.lastLogIndex; - metaSimpleSnapshot.lastLogTerm = origin.lastLogTerm; - metaSimpleSnapshot.partitionTableBuffer = origin.partitionTableBuffer.duplicate(); - metaSimpleSnapshot.roleMap = new HashMap<>(origin.roleMap); - metaSimpleSnapshot.userMap = new HashMap<>(origin.userMap); - metaSimpleSnapshot.templateMap = new HashMap<>(origin.templateMap); - metaSimpleSnapshot.storageGroupTTLMap = new HashMap<>(origin.storageGroupTTLMap); - return metaSimpleSnapshot; - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PartitionedSnapshot.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PartitionedSnapshot.java deleted file mode 100644 index e243e4c4a43a..000000000000 --- 
a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PartitionedSnapshot.java +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.SnapshotInstallationException; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.RaftMember; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Objects; -import java.util.concurrent.locks.Lock; - -/** PartitionedSnapshot stores the snapshot of each slot in a map. 
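PartitionedSnapshot.serialize() below frames the state as an int entry count, then for each slot an int slot id followed by the raw bytes of that slot's sub-snapshot, and finally the lastLogIndex/lastLogTerm longs. A rough stand-alone illustration of that layout with a Map of raw byte arrays (JDK types only, not the real snapshot classes):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Map;
import java.util.TreeMap;

class SlotMapLayoutDemo {

  static ByteBuffer frame(Map<Integer, byte[]> slotSnapshots, long lastLogIndex, long lastLogTerm)
      throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(bytes)) {
      out.writeInt(slotSnapshots.size());
      for (Map.Entry<Integer, byte[]> entry : slotSnapshots.entrySet()) {
        out.writeInt(entry.getKey()); // slot id
        out.write(entry.getValue());  // the sub-snapshot's own serialized form, appended verbatim
      }
      out.writeLong(lastLogIndex);
      out.writeLong(lastLogTerm);
    }
    return ByteBuffer.wrap(bytes.toByteArray());
  }

  public static void main(String[] args) throws IOException {
    Map<Integer, byte[]> slots = new TreeMap<>();
    slots.put(7, new byte[] {1, 2, 3});
    System.out.println(frame(slots, 42L, 5L).remaining()); // 4 + 4 + 3 + 8 + 8 = 27 bytes
  }
}

Because the sub-snapshot bytes carry no explicit length, reading the frame back has to delegate to each sub-snapshot's own deserialize(buffer) so it consumes exactly what it wrote, which is why deserialize() below calls factory.create() and then hands the shared buffer to the new snapshot.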
*/ -public class PartitionedSnapshot extends Snapshot { - - private static final Logger logger = LoggerFactory.getLogger(PartitionedSnapshot.class); - - private Map slotSnapshots; - private SnapshotFactory factory; - - public PartitionedSnapshot(SnapshotFactory factory) { - this(new HashMap<>(), factory); - } - - private PartitionedSnapshot(Map slotSnapshots, SnapshotFactory factory) { - this.slotSnapshots = slotSnapshots; - this.factory = factory; - } - - public void putSnapshot(int slot, T snapshot) { - slotSnapshots.put(slot, snapshot); - } - - @Override - public ByteBuffer serialize() { - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - - logger.info("Start to serialize a snapshot of {} sub-snapshots", slotSnapshots.size()); - try (DataOutputStream dataOutputStream = new DataOutputStream(outputStream)) { - dataOutputStream.writeInt(slotSnapshots.size()); - for (Entry entry : slotSnapshots.entrySet()) { - dataOutputStream.writeInt(entry.getKey()); - dataOutputStream.write(entry.getValue().serialize().array()); - } - dataOutputStream.writeLong(getLastLogIndex()); - dataOutputStream.writeLong(getLastLogTerm()); - } catch (IOException e) { - // unreachable - } - - return ByteBuffer.wrap(outputStream.toByteArray()); - } - - @Override - public void deserialize(ByteBuffer buffer) { - int size = buffer.getInt(); - for (int i = 0; i < size; i++) { - int slot = buffer.getInt(); - T snapshot = factory.create(); - snapshot.deserialize(buffer); - slotSnapshots.put(slot, snapshot); - } - setLastLogIndex(buffer.getLong()); - setLastLogTerm(buffer.getLong()); - } - - public T getSnapshot(int slot) { - return slotSnapshots.get(slot); - } - - @Override - public String toString() { - return "PartitionedSnapshot{" - + "slotSnapshots=" - + slotSnapshots.size() - + ", lastLogIndex=" - + lastLogIndex - + ", lastLogTerm=" - + lastLogTerm - + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - PartitionedSnapshot snapshot = (PartitionedSnapshot) o; - return Objects.equals(slotSnapshots, snapshot.slotSnapshots); - } - - @Override - public SnapshotInstaller getDefaultInstaller(RaftMember member) { - return new Installer((DataGroupMember) member); - } - - @Override - public int hashCode() { - return Objects.hash(slotSnapshots); - } - - @SuppressWarnings("java:S3740") - public class Installer implements SnapshotInstaller { - - private DataGroupMember dataGroupMember; - private String name; - - public Installer(DataGroupMember dataGroupMember) { - this.dataGroupMember = dataGroupMember; - this.name = dataGroupMember.getName(); - } - - @Override - public void install(PartitionedSnapshot snapshot, int slot, boolean isDataMigration) - throws SnapshotInstallationException { - installPartitionedSnapshot(snapshot); - } - - @Override - public void install(Map snapshotMap, boolean isDataMigration) { - throw new IllegalStateException("Method unimplemented"); - } - - /** - * Install a PartitionedSnapshot, which is a slotNumber -> FileSnapshot map. Only the slots that - * are managed by the the group will be applied. The lastLogId and lastLogTerm are also updated - * according to the snapshot. 
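Like the MetaSimpleSnapshot installer earlier, the installer below guards the whole installation with getSnapshotApplyLock().tryLock(): if another snapshot is already being applied, the new request is logged and dropped rather than queued. A minimal, self-contained illustration of that idiom with a plain ReentrantLock (all names invented for the sketch):

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

class SnapshotApplyGuard {

  private final Lock snapshotApplyLock = new ReentrantLock();

  /** Returns true if the installation ran, false if another one was already in progress. */
  boolean installIfIdle(Runnable installation) {
    if (!snapshotApplyLock.tryLock()) {
      // someone else is installing; the request is simply omitted, as in the deleted installers
      return false;
    }
    try {
      installation.run();
      return true;
    } finally {
      snapshotApplyLock.unlock();
    }
  }
}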
- * - * @param snapshot - */ - private void installPartitionedSnapshot(PartitionedSnapshot snapshot) - throws SnapshotInstallationException { - logger.info("{}: start to install a snapshot of {}", dataGroupMember.getName(), snapshot); - Lock lock = dataGroupMember.getSnapshotApplyLock(); - if (lock.tryLock()) { - try { - List slots = - ((SlotPartitionTable) dataGroupMember.getMetaGroupMember().getPartitionTable()) - .getNodeSlots(dataGroupMember.getHeader()); - for (Integer slot : slots) { - T subSnapshot = snapshot.getSnapshot(slot); - if (subSnapshot != null) { - installSnapshot(subSnapshot, slot); - } - } - synchronized (dataGroupMember.getLogManager()) { - dataGroupMember.getLogManager().applySnapshot(snapshot); - } - } finally { - lock.unlock(); - } - } else { - logger.info( - "{}: is under snapshot installation now. This request is omitted. PartitionedSnapshot: {}", - dataGroupMember.getName(), - snapshot); - } - } - - /** - * Apply a snapshot to the state machine, i.e., load the data and meta data contained in the - * snapshot into the IoTDB instance. Currently the type of the snapshot should be ony - * FileSnapshot, but more types may be supported in the future. - * - * @param snapshot - */ - @SuppressWarnings("java:S1905") // cast is necessary - void installSnapshot(T snapshot, int slot) throws SnapshotInstallationException { - if (logger.isDebugEnabled()) { - logger.debug("{}: applying snapshot {}", name, snapshot); - } - // ensure storage groups are synchronized - try { - dataGroupMember.getMetaGroupMember().syncLeaderWithConsistencyCheck(true); - } catch (CheckConsistencyException e) { - throw new SnapshotInstallationException(e); - } - SnapshotInstaller defaultInstaller = - (SnapshotInstaller) snapshot.getDefaultInstaller(dataGroupMember); - defaultInstaller.install(snapshot, slot, false); - } - } - - @Override - public void truncateBefore(long minIndex) { - for (T value : slotSnapshots.values()) { - value.truncateBefore(minIndex); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTask.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTask.java deleted file mode 100644 index 50a5d990f7b8..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTask.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.SnapshotInstallationException; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotResp; -import org.apache.iotdb.cluster.server.member.DataGroupMember; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.BufferedOutputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.file.Files; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Random; -import java.util.concurrent.Callable; - -/** - * When a new node joins the cluster, a new data group is formed and some partitions are assigned to - * the group. All members of the group should pull snapshots from the previous holders to proceed - * the data transition. - */ -public class PullSnapshotTask implements Callable { - - public static final String TASK_SUFFIX = ".task"; - private static final Logger logger = LoggerFactory.getLogger(PullSnapshotTask.class); - - private PullSnapshotTaskDescriptor descriptor; - private DataGroupMember newMember; - - private PullSnapshotRequest request; - private SnapshotFactory snapshotFactory; - - private File snapshotSave; - private Random random = new Random(); - - /** - * @param descriptor - * @param newMember - * @param snapshotFactory - * @param snapshotSave if the task is resumed from a disk file, this should that file, otherwise - * it should bu null - */ - public PullSnapshotTask( - PullSnapshotTaskDescriptor descriptor, - DataGroupMember newMember, - SnapshotFactory snapshotFactory, - File snapshotSave) { - this.descriptor = descriptor; - this.newMember = newMember; - this.snapshotFactory = snapshotFactory; - this.snapshotSave = snapshotSave; - persistTask(); - } - - @SuppressWarnings("java:S3740") // type cannot be known ahead - private boolean pullSnapshot(int nodeIndex) throws InterruptedException, TException { - Node node = descriptor.getPreviousHolders().get(nodeIndex); - if (logger.isDebugEnabled()) { - logger.debug( - "Pulling slot {} and other {} snapshots from {} of {} for {}", - descriptor.getSlots().get(0), - descriptor.getSlots().size() - 1, - node, - descriptor.getPreviousHolders().getHeader(), - newMember.getName()); - } - - Map result = pullSnapshot(node); - - if (result != null) { - // unlock slots that have no snapshots - List noSnapshotSlots = new ArrayList<>(); - for (Integer slot : descriptor.getSlots()) { - if (!result.containsKey(slot)) { - newMember.getSlotManager().setToNull(slot, false); - noSnapshotSlots.add(slot); - } - } - newMember.getSlotManager().save(); - if (!noSnapshotSlots.isEmpty() && logger.isInfoEnabled()) { - logger.info( - "{}: {} and other {} slots do not have snapshot", - newMember.getName(), - noSnapshotSlots.get(0), - noSnapshotSlots.size() - 1); - } - - if (logger.isInfoEnabled()) { - logger.info( - "{}: Received a 
snapshot {} from {}", - newMember.getName(), - result, - descriptor.getPreviousHolders().get(nodeIndex)); - } - try { - if (result.size() > 0) { - Snapshot snapshot = result.values().iterator().next(); - SnapshotInstaller installer = snapshot.getDefaultInstaller(newMember); - installer.install(result, true); - } - // inform the previous holders that one member has successfully pulled snapshot - newMember.registerPullSnapshotHint(descriptor); - return true; - } catch (SnapshotInstallationException e) { - logger.error("Apply snapshot failed, retry...", e); - } - } - return false; - } - - private Map pullSnapshot(Node node) throws TException, InterruptedException { - Map result; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = (AsyncDataClient) newMember.getAsyncClient(node); - if (client == null) { - return null; - } - result = - SyncClientAdaptor.pullSnapshot(client, request, descriptor.getSlots(), snapshotFactory); - } else { - SyncDataClient client = (SyncDataClient) newMember.getSyncClient(node); - if (client == null) { - return null; - } - PullSnapshotResp pullSnapshotResp; - try { - pullSnapshotResp = client.pullSnapshot(request); - } catch (TException e) { - client.close(); - throw e; - } finally { - client.returnSelf(); - } - result = new HashMap<>(); - for (Entry integerByteBufferEntry : - pullSnapshotResp.snapshotBytes.entrySet()) { - T snapshot = snapshotFactory.create(); - snapshot.deserialize(integerByteBufferEntry.getValue()); - result.put(integerByteBufferEntry.getKey(), snapshot); - } - } - return result; - } - - @Override - public Void call() { - request = new PullSnapshotRequest(); - request.setHeader(descriptor.getPreviousHolders().getHeader()); - request.setRequiredSlots(descriptor.getSlots()); - request.setRequireReadOnly(descriptor.isRequireReadOnly()); - - logger.info("{}: data migration starts.", newMember.getName()); - boolean finished = false; - int nodeIndex = ((PartitionGroup) newMember.getAllNodes()).indexOf(newMember.getThisNode()) - 1; - while (!finished) { - try { - // sequentially pick up a node that may have this slot - nodeIndex = (nodeIndex + 1) % descriptor.getPreviousHolders().size(); - long startTime = System.currentTimeMillis(); - finished = pullSnapshot(nodeIndex); - if (!finished) { - if (logger.isDebugEnabled()) { - logger.debug( - "Cannot pull slot {} from {}, retry", - descriptor.getSlots(), - descriptor.getPreviousHolders().get(nodeIndex)); - } - Thread.sleep( - ClusterDescriptor.getInstance().getConfig().getPullSnapshotRetryIntervalMs()); - } else { - if (logger.isDebugEnabled()) { - logger.debug( - "{}: Data migration ends, cost {}ms", - newMember, - (System.currentTimeMillis() - startTime)); - } - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - finished = true; - } catch (TException e) { - if (logger.isDebugEnabled()) { - logger.debug( - "Cannot pull slot {} from {}, retry", - descriptor.getSlots(), - descriptor.getPreviousHolders().get(nodeIndex), - e); - } - } - } - removeTask(); - return null; - } - - private void persistTask() { - if (snapshotSave != null) { - // the task is resumed from disk, do not persist it again - return; - } - - while (true) { - String saveName = System.currentTimeMillis() + "_" + random.nextLong() + TASK_SUFFIX; - snapshotSave = new File(newMember.getPullSnapshotTaskDir(), saveName); - if (!snapshotSave.exists()) { - snapshotSave.getParentFile().mkdirs(); - break; - } - } - - try (DataOutputStream dataOutputStream = - new 
DataOutputStream(new BufferedOutputStream(new FileOutputStream(snapshotSave)))) { - descriptor.serialize(dataOutputStream); - } catch (IOException e) { - logger.error( - "Cannot save the pulling task: pull {} from {}", - descriptor.getSlots(), - descriptor.getPreviousHolders(), - e); - } - } - - private void removeTask() { - try { - Files.delete(snapshotSave.toPath()); - } catch (IOException e) { - logger.warn("Cannot remove pull snapshot task file {}", snapshotSave, e); - } - } - - public File getSnapshotSave() { - return snapshotSave; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTaskDescriptor.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTaskDescriptor.java deleted file mode 100644 index 66925c73aa49..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTaskDescriptor.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.NodeSerializeUtils; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; - -/** - * PullSnapshotTaskDescriptor describes a pull-snapshot-task with the slots to pull, the previous - * owners and does this pulling require the provider to become read-only. So the task can be resumed - * when system crashes. - */ -public class PullSnapshotTaskDescriptor { - private PartitionGroup previousHolders; - private List slots; - - // set to true if the previous holder has been removed from the cluster. - // This will make the previous holder read-only so that different new - // replicas can pull the same snapshot. 
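persistTask() above saves the descriptor to a uniquely named "<timestamp>_<random>.task" file before pulling, so that an interrupted migration can be resumed from disk, and PullSnapshotTaskDescriptor below serializes its fields with a plain DataOutputStream. A rough, self-contained sketch of that persist-and-reload round trip, using a simplified descriptor that keeps only the slot list and the read-only flag (names invented for the sketch):

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class TaskPersistenceSketch {

  static void persist(File taskFile, List<Integer> slots, boolean requireReadOnly) throws IOException {
    taskFile.getParentFile().mkdirs();
    try (DataOutputStream out =
        new DataOutputStream(new BufferedOutputStream(new FileOutputStream(taskFile)))) {
      out.writeInt(slots.size());
      for (int slot : slots) {
        out.writeInt(slot);
      }
      out.writeBoolean(requireReadOnly);
    }
  }

  static void resume(File taskFile) throws IOException {
    try (DataInputStream in =
        new DataInputStream(new BufferedInputStream(new FileInputStream(taskFile)))) {
      int slotCount = in.readInt();
      List<Integer> slots = new ArrayList<>(slotCount);
      for (int i = 0; i < slotCount; i++) {
        slots.add(in.readInt());
      }
      System.out.println("resume pulling slots " + slots + ", readOnly=" + in.readBoolean());
    }
  }

  public static void main(String[] args) throws IOException {
    File taskFile = new File("snapshot_task", System.currentTimeMillis() + "_0.task");
    persist(taskFile, Arrays.asList(1, 2, 3), false);
    resume(taskFile);
    Files.delete(taskFile.toPath()); // mirrors removeTask() once the pull has succeeded
  }
}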
- private boolean requireReadOnly; - - public PullSnapshotTaskDescriptor() {} - - public PullSnapshotTaskDescriptor( - PartitionGroup previousOwners, List slots, boolean requireReadOnly) { - this.previousHolders = previousOwners; - this.slots = slots; - this.requireReadOnly = requireReadOnly; - } - - public PartitionGroup getPreviousHolders() { - return previousHolders; - } - - public List getSlots() { - return slots; - } - - public void setSlots(List slots) { - this.slots = slots; - } - - boolean isRequireReadOnly() { - return requireReadOnly; - } - - public void serialize(DataOutputStream dataOutputStream) throws IOException { - dataOutputStream.writeInt(slots.size()); - for (Integer slot : slots) { - dataOutputStream.writeInt(slot); - } - - dataOutputStream.writeInt(previousHolders.getRaftId()); - dataOutputStream.writeInt(previousHolders.size()); - for (Node previousHolder : previousHolders) { - NodeSerializeUtils.serialize(previousHolder, dataOutputStream); - } - - dataOutputStream.writeBoolean(requireReadOnly); - } - - public void deserialize(DataInputStream dataInputStream) throws IOException { - int slotSize = dataInputStream.readInt(); - slots = new ArrayList<>(slotSize); - for (int i = 0; i < slotSize; i++) { - slots.add(dataInputStream.readInt()); - } - - previousHolders = new PartitionGroup(dataInputStream.readInt()); - int holderSize = dataInputStream.readInt(); - for (int i = 0; i < holderSize; i++) { - Node node = new Node(); - NodeSerializeUtils.deserialize(node, dataInputStream); - previousHolders.add(node); - } - - requireReadOnly = dataInputStream.readBoolean(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - PullSnapshotTaskDescriptor that = (PullSnapshotTaskDescriptor) o; - return requireReadOnly == that.requireReadOnly - && Objects.equals(previousHolders, that.previousHolders) - && Objects.equals(slots, that.slots); - } - - @Override - public String toString() { - return "PullSnapshotTaskDescriptor{" - + " previousHolders=" - + previousHolders - + ", slots=" - + slots - + ", requireReadOnly=" - + requireReadOnly - + "}"; - } - - @Override - public int hashCode() { - return Objects.hash(previousHolders, slots, requireReadOnly); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/SnapshotFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/SnapshotFactory.java deleted file mode 100644 index ac679140a082..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/SnapshotFactory.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.cluster.log.Snapshot; - -public interface SnapshotFactory { - T create(); - - T copy(T origin); -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/SnapshotInstaller.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/SnapshotInstaller.java deleted file mode 100644 index 363d97fac41f..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/SnapshotInstaller.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.cluster.exception.SnapshotInstallationException; -import org.apache.iotdb.cluster.log.Snapshot; - -import java.util.Map; - -public interface SnapshotInstaller { - - void install(T snapshot, int slot, boolean isDataMigration) throws SnapshotInstallationException; - - void install(Map snapshotMap, boolean isDataMigration) - throws SnapshotInstallationException; -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/TimeseriesSchemaSnapshot.java b/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/TimeseriesSchemaSnapshot.java deleted file mode 100644 index 2193ba291600..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/log/snapshot/TimeseriesSchemaSnapshot.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import java.util.Collection; - -public interface TimeseriesSchemaSnapshot { - - Collection getTimeseriesSchemas(); - - void setTimeseriesSchemas(Collection schemas); -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/metadata/CSchemaProcessor.java b/cluster/src/main/java/org/apache/iotdb/cluster/metadata/CSchemaProcessor.java deleted file mode 100644 index 87b490b8dea5..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/metadata/CSchemaProcessor.java +++ /dev/null @@ -1,1741 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.metadata; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.coordinator.Coordinator; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.UnsupportedPlanException; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.query.manage.QueryCoordinator; -import org.apache.iotdb.cluster.rpc.thrift.GetAllPathsResult; -import org.apache.iotdb.cluster.rpc.thrift.MeasurementSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.server.handlers.caller.ShowTimeSeriesHandler; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.utils.ClusterQueryUtils; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.metadata.PathNotExistException; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.metadata.LocalSchemaProcessor; -import org.apache.iotdb.db.metadata.lastCache.LastCacheManager; -import org.apache.iotdb.db.metadata.mnode.IMNode; -import org.apache.iotdb.db.metadata.mnode.IMeasurementMNode; -import org.apache.iotdb.db.metadata.mnode.InternalMNode; -import 
org.apache.iotdb.db.metadata.mnode.MeasurementMNode; -import org.apache.iotdb.db.metadata.path.AlignedPath; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.metadata.utils.MetaUtils; -import org.apache.iotdb.db.qp.constant.SQLConstant; -import org.apache.iotdb.db.qp.physical.BatchPlan; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertMultiTabletsPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowsOfOneDevicePlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowsPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertTabletPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateAlignedTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateMultiTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan; -import org.apache.iotdb.db.qp.physical.sys.SetTemplatePlan; -import org.apache.iotdb.db.qp.physical.sys.ShowDevicesPlan; -import org.apache.iotdb.db.qp.physical.sys.ShowTimeSeriesPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.dataset.ShowDevicesResult; -import org.apache.iotdb.db.query.dataset.ShowTimeSeriesResult; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.db.utils.TypeInferenceUtils; -import org.apache.iotdb.rpc.TSStatusCode; -import org.apache.iotdb.tsfile.common.cache.LRUCache; -import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor; -import org.apache.iotdb.tsfile.common.constant.TsFileConstant; -import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.utils.Pair; -import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.VectorMeasurementSchema; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.lang.reflect.Array; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.LinkedBlockingDeque; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.stream.Collectors; - -import static org.apache.iotdb.cluster.query.ClusterPlanExecutor.LOG_FAIL_CONNECT; -import static org.apache.iotdb.cluster.query.ClusterPlanExecutor.THREAD_POOL_SIZE; -import static org.apache.iotdb.cluster.query.ClusterPlanExecutor.waitForThreadPool; -import static org.apache.iotdb.db.utils.EncodingInferenceUtils.getDefaultEncoding; - -@SuppressWarnings("java:S1135") // ignore todos -public class CSchemaProcessor extends LocalSchemaProcessor { - - 
private static final Logger logger = LoggerFactory.getLogger(CSchemaProcessor.class); - - private ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock(); - // only cache the series who is writing, we need not to cache series who is reading - // because the read is slow, so pull from remote is little cost comparing to the disk io - private RemoteMetaCache mRemoteMetaCache; - private MetaPuller metaPuller; - private MetaGroupMember metaGroupMember; - private Coordinator coordinator; - - private CSchemaProcessor() { - super(); - metaPuller = MetaPuller.getInstance(); - int remoteCacheSize = config.getmRemoteSchemaCacheSize(); - mRemoteMetaCache = new RemoteMetaCache(remoteCacheSize); - } - - private static class CSchemaProcessorHolder { - - private CSchemaProcessorHolder() { - // allowed to do nothing - } - - private static final CSchemaProcessor INSTANCE = new CSchemaProcessor(); - } - - /** - * we should not use this function in other place, but only in IoTDB class - * - * @return - */ - public static CSchemaProcessor getInstance() { - return CSchemaProcessorHolder.INSTANCE; - } - - /** - * sync meta leader to get the newest partition table and storage groups. - * - * @throws MetadataException throws MetadataException if necessary - */ - public void syncMetaLeader() throws MetadataException { - try { - metaGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new MetadataException(e); - } - } - - @Override - public String deleteTimeseries(PartialPath pathPattern, boolean isPrefixMatch) - throws MetadataException { - cacheLock.writeLock().lock(); - mRemoteMetaCache.removeItem(pathPattern, isPrefixMatch); - cacheLock.writeLock().unlock(); - return super.deleteTimeseries(pathPattern, isPrefixMatch); - } - - @Override - public void deleteStorageGroups(List storageGroups) throws MetadataException { - cacheLock.writeLock().lock(); - for (PartialPath storageGroup : storageGroups) { - mRemoteMetaCache.removeItem(storageGroup, true); - } - cacheLock.writeLock().unlock(); - super.deleteStorageGroups(storageGroups); - } - - @Override - public TSDataType getSeriesType(PartialPath fullPath) throws MetadataException { - - if (fullPath.equals(SQLConstant.TIME_PATH)) { - return TSDataType.INT64; - } - - String measurement = fullPath.getMeasurement(); - if (fullPath instanceof AlignedPath) { - if (((AlignedPath) fullPath).getMeasurementList().size() != 1) { - return TSDataType.VECTOR; - } else { - measurement = ((AlignedPath) fullPath).getMeasurement(0); - } - } - - // try remote cache first - try { - cacheLock.readLock().lock(); - IMeasurementMNode measurementMNode = mRemoteMetaCache.get(fullPath); - if (measurementMNode != null) { - return measurementMNode.getDataType(measurement); - } - } finally { - cacheLock.readLock().unlock(); - } - - // try local MTree - TSDataType seriesType; - try { - seriesType = super.getSeriesType(fullPath); - } catch (PathNotExistException e) { - // pull from remote node - List schemas = - metaPuller.pullMeasurementSchemas(Collections.singletonList(fullPath)); - if (!schemas.isEmpty()) { - IMeasurementSchema measurementSchema = schemas.get(0); - IMeasurementMNode measurementMNode = - MeasurementMNode.getMeasurementMNode( - null, measurementSchema.getMeasurementId(), measurementSchema, null); - if (measurementSchema instanceof VectorMeasurementSchema) { - for (String subMeasurement : measurementSchema.getSubMeasurementsList()) { - cacheMeta( - new AlignedPath(fullPath.getDevice(), subMeasurement), measurementMNode, false); 
- } - } else { - cacheMeta(fullPath, measurementMNode, true); - } - return measurementMNode.getDataType(measurement); - } else { - throw e; - } - } - return seriesType; - } - - @Override - public IMeasurementMNode getMeasurementMNode(PartialPath fullPath) throws MetadataException { - IMeasurementMNode node = null; - // try remote cache first - try { - cacheLock.readLock().lock(); - IMeasurementMNode measurementMNode = mRemoteMetaCache.get(fullPath); - if (measurementMNode != null) { - node = measurementMNode; - } - } finally { - cacheLock.readLock().unlock(); - } - - if (node == null) { - // try local MTree - try { - node = super.getMeasurementMNode(fullPath); - } catch (PathNotExistException e) { - // pull from remote node - List schemas = - metaPuller.pullMeasurementSchemas(Collections.singletonList(fullPath)); - if (!schemas.isEmpty()) { - IMeasurementSchema measurementSchema = schemas.get(0); - IMeasurementMNode measurementMNode = - MeasurementMNode.getMeasurementMNode( - null, measurementSchema.getMeasurementId(), measurementSchema, null); - cacheMeta(fullPath, measurementMNode, true); - node = measurementMNode; - } else { - throw e; - } - } - } - return node; - } - - /** - * Get the first index of non-exist schema in the local cache. - * - * @return -1 if all schemas are found, or the first index of the non-exist schema - */ - private int getMNodesLocally( - PartialPath deviceId, String[] measurements, IMeasurementMNode[] measurementMNodes) { - int failedMeasurementIndex = -1; - cacheLock.readLock().lock(); - try { - for (int i = 0; i < measurements.length && failedMeasurementIndex == -1; i++) { - IMeasurementMNode measurementMNode = - mRemoteMetaCache.get(deviceId.concatNode(measurements[i])); - if (measurementMNode == null) { - failedMeasurementIndex = i; - } else { - measurementMNodes[i] = measurementMNode; - } - } - } finally { - cacheLock.readLock().unlock(); - } - return failedMeasurementIndex; - } - - private void pullSeriesSchemas(PartialPath deviceId, String[] measurementList) - throws MetadataException { - List schemasToPull = new ArrayList<>(); - for (String s : measurementList) { - schemasToPull.add(deviceId.concatNode(s)); - } - List schemas = metaPuller.pullMeasurementSchemas(schemasToPull); - for (IMeasurementSchema schema : schemas) { - // TODO-Cluster: also pull alias? 
- // take care, the pulled schema's measurement Id is only series name - IMeasurementMNode measurementMNode = - MeasurementMNode.getMeasurementMNode(null, schema.getMeasurementId(), schema, null); - cacheMeta(deviceId.concatNode(schema.getMeasurementId()), measurementMNode, true); - } - logger.debug("Pulled {}/{} schemas from remote", schemas.size(), measurementList.length); - } - - /* - do not set FullPath for Vector subSensor - */ - @Override - public void cacheMeta( - PartialPath seriesPath, IMeasurementMNode measurementMNode, boolean needSetFullPath) { - if (needSetFullPath) { - measurementMNode.setFullPath(seriesPath.getFullPath()); - } - cacheLock.writeLock().lock(); - mRemoteMetaCache.put(seriesPath, measurementMNode); - cacheLock.writeLock().unlock(); - } - - @Override - public void updateLastCache( - PartialPath seriesPath, - TimeValuePair timeValuePair, - boolean highPriorityUpdate, - Long latestFlushedTime) { - cacheLock.writeLock().lock(); - try { - IMeasurementMNode measurementMNode = mRemoteMetaCache.get(seriesPath); - if (measurementMNode != null) { - LastCacheManager.updateLastCache( - measurementMNode, timeValuePair, highPriorityUpdate, latestFlushedTime); - } - } finally { - cacheLock.writeLock().unlock(); - } - // maybe local also has the timeseries - super.updateLastCache(seriesPath, timeValuePair, highPriorityUpdate, latestFlushedTime); - } - - @Override - public TimeValuePair getLastCache(PartialPath seriesPath) { - IMeasurementMNode measurementMNode = mRemoteMetaCache.get(seriesPath); - if (measurementMNode != null) { - return LastCacheManager.getLastCache(measurementMNode); - } - - return super.getLastCache(seriesPath); - } - - @Override - public IMNode getSeriesSchemasAndReadLockDevice(InsertPlan plan) - throws MetadataException, IOException { - IMeasurementMNode[] measurementMNodes = new IMeasurementMNode[plan.getMeasurements().length]; - int nonExistSchemaIndex = - getMNodesLocally(plan.getDevicePath(), plan.getMeasurements(), measurementMNodes); - if (nonExistSchemaIndex == -1) { - plan.setMeasurementMNodes(measurementMNodes); - return new InternalMNode(null, plan.getDevicePath().getDevice()); - } - // auto-create schema in IoTDBConfig is always disabled in the cluster version, and we have - // another config in ClusterConfig to do this - return super.getSeriesSchemasAndReadLockDevice(plan); - } - - /** - * Check whether the path exists. - * - * @param path a full path or a prefix path - */ - @Override - public boolean isPathExist(PartialPath path) { - boolean localExist = super.isPathExist(path); - if (localExist) { - return true; - } - - // search the cache - cacheLock.readLock().lock(); - try { - return mRemoteMetaCache.containsKey(path); - } finally { - cacheLock.readLock().unlock(); - } - } - - private static class RemoteMetaCache extends LRUCache { - - RemoteMetaCache(int cacheSize) { - super(cacheSize); - } - - @Override - protected IMeasurementMNode loadObjectByKey(PartialPath key) { - return null; - } - - public synchronized void removeItem(PartialPath key, boolean isPrefixMatch) { - cache.keySet().removeIf(s -> isPrefixMatch ? 
key.matchPrefixPath(s) : key.matchFullPath(s)); - } - - @Override - public synchronized void removeItem(PartialPath key) { - removeItem(key, false); - } - - @Override - public synchronized IMeasurementMNode get(PartialPath key) { - try { - return super.get(key); - } catch (IOException e) { - // not happening - return null; - } - } - - public synchronized boolean containsKey(PartialPath key) { - return cache.containsKey(key); - } - } - - /** - * create storage groups for CreateTimeseriesPlan, CreateMultiTimeseriesPlan and InsertPlan, also - * create timeseries for InsertPlan. Only the three kind of plans can use this method. - */ - public void createSchema(PhysicalPlan plan) throws MetadataException, CheckConsistencyException { - List storageGroups = new ArrayList<>(); - // for InsertPlan, try to just use deviceIds to get related storage groups because there's no - // need to call getPaths to concat deviceId and sensor as they will gain same result, - // for CreateTimeSeriesPlan, use getPath() to get timeseries to get related storage group, - // for CreateMultiTimeSeriesPlan, use getPaths() to get all timeseries to get related storage - // groups. - if (plan instanceof BatchPlan) { - storageGroups.addAll(getStorageGroups(getValidStorageGroups((BatchPlan) plan))); - } else if (plan instanceof InsertRowPlan || plan instanceof InsertTabletPlan) { - storageGroups.addAll( - getStorageGroups(Collections.singletonList(((InsertPlan) plan).getDevicePath()))); - } else if (plan instanceof CreateTimeSeriesPlan) { - storageGroups.addAll( - getStorageGroups(Collections.singletonList(((CreateTimeSeriesPlan) plan).getPath()))); - } else if (plan instanceof CreateAlignedTimeSeriesPlan) { - storageGroups.addAll( - getStorageGroups( - Collections.singletonList(((CreateAlignedTimeSeriesPlan) plan).getPrefixPath()))); - } else if (plan instanceof SetTemplatePlan) { - storageGroups.addAll( - getStorageGroups( - Collections.singletonList( - new PartialPath(((SetTemplatePlan) plan).getPrefixPath())))); - } else { - storageGroups.addAll(getStorageGroups(plan.getPaths())); - } - - // create storage groups - createStorageGroups(storageGroups); - - // need to verify the storage group is created - verifyCreatedSgSuccess(storageGroups, plan); - - // try to create timeseries for insertPlan - if (plan instanceof InsertPlan && !createTimeseries((InsertPlan) plan)) { - throw new MetadataException("Failed to create timeseries from InsertPlan automatically."); - } - } - - private List getValidStorageGroups(BatchPlan plan) { - List paths = new ArrayList<>(); - List originalPaths = plan.getPrefixPaths(); - for (int i = 0; i < originalPaths.size(); i++) { - // has permission to create sg - if (!plan.getResults().containsKey(i)) { - paths.add(originalPaths.get(i)); - } - } - return paths; - } - - /** return storage groups paths for given deviceIds or timeseries. 
*/ - private List getStorageGroups(List paths) - throws MetadataException { - Set storageGroups = new HashSet<>(); - for (PartialPath path : paths) { - storageGroups.add( - MetaUtils.getStorageGroupPathByLevel( - path, IoTDBDescriptor.getInstance().getConfig().getDefaultStorageGroupLevel())); - } - return new ArrayList<>(storageGroups); - } - - @SuppressWarnings("squid:S3776") - private void verifyCreatedSgSuccess(List storageGroups, PhysicalPlan physicalPlan) { - long startTime = System.currentTimeMillis(); - boolean[] ready = new boolean[storageGroups.size()]; - Arrays.fill(ready, false); - while (true) { - boolean allReady = true; - for (int i = 0; i < storageGroups.size(); i++) { - if (ready[i]) { - continue; - } - if (IoTDB.schemaProcessor.isStorageGroup(storageGroups.get(i))) { - ready[i] = true; - } else { - allReady = false; - } - } - - if (allReady - || System.currentTimeMillis() - startTime - > ClusterDescriptor.getInstance().getConfig().getConnectionTimeoutInMS()) { - break; - } else { - try { - Thread.sleep(1); - } catch (InterruptedException e) { - logger.debug("Failed to wait for creating sgs for plan {}", physicalPlan, e); - Thread.currentThread().interrupt(); - } - } - } - } - - /** - * Create storage groups automatically for paths. - * - * @param storageGroups the uncreated storage groups - */ - private void createStorageGroups(List storageGroups) throws MetadataException { - for (PartialPath storageGroup : storageGroups) { - SetStorageGroupPlan setStorageGroupPlan = new SetStorageGroupPlan(storageGroup); - TSStatus setStorageGroupResult = - metaGroupMember.processNonPartitionedMetaPlan(setStorageGroupPlan); - if (setStorageGroupResult.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() - && setStorageGroupResult.getCode() - != TSStatusCode.PATH_ALREADY_EXIST_ERROR.getStatusCode()) { - throw new MetadataException( - String.format( - "Status Code: %d, failed to set storage group %s", - setStorageGroupResult.getCode(), storageGroup)); - } - } - } - - /** - * @param insertMultiTabletsPlan the InsertMultiTabletsPlan - * @return true if all InsertTabletPlan in InsertMultiTabletsPlan create timeseries success, - * otherwise false - */ - public boolean createTimeseries(InsertMultiTabletsPlan insertMultiTabletsPlan) - throws CheckConsistencyException, IllegalPathException { - boolean allSuccess = true; - for (InsertTabletPlan insertTabletPlan : insertMultiTabletsPlan.getInsertTabletPlanList()) { - boolean success = createTimeseries(insertTabletPlan); - allSuccess = allSuccess && success; - if (!success) { - logger.error( - "create timeseries for device={} failed, plan={}", - insertTabletPlan.getDevicePath(), - insertTabletPlan); - } - } - return allSuccess; - } - - public boolean createTimeseries(InsertRowsPlan insertRowsPlan) - throws CheckConsistencyException, IllegalPathException { - boolean allSuccess = true; - for (InsertRowPlan insertRowPlan : insertRowsPlan.getInsertRowPlanList()) { - boolean success = createTimeseries(insertRowPlan); - allSuccess = allSuccess && success; - if (!success) { - logger.error( - "create timeseries for device={} failed, plan={}", - insertRowPlan.getDevicePath(), - insertRowPlan); - } - } - return allSuccess; - } - - public boolean createTimeseries(InsertRowsOfOneDevicePlan insertRowsOfOneDevicePlan) - throws CheckConsistencyException, IllegalPathException { - boolean allSuccess = true; - for (InsertRowPlan insertRowPlan : insertRowsOfOneDevicePlan.getRowPlans()) { - boolean success = createTimeseries(insertRowPlan); - allSuccess = allSuccess 
&& success; - if (!success) { - logger.error( - "create timeseries for device={} failed, plan={}", - insertRowPlan.getDevicePath(), - insertRowPlan); - } - } - return allSuccess; - } - - /** - * Create timeseries automatically for an InsertPlan. - * - * @param insertPlan some of the timeseries in it are not created yet - * @return true of all uncreated timeseries are created - */ - public boolean createTimeseries(InsertPlan insertPlan) - throws IllegalPathException, CheckConsistencyException { - if (insertPlan instanceof InsertMultiTabletsPlan) { - return createTimeseries((InsertMultiTabletsPlan) insertPlan); - } - - if (insertPlan instanceof InsertRowsPlan) { - return createTimeseries((InsertRowsPlan) insertPlan); - } - - if (insertPlan instanceof InsertRowsOfOneDevicePlan) { - return createTimeseries((InsertRowsOfOneDevicePlan) insertPlan); - } - - List seriesList = new ArrayList<>(); - PartialPath deviceId = insertPlan.getDevicePath(); - PartialPath storageGroupName; - try { - storageGroupName = - MetaUtils.getStorageGroupPathByLevel( - deviceId, IoTDBDescriptor.getInstance().getConfig().getDefaultStorageGroupLevel()); - } catch (MetadataException e) { - logger.error("Failed to infer storage group from deviceId {}", deviceId); - return false; - } - for (String measurementId : insertPlan.getMeasurements()) { - seriesList.add(deviceId.getFullPath() + TsFileConstant.PATH_SEPARATOR + measurementId); - } - if (insertPlan.isAligned()) { - return createAlignedTimeseries(seriesList, insertPlan); - } - PartitionGroup partitionGroup = - metaGroupMember.getPartitionTable().route(storageGroupName.getFullPath(), 0); - List unregisteredSeriesList = getUnregisteredSeriesList(seriesList, partitionGroup); - if (unregisteredSeriesList.isEmpty()) { - return true; - } - logger.debug("Unregisterd series of {} are {}", seriesList, unregisteredSeriesList); - - return createTimeseries(unregisteredSeriesList, seriesList, insertPlan); - } - - private boolean createAlignedTimeseries(List seriesList, InsertPlan insertPlan) - throws IllegalPathException { - List measurements = new ArrayList<>(); - for (String series : seriesList) { - measurements.add((new PartialPath(series)).getMeasurement()); - } - - List dataTypes = new ArrayList<>(measurements.size()); - List encodings = new ArrayList<>(measurements.size()); - List compressors = new ArrayList<>(measurements.size()); - for (int index = 0; index < measurements.size(); index++) { - TSDataType dataType; - if (insertPlan.getDataTypes() != null && insertPlan.getDataTypes()[index] != null) { - dataType = insertPlan.getDataTypes()[index]; - } else { - dataType = - TypeInferenceUtils.getPredictedDataType( - insertPlan instanceof InsertTabletPlan - ? Array.get(((InsertTabletPlan) insertPlan).getColumns()[index], 0) - : ((InsertRowPlan) insertPlan).getValues()[index], - true); - } - dataTypes.add(dataType); - encodings.add(getDefaultEncoding(dataType)); - compressors.add(TSFileDescriptor.getInstance().getConfig().getCompressor()); - } - - CreateAlignedTimeSeriesPlan plan = - new CreateAlignedTimeSeriesPlan( - insertPlan.getDevicePath(), - measurements, - dataTypes, - encodings, - compressors, - null, - null, - null); - TSStatus result; - try { - result = coordinator.processPartitionedPlan(plan); - } catch (UnsupportedPlanException e) { - logger.error( - "Failed to create timeseries {} automatically. 
Unsupported plan exception {} ", - plan, - e.getMessage()); - return false; - } - if (result.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() - && result.getCode() != TSStatusCode.PATH_ALREADY_EXIST_ERROR.getStatusCode() - && result.getCode() != TSStatusCode.NEED_REDIRECTION.getStatusCode()) { - logger.error( - "{} failed to execute create timeseries {}: {}", - metaGroupMember.getThisNode(), - plan, - result); - return false; - } - return true; - } - - /** - * create timeseries from paths in "unregisteredSeriesList". If data types are provided by the - * InsertPlan, use them, otherwise infer the types from the values. Use default encodings and - * compressions of the corresponding data type. - */ - private boolean createTimeseries( - List unregisteredSeriesList, List seriesList, InsertPlan insertPlan) - throws IllegalPathException { - List paths = new ArrayList<>(); - List dataTypes = new ArrayList<>(); - List encodings = new ArrayList<>(); - List compressors = new ArrayList<>(); - for (String seriesPath : unregisteredSeriesList) { - paths.add(new PartialPath(seriesPath)); - int index = seriesList.indexOf(seriesPath); - TSDataType dataType; - // use data types in insertPlan if provided, otherwise infer them from the values - if (insertPlan.getDataTypes() != null && insertPlan.getDataTypes()[index] != null) { - dataType = insertPlan.getDataTypes()[index]; - } else { - dataType = - TypeInferenceUtils.getPredictedDataType( - insertPlan instanceof InsertTabletPlan - ? Array.get(((InsertTabletPlan) insertPlan).getColumns()[index], 0) - : ((InsertRowPlan) insertPlan).getValues()[index], - true); - } - dataTypes.add(dataType); - // use default encoding and compression from the config - encodings.add(getDefaultEncoding(dataType)); - compressors.add(TSFileDescriptor.getInstance().getConfig().getCompressor()); - } - CreateMultiTimeSeriesPlan plan = new CreateMultiTimeSeriesPlan(); - plan.setPaths(paths); - plan.setDataTypes(dataTypes); - plan.setEncodings(encodings); - plan.setCompressors(compressors); - - TSStatus result; - try { - result = coordinator.processPartitionedPlan(plan); - } catch (UnsupportedPlanException e) { - logger.error( - "Failed to create timeseries {} automatically. Unsupported plan exception {} ", - paths, - e.getMessage()); - return false; - } - if (result.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode() - && result.getCode() != TSStatusCode.PATH_ALREADY_EXIST_ERROR.getStatusCode() - && result.getCode() != TSStatusCode.NEED_REDIRECTION.getStatusCode() - && !(result.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode() - && result.getSubStatus().stream() - .allMatch( - s -> s.getCode() == TSStatusCode.PATH_ALREADY_EXIST_ERROR.getStatusCode()))) { - logger.error( - "{} failed to execute create timeseries {}: {}", - metaGroupMember.getThisNode(), - paths, - result); - return false; - } - return true; - } - - public void setMetaGroupMember(MetaGroupMember metaGroupMember) { - this.metaGroupMember = metaGroupMember; - } - - public void setCoordinator(Coordinator coordinator) { - this.coordinator = coordinator; - } - - /** - * To check which timeseries in the input list is unregistered from one node in "partitionGroup". 
- */ - private List getUnregisteredSeriesList( - List seriesList, PartitionGroup partitionGroup) throws CheckConsistencyException { - if (partitionGroup.contains(metaGroupMember.getThisNode())) { - return getUnregisteredSeriesListLocally(seriesList, partitionGroup); - } else { - return getUnregisteredSeriesListRemotely(seriesList, partitionGroup); - } - } - - private List getUnregisteredSeriesListLocally( - List seriesList, PartitionGroup partitionGroup) throws CheckConsistencyException { - DataGroupMember dataMember = - ClusterIoTDB.getInstance() - .getDataGroupEngine() - .getDataMember(partitionGroup.getHeader(), null, null); - return dataMember.getLocalQueryExecutor().getUnregisteredTimeseries(seriesList); - } - - private List getUnregisteredSeriesListRemotely( - List seriesList, PartitionGroup partitionGroup) { - for (Node node : partitionGroup) { - List result = null; - try { - result = getUnregisteredSeriesListRemotelyForOneNode(node, seriesList, partitionGroup); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error( - "{}: getting unregistered series list {} ... {} is interrupted from {}", - metaGroupMember.getName(), - seriesList.get(0), - seriesList.get(seriesList.size() - 1), - node, - e); - } catch (Exception e) { - logger.error( - "{}: cannot getting unregistered {} and other {} paths from {}", - metaGroupMember.getName(), - seriesList.get(0), - seriesList.get(seriesList.size() - 1), - node, - e); - } - if (result != null) { - return result; - } - } - return Collections.emptyList(); - } - - private List getUnregisteredSeriesListRemotelyForOneNode( - Node node, List seriesList, PartitionGroup partitionGroup) - throws IOException, TException, InterruptedException { - List result; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - result = - SyncClientAdaptor.getUnregisteredMeasurements( - client, partitionGroup.getHeader(), seriesList); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - result = syncDataClient.getUnregisteredTimeseries(partitionGroup.getHeader(), seriesList); - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - return result; - } - - /** - * Get all devices after removing wildcards in the path - * - * @param originPath a path potentially with wildcard. - * @param isPrefixMatch if true, the path pattern is used to match prefix path - * @return all paths after removing wildcards in the path. - */ - @Override - public Set getMatchedDevices(PartialPath originPath, boolean isPrefixMatch) - throws MetadataException { - Map> sgPathMap = groupPathByStorageGroup(originPath); - Set ret = getMatchedDevices(sgPathMap, isPrefixMatch); - logger.debug("The devices of path {} are {}", originPath, ret); - return ret; - } - - /** - * Split the paths by the data group they belong to and query them from the groups separately. 
- * - * @param sgPathMap the key is the storage group name and the value is the path to be queried with - * storage group added - * @return a collection of all queried paths - */ - private List getMatchedPaths( - Map> sgPathMap, boolean withAlias) throws MetadataException { - List result = new ArrayList<>(); - // split the paths by the data group they belong to - Map> remoteGroupPathMap = new HashMap<>(); - for (Entry> sgPathEntry : sgPathMap.entrySet()) { - String storageGroupName = sgPathEntry.getKey(); - List paths = sgPathEntry.getValue(); - // find the data group that should hold the timeseries schemas of the storage group - PartitionGroup partitionGroup = - metaGroupMember.getPartitionTable().route(storageGroupName, 0); - if (partitionGroup.contains(metaGroupMember.getThisNode())) { - // this node is a member of the group, perform a local query after synchronizing with the - // leader - try { - metaGroupMember - .getLocalDataMember(partitionGroup.getHeader(), partitionGroup.getRaftId()) - .syncLeader(null); - } catch (CheckConsistencyException e) { - logger.warn("Failed to check consistency.", e); - } - List allTimeseriesName = new ArrayList<>(); - for (PartialPath path : paths) { - allTimeseriesName.addAll(getMatchedPathsLocally(path, withAlias)); - } - logger.debug( - "{}: get matched paths of {} locally, result {}", - metaGroupMember.getName(), - partitionGroup, - allTimeseriesName); - result.addAll(allTimeseriesName); - } else { - // batch the queries of the same group to reduce communication - for (PartialPath path : paths) { - remoteGroupPathMap - .computeIfAbsent(partitionGroup, p -> new ArrayList<>()) - .add(path.getFullPath()); - } - } - } - - // query each data group separately - for (Entry> partitionGroupPathEntry : - remoteGroupPathMap.entrySet()) { - PartitionGroup partitionGroup = partitionGroupPathEntry.getKey(); - List pathsToQuery = partitionGroupPathEntry.getValue(); - result.addAll(getMatchedPaths(partitionGroup, pathsToQuery, withAlias)); - } - - return result; - } - - private List getMatchedPathsLocally(PartialPath partialPath, boolean withAlias) - throws MetadataException { - if (!withAlias) { - return getMeasurementPaths(partialPath); - } else { - return super.getMeasurementPathsWithAlias(partialPath, -1, -1, false).left; - } - } - - private List getMatchedPaths( - PartitionGroup partitionGroup, List pathsToQuery, boolean withAlias) - throws MetadataException { - // choose the node with lowest latency or highest throughput - List coordinatedNodes = QueryCoordinator.getINSTANCE().reorderNodes(partitionGroup); - for (Node node : coordinatedNodes) { - try { - List paths = - getMatchedPaths(node, partitionGroup.getHeader(), pathsToQuery, withAlias); - if (logger.isDebugEnabled()) { - logger.debug( - "{}: get matched paths of {} and other {} paths from {} in {}, result {}", - metaGroupMember.getName(), - pathsToQuery.get(0), - pathsToQuery.size() - 1, - node, - partitionGroup.getHeader(), - paths); - } - if (paths != null) { - // a non-null result contains correct result even if it is empty, so query next group - return paths; - } - } catch (IOException | TException e) { - throw new MetadataException(e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new MetadataException(e); - } - } - logger.warn("Cannot get paths of {} from {}", pathsToQuery, partitionGroup); - return Collections.emptyList(); - } - - @SuppressWarnings("java:S1168") // null and empty list are different - private List getMatchedPaths( - Node node, RaftNode header, 
List pathsToQuery, boolean withAlias) - throws IOException, TException, InterruptedException { - GetAllPathsResult result; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - result = SyncClientAdaptor.getAllPaths(client, header, pathsToQuery, withAlias); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - result = syncDataClient.getAllPaths(header, pathsToQuery, withAlias); - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - - if (result != null) { - // paths may be empty, implying that the group does not contain matched paths, so we do not - // need to query other nodes in the group - List measurementPaths = new ArrayList<>(); - for (int i = 0; i < result.paths.size(); i++) { - MeasurementPath matchedPath = - ClusterQueryUtils.getAssembledPathFromRequest( - result.getPaths().get(i), result.getDataTypes().get(i)); - measurementPaths.add(matchedPath); - if (withAlias && matchedPath != null) { - matchedPath.setMeasurementAlias(result.aliasList.get(i)); - } - if (matchedPath != null) { - matchedPath.setUnderAlignedEntity(result.getUnderAlignedEntity().get(i)); - } - } - return measurementPaths; - } else { - // a null implies a network failure, so we have to query other nodes in the group - return null; - } - } - - /** - * Split the paths by the data group they belong to and query them from the groups separately. 
- * - * @param sgPathMap the key is the storage group name and the value is the path pattern to be - * queried with storage group added - * @param isPrefixMatch if true, the path pattern is used to match prefix path - * @return a collection of all queried devices - */ - private Set getMatchedDevices( - Map> sgPathMap, boolean isPrefixMatch) throws MetadataException { - Set result = new HashSet<>(); - // split the paths by the data group they belong to - Map> groupPathMap = new HashMap<>(); - for (Entry> sgPathEntry : sgPathMap.entrySet()) { - String storageGroupName = sgPathEntry.getKey(); - List paths = sgPathEntry.getValue(); - // find the data group that should hold the timeseries schemas of the storage group - PartitionGroup partitionGroup = - metaGroupMember.getPartitionTable().route(storageGroupName, 0); - if (partitionGroup.contains(metaGroupMember.getThisNode())) { - // this node is a member of the group, perform a local query after synchronizing with the - // leader - try { - metaGroupMember - .getLocalDataMember(partitionGroup.getHeader(), partitionGroup.getRaftId()) - .syncLeader(null); - } catch (CheckConsistencyException e) { - logger.warn("Failed to check consistency.", e); - } - Set allDevices = new HashSet<>(); - for (PartialPath path : paths) { - allDevices.addAll(super.getMatchedDevices(path, isPrefixMatch)); - } - logger.debug( - "{}: get matched paths of {} locally, result {}", - metaGroupMember.getName(), - partitionGroup, - allDevices); - result.addAll(allDevices); - } else { - // batch the queries of the same group to reduce communication - for (PartialPath path : paths) { - groupPathMap - .computeIfAbsent(partitionGroup, p -> new ArrayList<>()) - .add(path.getFullPath()); - } - } - } - - // query each data group separately - for (Entry> partitionGroupPathEntry : groupPathMap.entrySet()) { - PartitionGroup partitionGroup = partitionGroupPathEntry.getKey(); - List pathsToQuery = partitionGroupPathEntry.getValue(); - - result.addAll(getMatchedDevices(partitionGroup, pathsToQuery, isPrefixMatch)); - } - - return result; - } - - private Set getMatchedDevices( - PartitionGroup partitionGroup, List pathsToQuery, boolean isPrefixMatch) - throws MetadataException { - // choose the node with lowest latency or highest throughput - List coordinatedNodes = QueryCoordinator.getINSTANCE().reorderNodes(partitionGroup); - for (Node node : coordinatedNodes) { - try { - Set paths = - getMatchedDevices(node, partitionGroup.getHeader(), pathsToQuery, isPrefixMatch); - logger.debug( - "{}: get matched paths of {} from {}, result {} for {}", - metaGroupMember.getName(), - partitionGroup, - node, - paths, - pathsToQuery); - if (paths != null) { - // query next group - Set partialPaths = new HashSet<>(); - for (String path : paths) { - partialPaths.add(new PartialPath(path)); - } - return partialPaths; - } - } catch (IOException | TException e) { - throw new MetadataException(e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new MetadataException(e); - } - } - logger.warn("Cannot get paths of {} from {}", pathsToQuery, partitionGroup); - return Collections.emptySet(); - } - - private Set getMatchedDevices( - Node node, RaftNode header, List pathsToQuery, boolean isPrefixMatch) - throws IOException, TException, InterruptedException { - Set paths; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - paths = 
SyncClientAdaptor.getAllDevices(client, header, pathsToQuery, isPrefixMatch); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - try { - paths = syncDataClient.getAllDevices(header, pathsToQuery, isPrefixMatch); - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - return paths; - } - - /** - * Similar to method getAllTimeseriesPath(), but return Path with alias. - * - *
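Every remote call in this class follows the same client-handling convention: when the cluster is configured for async servers the call goes through SyncClientAdaptor with an AsyncDataClient; otherwise a pooled SyncDataClient is borrowed, closed on a TException (the connection is presumed broken and must not be reused), and returned to the pool in a finally block. The generic sketch below is not part of this patch; Client and ClientPool are hypothetical stand-ins that only illustrate the convention.

// Standalone sketch, not part of this patch: Client and ClientPool are hypothetical
// stand-ins for the pooled SyncDataClient used throughout this class.
final class PooledCallSketch {

  interface Client {
    byte[] call(byte[] request) throws Exception;
    void close();        // discard the underlying connection
    void returnSelf();   // hand the client back to its pool
  }

  interface ClientPool {
    Client borrow(long timeoutMs) throws Exception;
  }

  // Borrow a client, close it if the call fails (the connection may be broken and
  // must not be reused), and always return it to the pool in the finally block.
  static byte[] callWithPooledClient(ClientPool pool, byte[] request, long timeoutMs)
      throws Exception {
    Client client = null;
    try {
      client = pool.borrow(timeoutMs);
      return client.call(request);
    } catch (Exception e) {
      if (client != null) {
        client.close();
      }
      throw e;
    } finally {
      if (client != null) {
        client.returnSelf();
      }
    }
  }
}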

Please note that for a returned measurement path, the name, alias and datatype are - * guaranteed to be accurate, while the compression type, encoding and other fields are not. See - * {@link GetAllPathsResult} - */ - @Override - public Pair, Integer> getMeasurementPathsWithAlias( - PartialPath pathPattern, int limit, int offset, boolean isPrefixMatch) - throws MetadataException { - Map> sgPathMap = groupPathByStorageGroup(pathPattern); - - if (isPrefixMatch) { - // adapt to prefix match of IoTDB v0.12 - Map> prefixSgPathMap = - groupPathByStorageGroup(pathPattern.concatNode(IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD)); - List originPaths; - List addedPaths; - for (String sg : prefixSgPathMap.keySet()) { - originPaths = sgPathMap.get(sg); - addedPaths = prefixSgPathMap.get(sg); - if (originPaths == null) { - sgPathMap.put(sg, addedPaths); - } else { - for (PartialPath path : addedPaths) { - if (!originPaths.contains(path)) { - originPaths.add(path); - } - } - } - } - } - - List result = getMatchedPaths(sgPathMap, true); - - int skippedOffset = 0; - // apply offset and limit - if (offset > 0 && result.size() > offset) { - skippedOffset = offset; - result = result.subList(offset, result.size()); - } else if (offset > 0) { - skippedOffset = result.size(); - result = Collections.emptyList(); - } - if (limit > 0 && result.size() > limit) { - result = result.subList(0, limit); - } - logger.debug("The paths of path {} are {}", pathPattern, result); - - return new Pair<>(result, skippedOffset); - } - - /** - * Get all paths after removing wildcards in the path. - * - *
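Because each data group returns its own partial result, pagination cannot be applied inside the sub-queries: the code above first merges all matched paths and only then applies the offset and limit, reporting how many entries the offset actually skipped (the show-timeseries and show-devices variants later push limit + offset down with an offset of zero for the same reason). The compact sketch below is not part of this patch; it mirrors that merge-then-trim step with a hypothetical helper.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Standalone sketch, not part of this patch: apply offset/limit to an already-merged
// result list and report how many entries the offset skipped (cf. skippedOffset above).
final class PaginationSketch {

  static <T> Trimmed<T> trim(List<T> merged, int limit, int offset) {
    int skipped = 0;
    List<T> result = merged;
    if (offset > 0 && result.size() > offset) {
      skipped = offset;
      result = result.subList(offset, result.size());
    } else if (offset > 0) {
      skipped = result.size();           // fewer matches than the offset: everything is skipped
      result = Collections.emptyList();
    }
    if (limit > 0 && result.size() > limit) {
      result = result.subList(0, limit);
    }
    return new Trimmed<>(new ArrayList<>(result), skipped);
  }

  static final class Trimmed<T> {
    final List<T> items;
    final int skippedOffset;
    Trimmed(List<T> items, int skippedOffset) {
      this.items = items;
      this.skippedOffset = skippedOffset;
    }
  }
}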

Please note that for a returned measurement path, the name, alias and datatype are - * guaranteed to be accurate, while the compression type, encoding and other fields are not. See - * {@link GetAllPathsResult}. - * - * @param originPath a path potentially with wildcard - * @return all paths after removing wildcards in the path - */ - public List getMatchedPaths(PartialPath originPath) throws MetadataException { - Map> sgPathMap = groupPathByStorageGroup(originPath); - List ret = getMatchedPaths(sgPathMap, false); - logger.debug("The paths of path {} are {}", originPath, ret); - return ret; - } - - /** - * Get all paths after removing wildcards in the path - * - * @param originalPaths a list of paths, potentially with wildcard - * @return a pair of path lists, the first are the existing full paths, the second are invalid - * original paths - */ - public Pair, List> getMatchedPaths( - List originalPaths) { - ConcurrentSkipListSet fullPaths = new ConcurrentSkipListSet<>(); - ConcurrentSkipListSet nonExistPaths = new ConcurrentSkipListSet<>(); - // TODO it is not suitable for register and deregister an Object to JMX to such a frequent - // function call. - // BUT is it suitable to create a thread pool for each calling?? - ExecutorService getAllPathsService = - Executors.newFixedThreadPool(metaGroupMember.getPartitionTable().getGlobalGroups().size()); - for (PartialPath pathStr : originalPaths) { - getAllPathsService.submit( - () -> { - try { - List fullPathStrs = getMatchedPaths(pathStr); - if (fullPathStrs.isEmpty()) { - nonExistPaths.add(pathStr); - logger.debug("Path {} is not found.", pathStr); - } else { - fullPaths.addAll(fullPathStrs); - } - } catch (MetadataException e) { - logger.error("Failed to get full paths of the prefix path: {} because", pathStr, e); - } - }); - } - getAllPathsService.shutdown(); - try { - getAllPathsService.awaitTermination( - ClusterConstant.getReadOperationTimeoutMS(), TimeUnit.MILLISECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Unexpected interruption when waiting for get all paths services to stop", e); - } - return new Pair<>(new ArrayList<>(fullPaths), new ArrayList<>(nonExistPaths)); - } - - /** - * Get the local devices that match any path in "paths". The result is deduplicated. - * - * @param paths paths potentially contain wildcards. - * @param isPrefixMatch if true, the path pattern is used to match prefix path. - * @return A HashSet instance which stores devices paths matching the given path pattern. - */ - public Set getAllDevices(List paths, boolean isPrefixMatch) - throws MetadataException { - Set results = new HashSet<>(); - for (String path : paths) { - this.getMatchedDevices(new PartialPath(path), isPrefixMatch).stream() - .map(PartialPath::getFullPath) - .forEach(results::add); - } - return results; - } - - /** - * Get the nodes of a prefix "path" at "nodeLevel". The method currently requires strong - * consistency. 
- * - * @param path - * @param nodeLevel - */ - public List getNodeList(String path, int nodeLevel) throws MetadataException { - return getNodesListInGivenLevel(new PartialPath(path), nodeLevel).stream() - .map(PartialPath::getFullPath) - .collect(Collectors.toList()); - } - - public Set getChildNodeInNextLevel(String path) throws MetadataException { - return getChildNodeNameInNextLevel(new PartialPath(path)); - } - - public Set getChildNodePathInNextLevel(String path) throws MetadataException { - return getChildNodePathInNextLevel(new PartialPath(path)); - } - - /** - * Replace partial paths (paths not containing measurements), and abstract paths (paths containing - * wildcards) with full paths. - */ - public void convertToFullPaths(PhysicalPlan plan) - throws PathNotExistException, CheckConsistencyException { - // make sure this node knows all storage groups - metaGroupMember.syncLeaderWithConsistencyCheck(false); - - Pair, List> getMatchedPathsRet = - getMatchedPaths(plan.getPaths()); - List fullPaths = getMatchedPathsRet.left; - List nonExistPath = getMatchedPathsRet.right; - plan.setPaths(fullPaths); - if (!nonExistPath.isEmpty()) { - throw new PathNotExistException( - nonExistPath.stream().map(PartialPath::getFullPath).collect(Collectors.toList())); - } - } - - @Override - protected IMeasurementMNode getMeasurementMNode(IMNode deviceMNode, String measurementName) - throws MetadataException { - IMeasurementMNode child = super.getMeasurementMNode(deviceMNode, measurementName); - if (child == null) { - child = mRemoteMetaCache.get(deviceMNode.getPartialPath().concatNode(measurementName)); - } - return child; - } - - public List showLocalTimeseries( - ShowTimeSeriesPlan plan, QueryContext context) throws MetadataException { - return super.showTimeseries(plan, context); - } - - public List getLocalDevices(ShowDevicesPlan plan) throws MetadataException { - return super.getMatchedDevices(plan); - } - - @Override - public List getMatchedDevices(ShowDevicesPlan plan) throws MetadataException { - ConcurrentSkipListSet resultSet = new ConcurrentSkipListSet<>(); - ExecutorService pool = - new ThreadPoolExecutor( - THREAD_POOL_SIZE, THREAD_POOL_SIZE, 0, TimeUnit.SECONDS, new LinkedBlockingDeque<>()); - List globalGroups = metaGroupMember.getPartitionTable().getGlobalGroups(); - - int limit = plan.getLimit() == 0 ? 
Integer.MAX_VALUE : plan.getLimit(); - int offset = plan.getOffset(); - // do not use limit and offset in sub-queries unless offset is 0, otherwise the results are - // not combinable - if (offset != 0) { - if (limit > Integer.MAX_VALUE - offset) { - plan.setLimit(0); - } else { - plan.setLimit(limit + offset); - } - plan.setOffset(0); - } - - if (logger.isDebugEnabled()) { - logger.debug( - "Fetch devices schemas of {} from {} groups", plan.getPath(), globalGroups.size()); - } - - List> futureList = new ArrayList<>(); - for (PartitionGroup group : globalGroups) { - futureList.add( - pool.submit( - () -> { - try { - getDevices(group, plan, resultSet); - } catch (CheckConsistencyException e) { - logger.error("Cannot get show devices result of {} from {}", plan, group); - } - return null; - })); - } - - waitForThreadPool(futureList, pool, "getDevices()"); - List showDevicesResults = - applyShowDevicesLimitOffset(resultSet, limit, offset); - logger.debug("show devices {} has {} results", plan.getPath(), showDevicesResults.size()); - return showDevicesResults; - } - - @Override - public List showTimeseries(ShowTimeSeriesPlan plan, QueryContext context) - throws MetadataException { - ExecutorService pool = - new ThreadPoolExecutor( - THREAD_POOL_SIZE, THREAD_POOL_SIZE, 0, TimeUnit.SECONDS, new LinkedBlockingDeque<>()); - - List groups = new ArrayList<>(); - try { - PartitionGroup partitionGroup = - metaGroupMember.getPartitionTable().partitionByPathTime(plan.getPath(), 0); - groups.add(partitionGroup); - } catch (MetadataException e) { - // if the path location is not find, obtain the path location from all groups. - groups = metaGroupMember.getPartitionTable().getGlobalGroups(); - } - - int limit = plan.getLimit() == 0 ? Integer.MAX_VALUE : plan.getLimit(); - int offset = plan.getOffset(); - // do not use limit and offset in sub-queries unless offset is 0, otherwise the results are - // not combinable - if (offset != 0) { - if (limit > Integer.MAX_VALUE - offset) { - plan.setLimit(0); - } else { - plan.setLimit(limit + offset); - } - plan.setOffset(0); - } - - if (logger.isDebugEnabled()) { - logger.debug("Fetch timeseries schemas of {} from {} groups", plan.getPath(), groups.size()); - } - - ShowTimeSeriesHandler handler = new ShowTimeSeriesHandler(groups.size(), plan.getPath()); - List> futureList = new ArrayList<>(); - for (PartitionGroup group : groups) { - futureList.add( - pool.submit( - () -> { - showTimeseries(group, plan, context, handler); - return null; - })); - } - - waitForThreadPool(futureList, pool, "showTimeseries()"); - List showTimeSeriesResults = - applyShowTimeseriesLimitOffset(handler.getResult(), limit, offset); - logger.debug("Show {} has {} results", plan.getPath(), showTimeSeriesResults.size()); - return showTimeSeriesResults; - } - - private List applyShowTimeseriesLimitOffset( - List results, int limit, int offset) { - List showTimeSeriesResults = new ArrayList<>(); - Iterator iterator = results.iterator(); - while (iterator.hasNext() && limit > 0) { - if (offset > 0) { - offset--; - iterator.next(); - } else { - limit--; - showTimeSeriesResults.add(iterator.next()); - } - } - - return showTimeSeriesResults; - } - - private List applyShowDevicesLimitOffset( - Set resultSet, int limit, int offset) { - List showDevicesResults = new ArrayList<>(); - Iterator iterator = resultSet.iterator(); - while (iterator.hasNext() && limit > 0) { - if (offset > 0) { - offset--; - iterator.next(); - } else { - limit--; - showDevicesResults.add(iterator.next()); - } - } - return 
showDevicesResults; - } - - private void showTimeseries( - PartitionGroup group, - ShowTimeSeriesPlan plan, - QueryContext context, - ShowTimeSeriesHandler handler) { - if (group.contains(metaGroupMember.getThisNode())) { - showLocalTimeseries(group, plan, context, handler); - } else { - showRemoteTimeseries(group, plan, context, handler); - } - } - - private void getDevices( - PartitionGroup group, ShowDevicesPlan plan, Set resultSet) - throws CheckConsistencyException, MetadataException { - if (group.contains(metaGroupMember.getThisNode())) { - getLocalDevices(group, plan, resultSet); - } else { - getRemoteDevices(group, plan, resultSet); - } - } - - private void getLocalDevices( - PartitionGroup group, ShowDevicesPlan plan, Set resultSet) - throws CheckConsistencyException, MetadataException { - DataGroupMember localDataMember = - metaGroupMember.getLocalDataMember(group.getHeader(), group.getRaftId()); - localDataMember.syncLeaderWithConsistencyCheck(false); - try { - List localResult = super.getMatchedDevices(plan); - resultSet.addAll(localResult); - logger.debug("Fetched {} devices of {} from {}", localResult.size(), plan.getPath(), group); - } catch (MetadataException e) { - logger.error("Cannot execute show devices plan {} from {} locally.", plan, group); - throw e; - } - } - - private void showLocalTimeseries( - PartitionGroup group, - ShowTimeSeriesPlan plan, - QueryContext context, - ShowTimeSeriesHandler handler) { - try { - DataGroupMember localDataMember = - metaGroupMember.getLocalDataMember(group.getHeader(), group.getRaftId()); - localDataMember.syncLeaderWithConsistencyCheck(false); - List localResult = super.showTimeseries(plan, context); - handler.onComplete(localResult); - } catch (MetadataException | CheckConsistencyException e) { - handler.onError(e); - } - } - - private void showRemoteTimeseries( - PartitionGroup group, - ShowTimeSeriesPlan plan, - QueryContext context, - ShowTimeSeriesHandler handler) { - ByteBuffer resultBinary = null; - for (Node node : group) { - try { - resultBinary = showRemoteTimeseries(context, node, group, plan); - if (resultBinary != null) { - break; - } - } catch (IOException | TException e) { - logger.error(LOG_FAIL_CONNECT, node, e); - } catch (InterruptedException e) { - logger.error("Interrupted when getting timeseries schemas in node {}.", node, e); - Thread.currentThread().interrupt(); - } finally { - // record the queried node to release resources later - ((RemoteQueryContext) context).registerRemoteNode(node, group.getHeader()); - } - } - - if (resultBinary != null) { - int size = resultBinary.getInt(); - List results = new ArrayList<>(); - logger.debug( - "Fetched remote timeseries {} schemas of {} from {}", size, plan.getPath(), group); - for (int i = 0; i < size; i++) { - results.add(ShowTimeSeriesResult.deserialize(resultBinary)); - } - handler.onComplete(results); - } else { - String errMsg = - String.format("Failed to get timeseries in path %s from group %s", plan.getPath(), group); - handler.onError(new MetadataException(errMsg)); - } - } - - private void getRemoteDevices( - PartitionGroup group, ShowDevicesPlan plan, Set resultSet) { - ByteBuffer resultBinary = null; - for (Node node : group) { - try { - resultBinary = getRemoteDevices(node, group, plan); - if (resultBinary != null) { - break; - } - } catch (IOException | TException e) { - logger.error(LOG_FAIL_CONNECT, node, e); - } catch (InterruptedException e) { - logger.error("Interrupted when getting devices schemas in node {}.", node, e); - 
Thread.currentThread().interrupt(); - } - } - - if (resultBinary != null) { - int size = resultBinary.getInt(); - logger.debug("Fetched remote devices {} schemas of {} from {}", size, plan.getPath(), group); - for (int i = 0; i < size; i++) { - resultSet.add(ShowDevicesResult.deserialize(resultBinary)); - } - } else { - logger.error("Failed to execute show devices {} in group: {}.", plan, group); - } - } - - private ByteBuffer showRemoteTimeseries( - QueryContext context, Node node, PartitionGroup group, ShowTimeSeriesPlan plan) - throws IOException, TException, InterruptedException { - ByteBuffer resultBinary; - - // prepare request - MeasurementSchemaRequest request; - try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream)) { - plan.serialize(dataOutputStream); - request = - new MeasurementSchemaRequest( - context.getQueryId(), - group.getHeader(), - node, - ByteBuffer.wrap(byteArrayOutputStream.toByteArray())); - } - - // execute remote query - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - resultBinary = SyncClientAdaptor.getAllMeasurementSchema(client, request); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - resultBinary = syncDataClient.getAllMeasurementSchema(request); - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - return resultBinary; - } - - private ByteBuffer getRemoteDevices(Node node, PartitionGroup group, ShowDevicesPlan plan) - throws IOException, TException, InterruptedException { - ByteBuffer resultBinary; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - resultBinary = SyncClientAdaptor.getDevices(client, group.getHeader(), plan); - } else { - SyncDataClient syncDataClient = null; - try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream)) { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - plan.serialize(dataOutputStream); - resultBinary = - syncDataClient.getDevices( - group.getHeader(), ByteBuffer.wrap(byteArrayOutputStream.toByteArray())); - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - return resultBinary; - } - - public GetAllPathsResult getAllPaths(List paths, boolean withAlias) - throws MetadataException { - List retPaths = new ArrayList<>(); - List dataTypes = new ArrayList<>(); - List alias = withAlias ? 
new ArrayList<>() : null; - List underAlignedEntity = new ArrayList<>(); - - for (String path : paths) { - List allTimeseriesPathWithAlias = - super.getMeasurementPathsWithAlias(new PartialPath(path), -1, -1, false).left; - for (MeasurementPath timeseriesPathWithAlias : allTimeseriesPathWithAlias) { - retPaths.add(timeseriesPathWithAlias.getFullPath()); - dataTypes.add(timeseriesPathWithAlias.getSeriesTypeInByte()); - if (withAlias) { - alias.add(timeseriesPathWithAlias.getMeasurementAlias()); - } - underAlignedEntity.add(timeseriesPathWithAlias.isUnderAlignedEntity()); - } - } - - GetAllPathsResult getAllPathsResult = new GetAllPathsResult(); - getAllPathsResult.setPaths(retPaths); - getAllPathsResult.setDataTypes(dataTypes); - getAllPathsResult.setAliasList(alias); - getAllPathsResult.setUnderAlignedEntity(underAlignedEntity); - return getAllPathsResult; - } - - @Override - public PartialPath getBelongedStorageGroup(PartialPath path) throws StorageGroupNotSetException { - try { - return super.getBelongedStorageGroup(path); - } catch (StorageGroupNotSetException e) { - try { - metaGroupMember.syncLeader(null); - } catch (CheckConsistencyException ex) { - logger.warn("Failed to check consistency.", e); - } - return super.getBelongedStorageGroup(path); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/metadata/MetaPuller.java b/cluster/src/main/java/org/apache/iotdb/cluster/metadata/MetaPuller.java deleted file mode 100644 index 73a16b748b13..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/metadata/MetaPuller.java +++ /dev/null @@ -1,452 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.metadata; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.query.manage.QueryCoordinator; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaResp; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.utils.ClusterUtils; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.qp.constant.SQLConstant; -import org.apache.iotdb.db.utils.SchemaUtils; -import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.MeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; -import org.apache.iotdb.tsfile.write.schema.VectorMeasurementSchema; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; - -public class MetaPuller { - - private static final Logger logger = LoggerFactory.getLogger(MetaPuller.class); - private MetaGroupMember metaGroupMember; - - private MetaPuller() {} - - private static class MetaPullerHolder { - - private static final MetaPuller INSTANCE = new MetaPuller(); - } - - public void init(MetaGroupMember metaGroupMember) { - this.metaGroupMember = metaGroupMember; - } - - /** - * we should not use this function in other place, but only in IoTDB class - * - * @return - */ - public static MetaPuller getInstance() { - return MetaPullerHolder.INSTANCE; - } - - /** - * Pull the all timeseries schemas of given prefixPaths from remote nodes. All prefixPaths must - * contain the storage group. 
- */ - List pullMeasurementSchemas(List prefixPaths) - throws MetadataException { - logger.debug("{}: Pulling timeseries schemas of {}", metaGroupMember.getName(), prefixPaths); - // split the paths by the data groups that will hold them - Map> partitionGroupPathMap = new HashMap<>(); - for (PartialPath prefixPath : prefixPaths) { - PartitionGroup partitionGroup = - ClusterUtils.partitionByPathTimeWithSync(prefixPath, metaGroupMember); - partitionGroupPathMap.computeIfAbsent(partitionGroup, g -> new ArrayList<>()).add(prefixPath); - } - - List schemas = new ArrayList<>(); - // pull timeseries schema from every group involved - if (logger.isDebugEnabled()) { - logger.debug( - "{}: pulling schemas of {} and other {} paths from {} groups", - metaGroupMember.getName(), - prefixPaths.get(0), - prefixPaths.size() - 1, - partitionGroupPathMap.size()); - } - for (Map.Entry> partitionGroupListEntry : - partitionGroupPathMap.entrySet()) { - PartitionGroup partitionGroup = partitionGroupListEntry.getKey(); - List paths = partitionGroupListEntry.getValue(); - pullMeasurementSchemas(partitionGroup, paths, schemas); - } - if (logger.isDebugEnabled()) { - logger.debug( - "{}: pulled {} schemas for {} and other {} paths", - metaGroupMember.getName(), - schemas.size(), - prefixPaths.get(0), - prefixPaths.size() - 1); - } - return schemas; - } - - /** - * Pull timeseries schemas of "prefixPaths" from "partitionGroup" and store them in "results". If - * this node is a member of "partitionGroup", synchronize with the group leader and collect local - * schemas. Otherwise pull schemas from one node in the group. - * - * @param partitionGroup - * @param prefixPaths - * @param results - */ - public void pullMeasurementSchemas( - PartitionGroup partitionGroup, - List prefixPaths, - List results) { - if (partitionGroup.contains(metaGroupMember.getThisNode())) { - // the node is in the target group, synchronize with leader should be enough - try { - metaGroupMember - .getLocalDataMember(partitionGroup.getHeader(), "Pull timeseries of " + prefixPaths) - .syncLeader(null); - } catch (CheckConsistencyException e) { - logger.warn("Failed to check consistency.", e); - } - int preSize = results.size(); - for (PartialPath prefixPath : prefixPaths) { - // IoTDB.schemaProcessor.collectMeasurementSchema(prefixPath, results); - } - if (logger.isDebugEnabled()) { - logger.debug( - "{}: Pulled {} timeseries schemas of {} and other {} paths from local", - metaGroupMember.getName(), - results.size() - preSize, - prefixPaths.get(0), - prefixPaths.size() - 1); - } - return; - } - - // pull schemas from a remote node - PullSchemaRequest pullSchemaRequest = new PullSchemaRequest(); - pullSchemaRequest.setHeader(partitionGroup.getHeader()); - pullSchemaRequest.setPrefixPaths( - prefixPaths.stream().map(PartialPath::getFullPath).collect(Collectors.toList())); - - for (Node node : partitionGroup) { - if (pullMeasurementSchemas(node, pullSchemaRequest, results)) { - break; - } - } - } - - private boolean pullMeasurementSchemas( - Node node, PullSchemaRequest request, List results) { - if (logger.isDebugEnabled()) { - logger.debug( - "{}: Pulling timeseries schemas of {} and other {} paths from {}", - metaGroupMember.getName(), - request.getPrefixPaths().get(0), - request.getPrefixPaths().size() - 1, - node); - } - - List schemas = null; - try { - schemas = pullMeasurementSchemas(node, request); - } catch (IOException | TException e) { - logger.error( - "{}: Cannot pull timeseries schemas of {} and other {} paths from {}", - 
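[Editor's note] Before pulling anything, the removed MetaPuller above batches the requested prefix paths by the data group that owns them, so that each group receives a single pull request. A minimal sketch of that batching step follows; groups and paths are reduced to plain Strings, and `GroupByOwnerSketch` and its ownership rule are hypothetical names invented for the example (the real code resolves ownership through the partition table and uses PartitionGroup/PartialPath).

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public class GroupByOwnerSketch {
  // Collect the paths owned by each group so one request per group can be sent.
  static Map<String, List<String>> groupByOwner(
      List<String> paths, Function<String, String> ownerOf) {
    Map<String, List<String>> byGroup = new HashMap<>();
    for (String path : paths) {
      byGroup.computeIfAbsent(ownerOf.apply(path), g -> new ArrayList<>()).add(path);
    }
    return byGroup;
  }

  public static void main(String[] args) {
    List<String> paths = Arrays.asList("root.sg1.d1.s1", "root.sg2.d1.s1", "root.sg1.d2.s1");
    // Hypothetical ownership rule: the storage-group level of the path picks the group.
    Map<String, List<String>> batches = groupByOwner(paths, p -> p.split("\\.")[1]);
    System.out.println(batches); // {sg1=[root.sg1.d1.s1, root.sg1.d2.s1], sg2=[root.sg2.d1.s1]}
  }
}
```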
metaGroupMember.getName(), - request.getPrefixPaths().get(0), - request.getPrefixPaths().size() - 1, - node, - e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error( - "{}: Cannot pull timeseries schemas of {} and other {} paths from {}", - metaGroupMember.getName(), - request.getPrefixPaths().get(0), - request.getPrefixPaths().size() - 1, - node, - e); - } - - if (schemas != null) { - if (logger.isDebugEnabled()) { - logger.debug( - "{}: Pulled {} timeseries schemas of {} and other {} paths from {} of {}", - metaGroupMember.getName(), - schemas.size(), - request.getPrefixPaths().get(0), - request.getPrefixPaths().size() - 1, - node, - request.getHeader()); - } - results.addAll(schemas); - return true; - } - return false; - } - - private List pullMeasurementSchemas(Node node, PullSchemaRequest request) - throws IOException, TException, InterruptedException { - List schemas; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - schemas = SyncClientAdaptor.pullMeasurementSchema(client, request); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - // only need measurement name - PullSchemaResp pullSchemaResp = syncDataClient.pullMeasurementSchema(request); - ByteBuffer buffer = pullSchemaResp.schemaBytes; - int size = buffer.getInt(); - schemas = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - schemas.add( - buffer.get() == 0 - ? MeasurementSchema.partialDeserializeFrom(buffer) - : VectorMeasurementSchema.partialDeserializeFrom(buffer)); - } - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - - return schemas; - } - - /** - * Pull the all timeseries schemas of given prefixPaths from remote nodes. All prefixPaths must - * contain a storage group. The pulled schemas will be cache in CSchemaProcessor. - * - * @param ignoredGroup do not pull schema from the group to avoid backward dependency. If a user - * send an insert request before registering schemas, then this method may pull schemas from - * the same groups. If this method is called by an applier, it holds the lock of LogManager, - * while the pulling thread may want this lock too, resulting in a deadlock. 
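[Editor's note] On the synchronous path above, the removed code decodes the pulled schemas from `PullSchemaResp.schemaBytes`: an int count, then one flag byte per entry that selects the schema variant (0 for a plain measurement schema, otherwise a vector schema). The sketch below mirrors only that framing; the per-entry payload (a length-prefixed UTF-8 name) and the `SchemaBufferSketch`/`SimpleSchema` types are invented for illustration, since the real code delegates to the MeasurementSchema/VectorMeasurementSchema deserializers.

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

public class SchemaBufferSketch {

  static class SimpleSchema {
    final boolean vector;
    final String name;

    SimpleSchema(boolean vector, String name) {
      this.vector = vector;
      this.name = name;
    }
  }

  static List<SimpleSchema> decode(ByteBuffer buffer) {
    int size = buffer.getInt(); // number of schemas in the response
    List<SimpleSchema> schemas = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
      boolean vector = buffer.get() != 0; // flag byte: 0 = plain schema, otherwise vector
      int nameLength = buffer.getInt();   // hypothetical payload: a length-prefixed name
      byte[] nameBytes = new byte[nameLength];
      buffer.get(nameBytes);
      schemas.add(new SimpleSchema(vector, new String(nameBytes, StandardCharsets.UTF_8)));
    }
    return schemas;
  }

  public static void main(String[] args) {
    byte[] name = "s1".getBytes(StandardCharsets.UTF_8);
    ByteBuffer buffer = ByteBuffer.allocate(4 + 1 + 4 + name.length);
    buffer.putInt(1).put((byte) 0).putInt(name.length).put(name);
    buffer.flip();
    System.out.println(decode(buffer).get(0).name); // s1
  }
}
```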
- */ - public void pullTimeSeriesSchemas(List prefixPaths, RaftNode ignoredGroup) - throws MetadataException { - // Remove duplicated prefix paths to optimize - Set prefixPathSet = new HashSet<>(prefixPaths); - List uniquePrefixPaths = new ArrayList<>(prefixPathSet); - logger.debug( - "{}: Pulling timeseries schemas of {}, ignored group {}", - metaGroupMember.getName(), - uniquePrefixPaths, - ignoredGroup); - // split the paths by the data groups that should hold them - Map> partitionGroupPathMap = new HashMap<>(); - for (PartialPath prefixPath : uniquePrefixPaths) { - if (SQLConstant.RESERVED_TIME.equalsIgnoreCase(prefixPath.getFullPath())) { - continue; - } - PartitionGroup partitionGroup = - ClusterUtils.partitionByPathTimeWithSync(prefixPath, metaGroupMember); - if (!partitionGroup.getHeader().equals(ignoredGroup)) { - partitionGroupPathMap - .computeIfAbsent(partitionGroup, g -> new ArrayList<>()) - .add(prefixPath.getFullPath()); - } - } - - // pull timeseries schema from every group involved - if (logger.isDebugEnabled()) { - logger.debug( - "{}: pulling schemas of {} and other {} paths from {} groups", - metaGroupMember.getName(), - uniquePrefixPaths.get(0), - uniquePrefixPaths.size() - 1, - partitionGroupPathMap.size()); - } - for (Map.Entry> partitionGroupListEntry : - partitionGroupPathMap.entrySet()) { - PartitionGroup partitionGroup = partitionGroupListEntry.getKey(); - List paths = partitionGroupListEntry.getValue(); - pullTimeSeriesSchemas(partitionGroup, paths, null); - } - } - - /** - * Pull timeseries schemas of "prefixPaths" from "partitionGroup". If this node is a member of - * "partitionGroup", synchronize with the group leader and collect local schemas. Otherwise pull - * schemas from one node in the group. If "timeseriesSchemas" is null, the pulled schemas will be - * cached in CSchemaProcessor. - */ - public void pullTimeSeriesSchemas( - PartitionGroup partitionGroup, - List prefixPaths, - List timeseriesSchemas) { - if (partitionGroup.contains(metaGroupMember.getThisNode())) { - // the node is in the target group, synchronize with leader should be enough - try { - metaGroupMember - .getLocalDataMember(partitionGroup.getHeader(), "Pull timeseries of " + prefixPaths) - .syncLeader(null); - } catch (CheckConsistencyException e) { - logger.warn("Failed to check consistency.", e); - } - return; - } - - // pull schemas from a remote node - PullSchemaRequest pullSchemaRequest = new PullSchemaRequest(); - pullSchemaRequest.setHeader(partitionGroup.getHeader()); - pullSchemaRequest.setPrefixPaths(prefixPaths); - - // decide the node access order with the help of QueryCoordinator - List nodes = QueryCoordinator.getINSTANCE().reorderNodes(partitionGroup); - for (Node node : nodes) { - if (tryPullTimeSeriesSchemas(node, pullSchemaRequest, timeseriesSchemas)) { - break; - } - } - } - - /** - * send the PullSchemaRequest to "node" and cache the results in CSchemaProcessor or add the - * results to "timeseriesSchemas" if they are successfully returned. 
- * - * @return true if the pull succeeded, false otherwise - */ - private boolean tryPullTimeSeriesSchemas( - Node node, PullSchemaRequest request, List timeseriesSchemas) { - if (logger.isDebugEnabled()) { - logger.debug( - "{}: Pulling timeseries schemas of {} and other {} paths from {}", - metaGroupMember.getName(), - request.getPrefixPaths().get(0), - request.getPrefixPaths().size() - 1, - node); - } - - List schemas = null; - try { - schemas = pullTimeSeriesSchemas(node, request); - } catch (IOException | TException e) { - logger.error( - "{}: Cannot pull timeseries schemas of {} and other {} paths from {}", - metaGroupMember.getName(), - request.getPrefixPaths().get(0), - request.getPrefixPaths().size() - 1, - node, - e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error( - "{}: Cannot pull timeseries schemas of {} and other {} paths from {}", - metaGroupMember.getName(), - request.getPrefixPaths().get(0), - request.getPrefixPaths().size() - 1, - node, - e); - } - - if (schemas != null) { - if (logger.isDebugEnabled()) { - logger.debug( - "{}: Pulled {} timeseries schemas of {} and other {} paths from {} of {}", - metaGroupMember.getName(), - schemas.size(), - request.getPrefixPaths().get(0), - request.getPrefixPaths().size() - 1, - node, - request.getHeader()); - } - if (timeseriesSchemas == null) { - for (TimeseriesSchema schema : schemas) { - SchemaUtils.cacheTimeseriesSchema(schema); - } - } else { - timeseriesSchemas.addAll(schemas); - } - return true; - } - return false; - } - - /** - * send a PullSchemaRequest to a node to pull TimeseriesSchemas, and return the pulled schema or - * null if there was a timeout. - */ - private List pullTimeSeriesSchemas(Node node, PullSchemaRequest request) - throws IOException, TException, InterruptedException { - List schemas; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - schemas = SyncClientAdaptor.pullTimeseriesSchema(client, request); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - PullSchemaResp pullSchemaResp = syncDataClient.pullTimeSeriesSchema(request); - ByteBuffer buffer = pullSchemaResp.schemaBytes; - int size = buffer.getInt(); - schemas = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - schemas.add(TimeseriesSchema.deserializeFrom(buffer)); - } - } catch (TException e) { - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - - return schemas; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/partition/NodeAdditionResult.java b/cluster/src/main/java/org/apache/iotdb/cluster/partition/NodeAdditionResult.java deleted file mode 100644 index 95256b9d75f2..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/partition/NodeAdditionResult.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.partition; - -import java.util.ArrayList; -import java.util.List; - -public class NodeAdditionResult { - - /** new data groups(multi raft) headed by the new node. */ - private final List newGroupList = new ArrayList<>(); - - public List getNewGroupList() { - return newGroupList; - } - - public void addNewGroup(PartitionGroup newGroup) { - this.newGroupList.add(newGroup); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/partition/NodeRemovalResult.java b/cluster/src/main/java/org/apache/iotdb/cluster/partition/NodeRemovalResult.java deleted file mode 100644 index b5e3b2897ac7..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/partition/NodeRemovalResult.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.partition; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; - -/** NodeRemovalResult stores the removed partition group. 
*/ -public class NodeRemovalResult { - - private final List removedGroupList = new ArrayList<>(); - - public PartitionGroup getRemovedGroup(int raftId) { - for (PartitionGroup group : removedGroupList) { - if (group.getRaftId() == raftId) { - return group; - } - } - return null; - } - - public void addRemovedGroup(PartitionGroup group) { - this.removedGroupList.add(group); - } - - public void serialize(DataOutputStream dataOutputStream) throws IOException { - dataOutputStream.writeInt(removedGroupList.size()); - for (PartitionGroup group : removedGroupList) { - group.serialize(dataOutputStream); - } - } - - public void deserialize(ByteBuffer buffer) { - int removedGroupListSize = buffer.getInt(); - for (int i = 0; i < removedGroupListSize; i++) { - PartitionGroup group = new PartitionGroup(); - group.deserialize(buffer); - removedGroupList.add(group); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/partition/PartitionGroup.java b/cluster/src/main/java/org/apache/iotdb/cluster/partition/PartitionGroup.java deleted file mode 100644 index bb2f1d9cb02a..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/partition/PartitionGroup.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.partition; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.utils.NodeSerializeUtils; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Objects; - -/** - * PartitionGroup contains all the nodes of a data group. The first element of the list is called - * header. - */ -public class PartitionGroup extends ArrayList { - - private int raftId; - - public PartitionGroup() {} - - public PartitionGroup(Collection nodes) { - this.addAll(nodes); - } - - public PartitionGroup(int raftId, Node... 
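[Editor's note] The removed NodeRemovalResult above uses a simple count-prefixed framing for its group list: write the element count, then each element, and read them back in the same order. A self-contained sketch of that pattern follows, with plain integers standing in for PartitionGroup (which serializes itself in the real code); `CountPrefixedListSketch` is a hypothetical name.

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class CountPrefixedListSketch {
  static byte[] serialize(List<Integer> groups) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeInt(groups.size()); // element count first
    for (int group : groups) {
      out.writeInt(group);       // then each element
    }
    return bytes.toByteArray();
  }

  static List<Integer> deserialize(ByteBuffer buffer) {
    int size = buffer.getInt();
    List<Integer> groups = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
      groups.add(buffer.getInt());
    }
    return groups;
  }

  public static void main(String[] args) throws IOException {
    byte[] data = serialize(Arrays.asList(1, 2, 3));
    System.out.println(deserialize(ByteBuffer.wrap(data))); // [1, 2, 3]
  }
}
```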
nodes) { - this.addAll(Arrays.asList(nodes)); - this.raftId = raftId; - } - - public PartitionGroup(PartitionGroup other) { - super(other); - this.raftId = other.getRaftId(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - PartitionGroup group = (PartitionGroup) o; - return Objects.equals(raftId, group.getRaftId()) && super.equals(group); - } - - public void serialize(DataOutputStream dataOutputStream) throws IOException { - dataOutputStream.writeInt(getRaftId()); - dataOutputStream.writeInt(size()); - for (Node node : this) { - NodeSerializeUtils.serialize(node, dataOutputStream); - } - } - - public void deserialize(ByteBuffer buffer) { - raftId = buffer.getInt(); - int nodeNum = buffer.getInt(); - for (int i2 = 0; i2 < nodeNum; i2++) { - Node node = new Node(); - NodeSerializeUtils.deserialize(node, buffer); - add(node); - } - } - - @Override - public int hashCode() { - return Objects.hash(raftId, super.hashCode()); - } - - public RaftNode getHeader() { - return new RaftNode(get(0), getRaftId()); - } - - public int getRaftId() { - return raftId; - } - - public void setRaftId(int raftId) { - this.raftId = raftId; - } - - @Override - public String toString() { - return super.toString() + ", id = " + raftId; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/partition/PartitionTable.java b/cluster/src/main/java/org/apache/iotdb/cluster/partition/PartitionTable.java deleted file mode 100644 index b6cdc9a51681..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/partition/PartitionTable.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.partition; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.service.IoTDB; - -import org.apache.commons.collections4.map.MultiKeyMap; - -import java.nio.ByteBuffer; -import java.util.List; - -/** - * PartitionTable manages the map whose key is the StorageGroupName with a time interval and the - * value is a PartitionGroup which contains all nodes that manage the corresponding data. - */ -public interface PartitionTable { - - /** - * Given the storageGroupName and the timestamp, return the list of nodes on which the storage - * group and the corresponding time interval is managed. 
- * - * @param storageGroupName - * @param timestamp - * @return - */ - PartitionGroup route(String storageGroupName, long timestamp); - - /** - * Given the storageGroupName and the timestamp, return the header RaftNode of the partitionGroup - * by which the storage group and the corresponding time interval is managed. - * - * @param storageGroupName - * @param timestamp - * @return - */ - RaftNode routeToHeaderByTime(String storageGroupName, long timestamp); - - /** - * Add a new node to update the partition table. - * - * @param node - */ - void addNode(Node node); - - NodeAdditionResult getNodeAdditionResult(Node node); - - /** - * Remove a node and update the partition table. - * - * @param node - */ - void removeNode(Node node); - - /** - * Get the result after remove node, include removedGroupList and newSlotOwners. - * - * @return result after remove node. - */ - NodeRemovalResult getNodeRemovalResult(); - - /** - * @return All data groups where all VNodes of this node is the header. The first index indicates - * the VNode and the second index indicates the data group of one VNode. - */ - List getLocalGroups(); - - /** - * @param headerNode from which node the partition starts - * @return the partition group starting from the header. - */ - PartitionGroup getPartitionGroup(RaftNode headerNode); - - ByteBuffer serialize(); - - /** - * Deserialize partition table and check whether the partition table in byte buffer is valid - * - * @param buffer - * @return true if the partition table is valid - */ - boolean deserialize(ByteBuffer buffer); - - List getAllNodes(); - - List getGlobalGroups(); - - List calculateGlobalGroups(List nodeRing); - - /** get the last meta log index that modifies the partition table */ - long getLastMetaLogIndex(); - - /** set the last meta log index that modifies the partition table */ - void setLastMetaLogIndex(long index); - - /** - * @param path can be an incomplete path (but should contain a storage group name) e.g., if - * "root.sg" is a storage group, then path can not be "root". 
- * @param timestamp - * @return - * @throws StorageGroupNotSetException - */ - default PartitionGroup partitionByPathTime(PartialPath path, long timestamp) - throws MetadataException { - PartialPath storageGroup = IoTDB.schemaProcessor.getBelongedStorageGroup(path); - return this.route(storageGroup.getFullPath(), timestamp); - } - - /** - * Get partition info by path and range time - * - * @return (startTime, endTime) - partitionGroup pair @UsedBy NodeTool - */ - default MultiKeyMap partitionByPathRangeTime( - PartialPath path, long startTime, long endTime) throws MetadataException { - long partitionInterval = StorageEngine.getTimePartitionInterval(); - - MultiKeyMap timeRangeMapRaftGroup = new MultiKeyMap<>(); - PartialPath storageGroup = IoTDB.schemaProcessor.getBelongedStorageGroup(path); - startTime = StorageEngine.convertMilliWithPrecision(startTime); - endTime = StorageEngine.convertMilliWithPrecision(endTime); - while (startTime <= endTime) { - long nextTime = (startTime / partitionInterval + 1) * partitionInterval; - timeRangeMapRaftGroup.put( - startTime, - Math.min(nextTime - 1, endTime), - this.route(storageGroup.getFullPath(), startTime)); - startTime = nextTime; - } - return timeRangeMapRaftGroup; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/partition/balancer/DefaultSlotBalancer.java b/cluster/src/main/java/org/apache/iotdb/cluster/partition/balancer/DefaultSlotBalancer.java deleted file mode 100644 index 264312362317..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/partition/balancer/DefaultSlotBalancer.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.partition.balancer; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -/** This balancer aims to avg slots to all raft groups. */ -public class DefaultSlotBalancer implements SlotBalancer { - - private int multiRaftFactor = ClusterDescriptor.getInstance().getConfig().getMultiRaftFactor(); - private SlotPartitionTable table; - - public DefaultSlotBalancer(SlotPartitionTable partitionTable) { - this.table = partitionTable; - } - - /** - * Move last slots from each group whose slot number is bigger than the new average to the new - * node. 
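[Editor's note] The `partitionByPathRangeTime` default method above cuts the query range at time-partition boundaries and routes each window separately. The sketch below reproduces just that window-splitting loop under the assumption that the interval and timestamps share a unit; `TimeRangeSplitSketch` is a hypothetical name and the `long[]{start, end}` pairs stand in for the (startTime, endTime) to PartitionGroup map built by the real code.

```java
import java.util.ArrayList;
import java.util.List;

public class TimeRangeSplitSketch {
  static List<long[]> splitByPartition(long startTime, long endTime, long partitionInterval) {
    List<long[]> windows = new ArrayList<>();
    while (startTime <= endTime) {
      // next partition boundary after startTime
      long nextTime = (startTime / partitionInterval + 1) * partitionInterval;
      windows.add(new long[] {startTime, Math.min(nextTime - 1, endTime)});
      startTime = nextTime;
    }
    return windows;
  }

  public static void main(String[] args) {
    // e.g. a weekly partition interval (ms) over a 10-day query range -> two windows
    for (long[] window : splitByPartition(0, 10L * 86_400_000, 7L * 86_400_000)) {
      System.out.println(window[0] + " .. " + window[1]);
    }
  }
}
```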
- */ - @Override - public void moveSlotsToNew(Node newNode, List oldRing) { - Map> nodeSlotMap = table.getAllNodeSlots(); - Map> previousNodeMap = table.getPreviousNodeMap(); - RaftNode[] slotNodes = table.getSlotNodes(); - - // as a node is added, the average slots for each node decrease - // move the slots to the new node if any previous node have more slots than the new average - int newAvg = table.getTotalSlotNumbers() / table.getAllNodes().size() / multiRaftFactor; - Map> newNodeSlotMap = new HashMap<>(); - int raftId = 0; - for (int i = 0; i < multiRaftFactor; i++) { - RaftNode raftNode = new RaftNode(newNode, i); - newNodeSlotMap.putIfAbsent(raftNode, new ArrayList<>()); - previousNodeMap.putIfAbsent(raftNode, new HashMap<>()); - } - for (Entry> entry : nodeSlotMap.entrySet()) { - List slots = entry.getValue(); - int transferNum = slots.size() - newAvg; - if (transferNum > 0) { - RaftNode curNode = new RaftNode(newNode, raftId); - int numToMove = transferNum; - if (raftId != multiRaftFactor - 1) { - numToMove = Math.min(numToMove, newAvg - newNodeSlotMap.get(curNode).size()); - } - List slotsToMove = - slots.subList(slots.size() - transferNum, slots.size() - transferNum + numToMove); - newNodeSlotMap.get(curNode).addAll(slotsToMove); - for (Integer slot : slotsToMove) { - // record what node previously hold the integer - previousNodeMap.get(curNode).put(slot, table.getPartitionGroup(entry.getKey(), oldRing)); - slotNodes[slot] = curNode; - } - slotsToMove.clear(); - transferNum -= numToMove; - if (transferNum > 0) { - curNode = new RaftNode(newNode, ++raftId); - slotsToMove = slots.subList(slots.size() - transferNum, slots.size()); - newNodeSlotMap.get(curNode).addAll(slotsToMove); - for (Integer slot : slotsToMove) { - // record what node previously hold the integer - previousNodeMap - .get(curNode) - .put(slot, table.getPartitionGroup(entry.getKey(), oldRing)); - slotNodes[slot] = curNode; - } - slotsToMove.clear(); - } - } - } - nodeSlotMap.putAll(newNodeSlotMap); - } - - @Override - public Map> retrieveSlots(Node target) { - Map> nodeSlotMap = table.getAllNodeSlots(); - RaftNode[] slotNodes = table.getSlotNodes(); - List nodeRing = table.getAllNodes(); - - Map> newHolderSlotMap = new HashMap<>(); - for (int raftId = 0; raftId < multiRaftFactor; raftId++) { - RaftNode raftNode = new RaftNode(target, raftId); - List slots = nodeSlotMap.remove(raftNode); - for (int i = 0; i < slots.size(); i++) { - int slot = slots.get(i); - RaftNode newHolder = new RaftNode(nodeRing.get(i % nodeRing.size()), raftId); - slotNodes[slot] = newHolder; - nodeSlotMap.computeIfAbsent(newHolder, n -> new ArrayList<>()).add(slot); - newHolderSlotMap.computeIfAbsent(newHolder, n -> new ArrayList<>()).add(slot); - } - } - return newHolderSlotMap; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/partition/balancer/SlotBalancer.java b/cluster/src/main/java/org/apache/iotdb/cluster/partition/balancer/SlotBalancer.java deleted file mode 100644 index 20f7e4dfcbec..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/partition/balancer/SlotBalancer.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
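[Editor's note] The removed DefaultSlotBalancer above rebalances by letting every existing owner keep only the new per-group average and handing the surplus slots from the tail of its slot list to the joining node. A simplified sketch of that idea follows; owners are plain Strings, the multi-raft splitting and previous-holder bookkeeping of the real balancer are deliberately ignored, and `SlotRebalanceSketch` is a hypothetical name.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SlotRebalanceSketch {
  static List<Integer> moveSlotsToNew(Map<String, List<Integer>> slotsByOwner, int totalSlots) {
    // +1 because the joining node is not yet in the owner map
    int newAvg = totalSlots / (slotsByOwner.size() + 1);
    List<Integer> movedToNewNode = new ArrayList<>();
    for (List<Integer> slots : slotsByOwner.values()) {
      int surplus = slots.size() - newAvg;
      if (surplus > 0) {
        List<Integer> tail = slots.subList(slots.size() - surplus, slots.size());
        movedToNewNode.addAll(tail);
        tail.clear(); // removing the view also removes the moved slots from the old owner
      }
    }
    return movedToNewNode;
  }

  public static void main(String[] args) {
    Map<String, List<Integer>> owners = new HashMap<>();
    owners.put("n1", new ArrayList<>(Arrays.asList(0, 1, 2, 3, 4)));
    owners.put("n2", new ArrayList<>(Arrays.asList(5, 6, 7, 8, 9)));
    // each old owner keeps 3 slots; the surplus moves to the joining node
    System.out.println(moveSlotsToNew(owners, 10));
  }
}
```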
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.partition.balancer; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; - -import java.util.List; -import java.util.Map; - -/** When add/remove a node, the slots need to be redistributed. */ -public interface SlotBalancer { - - /** - * When add a new node, new raft groups will take over some hash slots from another raft groups. - */ - void moveSlotsToNew(Node newNode, List oldRing); - - /** - * When remove an old node, all hash slots of the removed groups will assigned to other raft - * groups. - * - * @param target the node to be removed - */ - Map> retrieveSlots(Node target); -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotManager.java deleted file mode 100644 index fb1928df53b3..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotManager.java +++ /dev/null @@ -1,402 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.partition.slot; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.NodeSerializeUtils; -import org.apache.iotdb.db.exception.StorageEngineException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.ConcurrentHashMap; - -/** - * SlotManager manages the status of the slots involved during a data transfer (data slot ownership - * changes caused by node removals or additions) of a data group member. 
- */ -public class SlotManager { - - private static final Logger logger = LoggerFactory.getLogger(SlotManager.class); - private static final long SLOT_WAIT_INTERVAL_MS = 10; - private static final long SLOT_WAIT_THRESHOLD_MS = 2000; - private static final String SLOT_FILE_NAME = "SLOT_STATUS"; - - private String slotFilePath; - - /** the serial number of a slot -> the status and source of a slot */ - private Map idSlotMap; - - private String memberName; - - public SlotManager(long totalSlotNumber, String memberDir, String memberName) { - if (memberDir != null) { - this.slotFilePath = memberDir + File.separator + SLOT_FILE_NAME; - } - this.memberName = memberName; - if (!load()) { - init(totalSlotNumber); - } - } - - private void init(long totalSlotNumber) { - idSlotMap = new ConcurrentHashMap<>(); - for (int i = 0; i < totalSlotNumber; i++) { - idSlotMap.put(i, new SlotDescriptor(SlotStatus.NULL)); - } - } - - /** - * Wait until the status of the slot becomes NULL - * - * @param slotId - */ - public void waitSlot(int slotId) { - SlotDescriptor slotDescriptor = idSlotMap.get(slotId); - long startTime = System.currentTimeMillis(); - while (true) { - synchronized (slotDescriptor) { - if (slotDescriptor.slotStatus == SlotStatus.PULLING - || slotDescriptor.slotStatus == SlotStatus.PULLING_WRITABLE) { - try { - slotDescriptor.wait(SLOT_WAIT_INTERVAL_MS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Unexpected interruption when waiting for slot {}", slotId, e); - } - } else { - long cost = System.currentTimeMillis() - startTime; - if (cost > SLOT_WAIT_THRESHOLD_MS) { - logger.info("Wait slot {} cost {}ms", slotId, cost); - } - return; - } - } - } - } - - /** - * Wait until the status of the slot becomes NULL or PULLING_WRITABLE - * - * @param slotId - */ - public void waitSlotForWrite(int slotId) throws StorageEngineException { - SlotDescriptor slotDescriptor = idSlotMap.get(slotId); - long startTime = System.currentTimeMillis(); - while (true) { - synchronized (slotDescriptor) { - if (slotDescriptor.slotStatus == SlotStatus.PULLING) { - try { - if ((System.currentTimeMillis() - startTime) >= SLOT_WAIT_THRESHOLD_MS) { - throw new StorageEngineException( - String.format("The status of slot %d is still PULLING after 5s.", slotId)); - } - slotDescriptor.wait(SLOT_WAIT_INTERVAL_MS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Unexpected interruption when waiting for slot {}", slotId, e); - } - } else { - return; - } - } - } - } - - /** If a slot in the status of PULLING or PULLING_WRITABLE, reads of it should merge the source */ - public boolean checkSlotInDataMigrationStatus(int slotId) { - SlotDescriptor slotDescriptor = idSlotMap.get(slotId); - return slotDescriptor.slotStatus == SlotStatus.PULLING - || slotDescriptor.slotStatus == SlotStatus.PULLING_WRITABLE; - } - - public boolean checkSlotInMetaMigrationStatus(int slotId) { - SlotDescriptor slotDescriptor = idSlotMap.get(slotId); - return slotDescriptor.slotStatus == SlotStatus.PULLING; - } - - /** - * @param slotId - * @return the SlotStatus of a slot - */ - public SlotStatus getStatus(int slotId) { - return idSlotMap.get(slotId).slotStatus; - } - - /** - * @param slotId - * @return the source of a slot if it is being pulled, or null if it is not being pulled - */ - public Node getSource(int slotId) { - return idSlotMap.get(slotId).source; - } - - /** - * Set the status of slot "slotId" to PULLING and its source to "source". 
- * - * @param slotId - * @param source - */ - public void setToPulling(int slotId, Node source) { - setToPulling(slotId, source, true); - } - - public void setToPulling(int slotId, Node source, boolean needSave) { - SlotDescriptor slotDescriptor = idSlotMap.get(slotId); - synchronized (slotDescriptor) { - slotDescriptor.slotStatus = SlotStatus.PULLING; - slotDescriptor.source = source; - } - if (needSave) { - save(); - } - } - - /** - * Set the status of slot "slotId" to PULLING_WRITABLE. - * - * @param slotId - */ - public void setToPullingWritable(int slotId) { - setToPullingWritable(slotId, true); - } - - public void setToPullingWritable(int slotId, boolean needSave) { - SlotDescriptor slotDescriptor = idSlotMap.get(slotId); - synchronized (slotDescriptor) { - slotDescriptor.slotStatus = SlotStatus.PULLING_WRITABLE; - slotDescriptor.notifyAll(); - } - if (needSave) { - save(); - } - } - - /** - * Set the status of slot "slotId" to NULL. - * - * @param slotId - */ - public void setToNull(int slotId) { - setToNull(slotId, true); - } - - public void setToNull(int slotId, boolean needSave) { - SlotDescriptor slotDescriptor = idSlotMap.get(slotId); - synchronized (slotDescriptor) { - slotDescriptor.slotStatus = SlotStatus.NULL; - slotDescriptor.source = null; - slotDescriptor.notifyAll(); - } - if (needSave) { - save(); - } - } - - public void setToSending(int slotId) { - setToSending(slotId, true); - } - - public void setToSending(int slotId, boolean needSave) { - // only NULL slots can be set to SENDING - waitSlot(slotId); - SlotDescriptor slotDescriptor = idSlotMap.get(slotId); - synchronized (slotDescriptor) { - slotDescriptor.slotStatus = SlotStatus.SENDING; - slotDescriptor.snapshotReceivedCount = 0; - } - if (needSave) { - save(); - } - } - - private void setToSent(int slotId) { - SlotDescriptor slotDescriptor = idSlotMap.get(slotId); - synchronized (slotDescriptor) { - slotDescriptor.slotStatus = SlotStatus.SENT; - } - } - - /** - * If a slot is in LOSING status and one member of the remote group has pulled snapshot, the - * method should be called so eventually we can clear data of the slot. - * - * @param slotId - * @return how many members in the remote group has received the snapshot (including this - * invocation). 
- */ - public int sentOneReplication(int slotId) { - return sentOneReplication(slotId, true); - } - - public int sentOneReplication(int slotId, boolean needSave) { - SlotDescriptor slotDescriptor = idSlotMap.get(slotId); - synchronized (slotDescriptor) { - int sentReplicaNum = ++slotDescriptor.snapshotReceivedCount; - if (sentReplicaNum >= ClusterDescriptor.getInstance().getConfig().getReplicationNum()) { - setToSent(slotId); - } - if (needSave) { - save(); - } - return sentReplicaNum; - } - } - - private boolean load() { - if (slotFilePath == null) { - return false; - } - File slotFile = new File(slotFilePath); - if (!slotFile.exists()) { - return false; - } - - try (FileInputStream fileInputStream = new FileInputStream(slotFile); - BufferedInputStream bufferedInputStream = new BufferedInputStream(fileInputStream)) { - byte[] bytes = new byte[(int) slotFile.length()]; - int read = bufferedInputStream.read(bytes); - if (read != slotFile.length() && logger.isWarnEnabled()) { - logger.warn( - "SlotManager in {} read size does not equal to file size: {}/{}", - slotFilePath, - read, - slotFile.length()); - } - deserialize(ByteBuffer.wrap(bytes)); - return true; - } catch (Exception e) { - logger.warn("Cannot deserialize slotManager from {}", slotFilePath, e); - return false; - } - } - - public synchronized void save() { - if (slotFilePath == null) { - return; - } - File slotFile = new File(slotFilePath); - if (!slotFile.getParentFile().exists() && !slotFile.getParentFile().mkdirs()) { - logger.warn("Cannot mkdirs for {}", slotFile); - } - try (FileOutputStream outputStream = new FileOutputStream(slotFilePath); - BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(outputStream); - DataOutputStream dataOutputStream = new DataOutputStream(bufferedOutputStream)) { - serialize(dataOutputStream); - } catch (IOException e) { - logger.warn("SlotManager in {} cannot be saved", slotFilePath, e); - } - } - - public int getSlotNumInDataMigration() { - int res = 0; - for (Entry entry : idSlotMap.entrySet()) { - SlotDescriptor descriptor = entry.getValue(); - if (descriptor.slotStatus == SlotStatus.PULLING - || descriptor.slotStatus == SlotStatus.PULLING_WRITABLE) { - logger.info( - "{}: slot {} is in data migration, status is {}", - memberName, - entry.getKey(), - descriptor.slotStatus); - res++; - } - } - return res; - } - - private void serialize(DataOutputStream outputStream) throws IOException { - outputStream.writeInt(idSlotMap.size()); - for (Entry integerSlotDescriptorEntry : idSlotMap.entrySet()) { - outputStream.writeInt(integerSlotDescriptorEntry.getKey()); - integerSlotDescriptorEntry.getValue().serialize(outputStream); - } - } - - private void deserialize(ByteBuffer buffer) { - int slotNum = buffer.getInt(); - idSlotMap = new ConcurrentHashMap<>(slotNum); - for (int i = 0; i < slotNum; i++) { - int slotId = buffer.getInt(); - SlotDescriptor descriptor = SlotDescriptor.deserialize(buffer); - idSlotMap.put(slotId, descriptor); - } - } - - public enum SlotStatus { - // the slot has pulled data or does not belong to this member - NULL, - // the slot is pulling data and writes into it should be blocked and reads of it should merge - // the source - PULLING, - // the slot is pulling data and reads of it should merge the source - PULLING_WRITABLE, - // the slot is allocated to another group but that group has not pulled data - SENDING, - // the new owner of the slot has pulled data, and the local data can be removed - SENT - } - - private static class SlotDescriptor { - private 
volatile SlotStatus slotStatus; - private Node source; - // in LOSING status, how many members in the new owner have pulled data - private volatile int snapshotReceivedCount; - - SlotDescriptor() {} - - SlotDescriptor(SlotStatus slotStatus) { - this.slotStatus = slotStatus; - } - - private void serialize(DataOutputStream outputStream) throws IOException { - outputStream.writeInt(slotStatus.ordinal()); - if (slotStatus == SlotStatus.PULLING || slotStatus == SlotStatus.PULLING_WRITABLE) { - NodeSerializeUtils.serialize(source, outputStream); - } else if (slotStatus == SlotStatus.SENDING) { - outputStream.writeInt(snapshotReceivedCount); - } - } - - private static SlotDescriptor deserialize(ByteBuffer buffer) { - SlotDescriptor descriptor = new SlotDescriptor(); - descriptor.slotStatus = SlotStatus.values()[buffer.getInt()]; - if (descriptor.slotStatus == SlotStatus.PULLING - || descriptor.slotStatus == SlotStatus.PULLING_WRITABLE) { - descriptor.source = new Node(); - NodeSerializeUtils.deserialize(descriptor.source, buffer); - } else if (descriptor.slotStatus == SlotStatus.SENDING) { - descriptor.snapshotReceivedCount = buffer.getInt(); - } - return descriptor; - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotNodeAdditionResult.java b/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotNodeAdditionResult.java deleted file mode 100644 index 139aed40e191..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotNodeAdditionResult.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.partition.slot; - -import org.apache.iotdb.cluster.partition.NodeAdditionResult; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; - -import java.util.Map; -import java.util.Set; - -public class SlotNodeAdditionResult extends NodeAdditionResult { - - /** which slots will the old data groups transfer to the new one. */ - private Map> lostSlots; - - public Map> getLostSlots() { - return lostSlots; - } - - public void setLostSlots(Map> lostSlots) { - this.lostSlots = lostSlots; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotNodeRemovalResult.java b/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotNodeRemovalResult.java deleted file mode 100644 index 783208df2652..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotNodeRemovalResult.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
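[Editor's note] The hand-off accounting in the removed SlotManager above works by counting acknowledgements: every time a member of the slot's new owner group reports that it has pulled the snapshot, a per-slot counter is bumped, and once it reaches the replication factor the slot is marked SENT so its local data may be cleared. A minimal sketch of that counting step follows; `SlotHandoffSketch` and its tiny descriptor are hypothetical stand-ins for the real SlotDescriptor, and persistence of the status file is omitted.

```java
public class SlotHandoffSketch {
  enum Status { SENDING, SENT }

  static class Slot {
    Status status = Status.SENDING;
    int snapshotReceivedCount = 0;
  }

  // Called once per remote replica that has finished pulling the slot's snapshot.
  static synchronized int sentOneReplication(Slot slot, int replicationNum) {
    int received = ++slot.snapshotReceivedCount;
    if (received >= replicationNum) {
      slot.status = Status.SENT; // every replica holds the data, the local copy can go
    }
    return received;
  }

  public static void main(String[] args) {
    Slot slot = new Slot();
    for (int i = 0; i < 3; i++) {
      sentOneReplication(slot, 3);
    }
    System.out.println(slot.status); // SENT
  }
}
```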
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.partition.slot; - -import org.apache.iotdb.cluster.partition.NodeRemovalResult; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.utils.NodeSerializeUtils; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** SlotNodeRemovalResult stores the removed partition group and who will take over its slots. */ -public class SlotNodeRemovalResult extends NodeRemovalResult { - - private Map> newSlotOwners = new HashMap<>(); - - public Map> getNewSlotOwners() { - return newSlotOwners; - } - - public void addNewSlotOwners(Map> newSlotOwners) { - this.newSlotOwners = newSlotOwners; - } - - @Override - public void serialize(DataOutputStream dataOutputStream) throws IOException { - super.serialize(dataOutputStream); - dataOutputStream.writeInt(newSlotOwners.size()); - for (Map.Entry> entry : newSlotOwners.entrySet()) { - RaftNode raftNode = entry.getKey(); - NodeSerializeUtils.serialize(raftNode.getNode(), dataOutputStream); - dataOutputStream.writeInt(raftNode.getRaftId()); - dataOutputStream.writeInt(entry.getValue().size()); - for (Integer slot : entry.getValue()) { - dataOutputStream.writeInt(slot); - } - } - } - - @Override - public void deserialize(ByteBuffer buffer) { - super.deserialize(buffer); - int size = buffer.getInt(); - for (int i = 0; i < size; i++) { - Node node = new Node(); - NodeSerializeUtils.deserialize(node, buffer); - RaftNode raftNode = new RaftNode(node, buffer.getInt()); - List slots = new ArrayList<>(); - int slotSize = buffer.getInt(); - for (int j = 0; j < slotSize; j++) { - slots.add(buffer.getInt()); - } - newSlotOwners.put(raftNode, slots); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotPartitionTable.java b/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotPartitionTable.java deleted file mode 100644 index 2caf6302c931..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotPartitionTable.java +++ /dev/null @@ -1,611 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.partition.slot; - -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.partition.NodeAdditionResult; -import org.apache.iotdb.cluster.partition.NodeRemovalResult; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.partition.balancer.DefaultSlotBalancer; -import org.apache.iotdb.cluster.partition.balancer.SlotBalancer; -import org.apache.iotdb.cluster.partition.slot.SlotStrategy.DefaultStrategy; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.utils.NodeSerializeUtils; -import org.apache.iotdb.commons.utils.SerializeUtils; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; - -/** - * SlotPartitionTable manages the slots (data partition) of each node using a look-up table. Slot: - * 1,2,3... - */ -@SuppressWarnings("DuplicatedCode") // Using SerializeUtils causes unknown thread crush -public class SlotPartitionTable implements PartitionTable { - - private static final Logger logger = LoggerFactory.getLogger(SlotPartitionTable.class); - private static SlotStrategy slotStrategy = new DefaultStrategy(); - - private final int replicationNum = - ClusterDescriptor.getInstance().getConfig().getReplicationNum(); - - private final int multiRaftFactor = - ClusterDescriptor.getInstance().getConfig().getMultiRaftFactor(); - - // all nodes - private List nodeRing = new ArrayList<>(); - // normally, it is equal to ClusterConstant.SLOT_NUM. - private int totalSlotNumbers; - - // The following fields are used for determining which node a data item belongs to. - // the slots held by each node - private Map> nodeSlotMap = new ConcurrentHashMap<>(); - // each slot is managed by whom - private RaftNode[] slotNodes = new RaftNode[ClusterConstant.SLOT_NUM]; - // the nodes that each slot belongs to before a new node is added, used for the new node to - // find the data source - private Map> previousNodeMap = new ConcurrentHashMap<>(); - - private NodeRemovalResult nodeRemovalResult = new SlotNodeRemovalResult(); - - // the filed is used for determining which nodes need to be a group. - // the data groups which this node belongs to. - private List localGroups; - - private Node thisNode; - - private List globalGroups; - - // the last meta log index that modifies the partition table - private volatile long lastMetaLogIndex = -1; - - private SlotBalancer slotBalancer = new DefaultSlotBalancer(this); - - /** - * only used for deserialize. 
- * - * @param thisNode - */ - public SlotPartitionTable(Node thisNode) { - this.thisNode = thisNode; - } - - public SlotPartitionTable(SlotPartitionTable other) { - this.thisNode = other.thisNode; - this.totalSlotNumbers = other.totalSlotNumbers; - this.lastMetaLogIndex = other.lastMetaLogIndex; - this.nodeRing = new ArrayList<>(other.nodeRing); - this.nodeSlotMap = new HashMap<>(other.nodeSlotMap); - this.slotNodes = new RaftNode[totalSlotNumbers]; - System.arraycopy(other.slotNodes, 0, this.slotNodes, 0, totalSlotNumbers); - this.previousNodeMap = new HashMap<>(previousNodeMap); - - localGroups = getPartitionGroups(thisNode); - } - - public SlotPartitionTable(Collection nodes, Node thisNode) { - this(nodes, thisNode, ClusterConstant.SLOT_NUM); - } - - private SlotPartitionTable(Collection nodes, Node thisNode, int totalSlotNumbers) { - this.thisNode = thisNode; - this.totalSlotNumbers = totalSlotNumbers; - init(nodes); - } - - public static SlotStrategy getSlotStrategy() { - return slotStrategy; - } - - public static void setSlotStrategy(SlotStrategy slotStrategy) { - SlotPartitionTable.slotStrategy = slotStrategy; - } - - public SlotBalancer getLoadBalancer() { - return slotBalancer; - } - - public void setLoadBalancer(SlotBalancer slotBalancer) { - this.slotBalancer = slotBalancer; - } - - private void init(Collection nodes) { - logger.info("Initializing a new partition table"); - nodeRing.addAll(nodes); - Collections.sort(nodeRing); - localGroups = getPartitionGroups(thisNode); - assignPartitions(); - } - - private void assignPartitions() { - // evenly assign the slots to each node - int nodeNum = nodeRing.size(); - int slotsPerNode = totalSlotNumbers / nodeNum; - int slotsPerRaftGroup = slotsPerNode / multiRaftFactor; - for (Node node : nodeRing) { - for (int i = 0; i < multiRaftFactor; i++) { - nodeSlotMap.put(new RaftNode(node, i), new ArrayList<>()); - } - } - - for (int i = 0; i < totalSlotNumbers; i++) { - int nodeIdx = i / slotsPerNode; - int raftId = i % slotsPerNode / slotsPerRaftGroup; - if (nodeIdx >= nodeNum) { - // the last node may receive a little more if total slots cannot de divided by node number - nodeIdx--; - } - if (raftId >= multiRaftFactor) { - raftId--; - } - nodeSlotMap.get(new RaftNode(nodeRing.get(nodeIdx), raftId)).add(i); - } - - // build the index to find a node by slot - for (Entry> entry : nodeSlotMap.entrySet()) { - for (Integer slot : entry.getValue()) { - slotNodes[slot] = entry.getKey(); - } - } - } - - // find replicationNum groups that a node is in - private List getPartitionGroups(Node node) { - List ret = new ArrayList<>(); - - int nodeIndex = nodeRing.indexOf(node); - if (nodeIndex == -1) { - logger.info("PartitionGroups is empty due to this node has been removed from the cluster!"); - return ret; - } - for (int i = 0; i < replicationNum; i++) { - // the previous replicationNum nodes (including the node itself) are the headers of the - // groups the node is in - int startIndex = nodeIndex - i; - if (startIndex < 0) { - startIndex = startIndex + nodeRing.size(); - } - for (int j = 0; j < multiRaftFactor; j++) { - ret.add(getPartitionGroup(new RaftNode(nodeRing.get(startIndex), j))); - } - } - - logger.debug("The partition groups of {} are: {}", node, ret); - return ret; - } - - public PartitionGroup getPartitionGroup(RaftNode header, List nodeRing) { - PartitionGroup ret = new PartitionGroup(header.getRaftId()); - - // assuming the nodes are [1,2,3,4,5] - int nodeIndex = nodeRing.indexOf(header.getNode()); - if (nodeIndex == -1) { - 
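[Editor's note] The `assignPartitions` method above spreads the slots evenly by pure index arithmetic: slot i belongs to node i / slotsPerNode and, within that node, to raft group (i % slotsPerNode) / slotsPerRaftGroup, with any remainder folded onto the last node or last raft group. The sketch below mirrors that arithmetic; it clamps the overflow with Math.min rather than the decrement used in the removed code, and `SlotAssignmentSketch` is a hypothetical name.

```java
public class SlotAssignmentSketch {
  static int[] ownerOfSlot(int slot, int nodeNum, int multiRaftFactor, int totalSlots) {
    int slotsPerNode = totalSlots / nodeNum;
    int slotsPerRaftGroup = slotsPerNode / multiRaftFactor;
    int nodeIdx = Math.min(slot / slotsPerNode, nodeNum - 1);              // fold remainder onto last node
    int raftId = Math.min(slot % slotsPerNode / slotsPerRaftGroup, multiRaftFactor - 1);
    return new int[] {nodeIdx, raftId};
  }

  public static void main(String[] args) {
    // 10000 slots, 3 nodes, 2 raft groups per node
    int[] owner = ownerOfSlot(9999, 3, 2, 10000);
    System.out.println("node " + owner[0] + ", raft group " + owner[1]); // node 2, raft group 0
  }
}
```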
logger.warn("Node {} is not in the cluster", header.getNode()); - return null; - } - int endIndex = nodeIndex + replicationNum; - if (endIndex > nodeRing.size()) { - // for startIndex = 4 and replicationNum = 3, we concat [4, 5] and [1] to generate the group - ret.addAll(nodeRing.subList(nodeIndex, nodeRing.size())); - ret.addAll(nodeRing.subList(0, endIndex - nodeRing.size())); - } else { - // for startIndex = 2 and replicationNum = 3, [2,3,4] is the group - ret.addAll(nodeRing.subList(nodeIndex, endIndex)); - } - return ret; - } - - @Override - public PartitionGroup getPartitionGroup(RaftNode headerNode) { - return getPartitionGroup(headerNode, this.nodeRing); - } - - @Override - public PartitionGroup route(String storageGroupName, long timestamp) { - synchronized (nodeRing) { - RaftNode raftNode = routeToHeaderByTime(storageGroupName, timestamp); - return getPartitionGroup(raftNode); - } - } - - public PartitionGroup route(int slot) { - if (slot >= slotNodes.length || slot < 0) { - logger.warn( - "Invalid slot to route: {}, stack trace: {}", - slot, - Thread.currentThread().getStackTrace()); - return null; - } - RaftNode raftNode = slotNodes[slot]; - logger.debug("The slot of {} is held by {}", slot, raftNode); - if (raftNode.getNode() == null) { - logger.warn("The slot {} is incorrect", slot); - return null; - } - return getPartitionGroup(raftNode); - } - - @Override - public RaftNode routeToHeaderByTime(String storageGroupName, long timestamp) { - synchronized (nodeRing) { - int slot = - getSlotStrategy().calculateSlotByTime(storageGroupName, timestamp, getTotalSlotNumbers()); - RaftNode raftNode = slotNodes[slot]; - logger.trace( - "The slot of {}@{} is {}, held by {}", storageGroupName, timestamp, slot, raftNode); - return raftNode; - } - } - - @Override - public void addNode(Node node) { - List oldRing; - synchronized (nodeRing) { - if (nodeRing.contains(node)) { - return; - } - - oldRing = new ArrayList<>(nodeRing); - nodeRing.add(node); - nodeRing.sort(Comparator.comparingInt(Node::getNodeIdentifier)); - - List retiredGroups = new ArrayList<>(); - for (int i = 0; i < localGroups.size(); i++) { - PartitionGroup oldGroup = localGroups.get(i); - RaftNode header = oldGroup.getHeader(); - PartitionGroup newGrp = getPartitionGroup(header); - if (newGrp.contains(node) && newGrp.contains(thisNode)) { - // this group changes but still contains the local node - localGroups.set(i, newGrp); - } else if (newGrp.contains(node) && !newGrp.contains(thisNode)) { - // the local node retires from the group - retiredGroups.add(newGrp); - } - } - - // remove retired groups - Iterator groupIterator = localGroups.iterator(); - while (groupIterator.hasNext()) { - PartitionGroup partitionGroup = groupIterator.next(); - for (PartitionGroup retiredGroup : retiredGroups) { - if (retiredGroup.getHeader().equals(partitionGroup.getHeader()) - && retiredGroup.getRaftId() == partitionGroup.getRaftId()) { - groupIterator.remove(); - break; - } - } - } - } - - for (int raftId = 0; raftId < multiRaftFactor; raftId++) { - PartitionGroup newGroup = getPartitionGroup(new RaftNode(node, raftId)); - if (newGroup.contains(thisNode)) { - localGroups.add(newGroup); - } - } - - globalGroups = calculateGlobalGroups(nodeRing); - - // the slots movement is only done logically, the new node itself will pull data from the - // old node - slotBalancer.moveSlotsToNew(node, oldRing); - this.nodeRemovalResult = new SlotNodeRemovalResult(); - } - - @Override - public NodeAdditionResult getNodeAdditionResult(Node node) { - 
SlotNodeAdditionResult result = new SlotNodeAdditionResult(); - Map> lostSlotsMap = new HashMap<>(); - for (int raftId = 0; raftId < multiRaftFactor; raftId++) { - RaftNode raftNode = new RaftNode(node, raftId); - result.addNewGroup(getPartitionGroup(raftNode)); - for (Entry entry : previousNodeMap.get(raftNode).entrySet()) { - RaftNode header = entry.getValue().getHeader(); - lostSlotsMap.computeIfAbsent(header, k -> new HashSet<>()).add(entry.getKey()); - } - } - result.setLostSlots(lostSlotsMap); - return result; - } - - @Override - public List getLocalGroups() { - return localGroups; - } - - @Override - public ByteBuffer serialize() { - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(4096); - DataOutputStream dataOutputStream = new DataOutputStream(outputStream); - - try { - dataOutputStream.writeLong(lastMetaLogIndex); - dataOutputStream.writeInt(totalSlotNumbers); - dataOutputStream.writeInt(nodeSlotMap.size()); - for (Entry> entry : nodeSlotMap.entrySet()) { - NodeSerializeUtils.serialize(entry.getKey().getNode(), dataOutputStream); - dataOutputStream.writeInt(entry.getKey().getRaftId()); - SerializeUtils.serializeIntList(entry.getValue(), dataOutputStream); - } - - dataOutputStream.writeInt(previousNodeMap.size()); - for (Entry> nodeMapEntry : - previousNodeMap.entrySet()) { - NodeSerializeUtils.serialize(nodeMapEntry.getKey().getNode(), dataOutputStream); - dataOutputStream.writeInt(nodeMapEntry.getKey().getRaftId()); - Map prevHolders = nodeMapEntry.getValue(); - dataOutputStream.writeInt(prevHolders.size()); - for (Entry integerNodeEntry : prevHolders.entrySet()) { - integerNodeEntry.getValue().serialize(dataOutputStream); - dataOutputStream.writeInt(integerNodeEntry.getKey()); - } - } - - nodeRemovalResult.serialize(dataOutputStream); - } catch (IOException ignored) { - // not reachable - } - return ByteBuffer.wrap(outputStream.toByteArray()); - } - - @Override - public synchronized boolean deserialize(ByteBuffer buffer) { - long newLastLogIndex = buffer.getLong(); - - if (logger.isDebugEnabled()) { - logger.debug( - "Partition table: lastMetaLogIndex {}, newLastLogIndex {}", - lastMetaLogIndex, - newLastLogIndex); - } - // judge whether the partition table of byte buffer is out of date - if (lastMetaLogIndex != -1 && lastMetaLogIndex >= newLastLogIndex) { - return lastMetaLogIndex == newLastLogIndex; - } - lastMetaLogIndex = newLastLogIndex; - logger.info("Initializing the partition table from buffer"); - totalSlotNumbers = buffer.getInt(); - int size = buffer.getInt(); - nodeSlotMap = new HashMap<>(); - Node node; - for (int i = 0; i < size; i++) { - node = new Node(); - NodeSerializeUtils.deserialize(node, buffer); - RaftNode raftNode = new RaftNode(node, buffer.getInt()); - List slots = new ArrayList<>(); - SerializeUtils.deserializeIntList(slots, buffer); - nodeSlotMap.put(raftNode, slots); - for (Integer slot : slots) { - slotNodes[slot] = raftNode; - } - } - - int prevNodeMapSize = buffer.getInt(); - previousNodeMap = new HashMap<>(); - for (int i = 0; i < prevNodeMapSize; i++) { - node = new Node(); - NodeSerializeUtils.deserialize(node, buffer); - RaftNode raftNode = new RaftNode(node, buffer.getInt()); - - Map prevHolders = new HashMap<>(); - int holderNum = buffer.getInt(); - for (int i1 = 0; i1 < holderNum; i1++) { - PartitionGroup group = new PartitionGroup(); - group.deserialize(buffer); - prevHolders.put(buffer.getInt(), group); - } - previousNodeMap.put(raftNode, prevHolders); - } - - nodeRemovalResult = new SlotNodeRemovalResult(); - 
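// Illustrative sketch (standalone; field order inferred from serialize() and deserialize()
// above). The partition table is shipped as lastMetaLogIndex (long), totalSlotNumbers (int),
// the node-to-slot-list map, the previous-holder map, and the node-removal result. The
// simplified writer below keeps only the first three fields and uses an int key in place of
// the serialized Node plus raft id pair.
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;

class PartitionTableSerializationSketch {
  static ByteBuffer serialize(long lastMetaLogIndex, int totalSlots, Map<Integer, List<Integer>> slotsByRaftId)
      throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream(4096);
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeLong(lastMetaLogIndex); // lets the receiver discard a stale table in deserialize()
    out.writeInt(totalSlots);
    out.writeInt(slotsByRaftId.size());
    for (Map.Entry<Integer, List<Integer>> entry : slotsByRaftId.entrySet()) {
      out.writeInt(entry.getKey()); // placeholder for the serialized Node and raft id
      out.writeInt(entry.getValue().size());
      for (int slot : entry.getValue()) {
        out.writeInt(slot);
      }
    }
    return ByteBuffer.wrap(bytes.toByteArray());
  }

  public static void main(String[] args) throws IOException {
    ByteBuffer buffer = serialize(42L, 10_000, Map.of(0, List.of(1, 2, 3)));
    System.out.println("serialized " + buffer.remaining() + " bytes");
  }
}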
nodeRemovalResult.deserialize(buffer); - - nodeRing.clear(); - for (RaftNode raftNode : nodeSlotMap.keySet()) { - if (!nodeRing.contains(raftNode.getNode())) { - nodeRing.add(raftNode.getNode()); - } - } - Collections.sort(nodeRing); - logger.info("All known nodes: {}", nodeRing); - - localGroups = getPartitionGroups(thisNode); - return true; - } - - @Override - public List getAllNodes() { - return nodeRing; - } - - public Map> getPreviousNodeMap() { - return previousNodeMap; - } - - public Map getPreviousNodeMap(RaftNode raftNode) { - return previousNodeMap.get(raftNode); - } - - public List getNodeSlots(RaftNode header) { - return nodeSlotMap.get(header); - } - - public Map> getAllNodeSlots() { - return nodeSlotMap; - } - - public int getTotalSlotNumbers() { - return totalSlotNumbers; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - SlotPartitionTable that = (SlotPartitionTable) o; - return totalSlotNumbers == that.totalSlotNumbers - && Objects.equals(nodeRing, that.nodeRing) - && Objects.equals(nodeSlotMap, that.nodeSlotMap) - && Arrays.equals(slotNodes, that.slotNodes) - && Objects.equals(previousNodeMap, that.previousNodeMap) - && lastMetaLogIndex == that.lastMetaLogIndex; - } - - @Override - public int hashCode() { - return 0; - } - - @Override - public void removeNode(Node target) { - synchronized (nodeRing) { - if (!nodeRing.contains(target)) { - return; - } - - SlotNodeRemovalResult result = new SlotNodeRemovalResult(); - for (int raftId = 0; raftId < multiRaftFactor; raftId++) { - result.addRemovedGroup(getPartitionGroup(new RaftNode(target, raftId))); - } - nodeRing.remove(target); - - // if the node belongs to a group that headed by target, this group should be removed - // and other groups containing target should be updated - List removedGroupIdxs = new ArrayList<>(); - for (int i = 0; i < localGroups.size(); i++) { - PartitionGroup oldGroup = localGroups.get(i); - RaftNode header = oldGroup.getHeader(); - if (header.getNode().equals(target)) { - removedGroupIdxs.add(i); - } else { - PartitionGroup newGrp = getPartitionGroup(header); - localGroups.set(i, newGrp); - } - } - for (int i = removedGroupIdxs.size() - 1; i >= 0; i--) { - int removedGroupIdx = removedGroupIdxs.get(i); - int raftId = localGroups.get(removedGroupIdx).getRaftId(); - localGroups.remove(removedGroupIdx); - // each node exactly joins replicationNum groups, so when a group is removed, the node - // should join a new one - int thisNodeIdx = nodeRing.indexOf(thisNode); - - // check if this node is to be removed - if (thisNodeIdx == -1) { - continue; - } - - // this node must be the last node of the new group - int headerNodeIdx = thisNodeIdx - (replicationNum - 1); - headerNodeIdx = headerNodeIdx < 0 ? 
headerNodeIdx + nodeRing.size() : headerNodeIdx; - Node header = nodeRing.get(headerNodeIdx); - PartitionGroup newGrp = getPartitionGroup(new RaftNode(header, raftId)); - localGroups.add(newGrp); - } - - globalGroups = calculateGlobalGroups(nodeRing); - - // the slots movement is only done logically, the new node itself will pull data from the - // old node - Map> raftNodeListMap = slotBalancer.retrieveSlots(target); - result.addNewSlotOwners(raftNodeListMap); - this.nodeRemovalResult = result; - } - } - - @Override - public NodeRemovalResult getNodeRemovalResult() { - return nodeRemovalResult; - } - - @Override - public List getGlobalGroups() { - // preventing a thread from getting incomplete globalGroups - synchronized (nodeRing) { - if (globalGroups == null) { - globalGroups = calculateGlobalGroups(nodeRing); - } - return globalGroups; - } - } - - /** - * Judge whether the data of slot is held by node - * - * @param node target node - */ - public boolean judgeHoldSlot(Node node, int slot) { - return getPartitionGroup(slotNodes[slot]).contains(node); - } - - @Override - public List calculateGlobalGroups(List nodeRing) { - List result = new ArrayList<>(); - for (Node node : nodeRing) { - for (int i = 0; i < multiRaftFactor; i++) { - result.add(getPartitionGroup(new RaftNode(node, i), nodeRing)); - } - } - return result; - } - - @Override - public long getLastMetaLogIndex() { - return lastMetaLogIndex; - } - - @Override - public void setLastMetaLogIndex(long lastMetaLogIndex) { - if (logger.isDebugEnabled()) { - logger.debug("Set last meta log index of partition table to {}", lastMetaLogIndex); - } - this.lastMetaLogIndex = Math.max(this.lastMetaLogIndex, lastMetaLogIndex); - } - - public RaftNode[] getSlotNodes() { - return slotNodes; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotStrategy.java b/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotStrategy.java deleted file mode 100644 index 00680b9faf36..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotStrategy.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.partition.slot; - -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.tsfile.utils.Murmur128Hash; - -import static org.apache.iotdb.cluster.config.ClusterConstant.HASH_SALT; - -/** SlotStrategy determines which slot a {sg, timestamp} or {sg, partition} belongs. 
*/ -public interface SlotStrategy { - int calculateSlotByTime(String storageGroupName, long timestamp, int maxSlotNum); - - int calculateSlotByPartitionNum(String storageGroupName, long partitionId, int maxSlotNum); - - class DefaultStrategy implements SlotStrategy { - - @Override - public int calculateSlotByTime(String storageGroupName, long timestamp, int maxSlotNum) { - long partitionNum = StorageEngine.getTimePartition(timestamp); - return calculateSlotByPartitionNum(storageGroupName, partitionNum, maxSlotNum); - } - - @Override - public int calculateSlotByPartitionNum( - String storageGroupName, long partitionId, int maxSlotNum) { - int hash = Murmur128Hash.hash(storageGroupName, partitionId, HASH_SALT); - return Math.abs(hash % maxSlotNum); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotTimePartitionFilter.java b/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotTimePartitionFilter.java deleted file mode 100644 index 5bd42bb8fb64..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/partition/slot/SlotTimePartitionFilter.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.partition.slot; - -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.db.engine.storagegroup.DataRegion.TimePartitionFilter; - -import java.util.Objects; -import java.util.Set; - -/** Used for checking if a data partition belongs to the slotSet */ -public class SlotTimePartitionFilter implements TimePartitionFilter { - - private final Set slotSet; - - public SlotTimePartitionFilter(Set slotSet) { - this.slotSet = slotSet; - } - - @Override - public boolean satisfy(String storageGroupName, long timePartitionId) { - int slot = - SlotPartitionTable.getSlotStrategy() - .calculateSlotByPartitionNum( - storageGroupName, timePartitionId, ClusterConstant.SLOT_NUM); - return slotSet.contains(slot); - } - - @Override - public boolean equals(Object obj) { - return obj instanceof SlotTimePartitionFilter - && Objects.equals(((SlotTimePartitionFilter) obj).slotSet, slotSet); - } - - @Override - public int hashCode() { - return Objects.hash(slotSet); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterDataQueryExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterDataQueryExecutor.java deleted file mode 100644 index 95f1e8f3c246..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterDataQueryExecutor.java +++ /dev/null @@ -1,325 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
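// Illustrative sketch (standalone): the routing rule behind SlotStrategy.DefaultStrategy
// above. A {storage group, time partition} pair is hashed and reduced modulo the slot
// count, so every replica maps the same series and time range to the same slot.
// Objects.hash stands in for the Murmur128Hash plus HASH_SALT combination, and the
// partition interval and slot count below are assumed values for the example.
import java.util.Objects;

class DefaultSlotStrategySketch {
  static final int SLOT_NUM = 10_000;                     // stand-in for ClusterConstant.SLOT_NUM
  static final long PARTITION_INTERVAL_MS = 604_800_000L; // one-week time partition (assumed)

  static int calculateSlotByTime(String storageGroup, long timestamp) {
    long partitionId = timestamp / PARTITION_INTERVAL_MS; // plays the role of StorageEngine.getTimePartition
    return calculateSlotByPartitionNum(storageGroup, partitionId);
  }

  static int calculateSlotByPartitionNum(String storageGroup, long partitionId) {
    int hash = Objects.hash(storageGroup, partitionId); // stand-in for Murmur128Hash.hash(sg, partitionId, HASH_SALT)
    return Math.abs(hash % SLOT_NUM);
  }

  public static void main(String[] args) {
    System.out.println(calculateSlotByTime("root.sg1", System.currentTimeMillis()));
  }
}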
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.EmptyIntervalException; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.query.reader.ClusterReaderFactory; -import org.apache.iotdb.cluster.query.reader.ClusterTimeGenerator; -import org.apache.iotdb.cluster.query.reader.mult.AbstractMultPointReader; -import org.apache.iotdb.cluster.query.reader.mult.AssignPathManagedMergeReader; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.dataset.RawQueryDataSetWithoutValueFilter; -import org.apache.iotdb.db.query.executor.RawDataQueryExecutor; -import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp; -import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; -import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator; -import org.apache.iotdb.tsfile.read.reader.IPointReader; - -import com.google.common.collect.Lists; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Set; - -public class ClusterDataQueryExecutor extends RawDataQueryExecutor { - - private static final Logger logger = LoggerFactory.getLogger(ClusterDataQueryExecutor.class); - private MetaGroupMember metaGroupMember; - private ClusterReaderFactory readerFactory; - private QueryDataSet.EndPoint endPoint = null; - private boolean hasLocalReader = false; - - ClusterDataQueryExecutor(RawDataQueryPlan plan, MetaGroupMember metaGroupMember) { - super(plan); - this.metaGroupMember = metaGroupMember; - this.readerFactory = new ClusterReaderFactory(metaGroupMember); - } - - /** - * use mult batch query for without value filter - * - * @param context query context - * @return query data set - * @throws StorageEngineException - */ - @Override - public QueryDataSet executeWithoutValueFilter(QueryContext context) - throws StorageEngineException { - QueryDataSet dataSet = needRedirect(context, false); - if (dataSet != null) { - return dataSet; - } - try { - List 
readersOfSelectedSeries = initMultSeriesReader(context); - return new RawQueryDataSetWithoutValueFilter( - context.getQueryId(), queryPlan, readersOfSelectedSeries); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new StorageEngineException(e.getMessage()); - } catch (IOException | EmptyIntervalException | QueryProcessException e) { - throw new StorageEngineException(e.getMessage()); - } - } - - private List initMultSeriesReader(QueryContext context) - throws StorageEngineException, IOException, EmptyIntervalException, QueryProcessException { - Filter timeFilter = null; - if (queryPlan.getExpression() != null) { - timeFilter = ((GlobalTimeExpression) queryPlan.getExpression()).getFilter(); - } - - // make sure the partition table is new - try { - metaGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new StorageEngineException(e); - } - List readersOfSelectedSeries = Lists.newArrayList(); - List multPointReaders = Lists.newArrayList(); - - multPointReaders = - readerFactory.getMultSeriesReader( - queryPlan.getDeduplicatedPaths(), - queryPlan.getDeviceToMeasurements(), - queryPlan.getDeduplicatedDataTypes(), - timeFilter, - null, - context, - queryPlan.isAscending()); - - // combine reader of different partition group of the same path - // into a MultManagedMergeReader - for (int i = 0; i < queryPlan.getDeduplicatedPaths().size(); i++) { - PartialPath partialPath = queryPlan.getDeduplicatedPaths().get(i); - TSDataType dataType = queryPlan.getDeduplicatedDataTypes().get(i); - String fullPath = partialPath.getFullPath(); - AssignPathManagedMergeReader assignPathManagedMergeReader = - new AssignPathManagedMergeReader(fullPath, dataType, queryPlan.isAscending()); - for (AbstractMultPointReader multPointReader : multPointReaders) { - if (multPointReader.getAllPaths().contains(fullPath)) { - assignPathManagedMergeReader.addReader(multPointReader, 0); - } - } - readersOfSelectedSeries.add(assignPathManagedMergeReader); - } - if (logger.isDebugEnabled()) { - logger.debug("Initialized {} readers for {}", readersOfSelectedSeries.size(), queryPlan); - } - return readersOfSelectedSeries; - } - - @Override - protected List initManagedSeriesReader(QueryContext context) - throws StorageEngineException { - Filter timeFilter = null; - if (queryPlan.getExpression() != null) { - timeFilter = ((GlobalTimeExpression) queryPlan.getExpression()).getFilter(); - } - - // make sure the partition table is new - try { - metaGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new StorageEngineException(e); - } - - List readersOfSelectedSeries = new ArrayList<>(); - hasLocalReader = false; - for (int i = 0; i < queryPlan.getDeduplicatedPaths().size(); i++) { - PartialPath path = queryPlan.getDeduplicatedPaths().get(i); - TSDataType dataType = queryPlan.getDeduplicatedDataTypes().get(i); - - ManagedSeriesReader reader; - try { - reader = - readerFactory.getSeriesReader( - path, - queryPlan.getAllMeasurementsInDevice(path.getDevice()), - dataType, - timeFilter, - null, - context, - queryPlan.isAscending()); - } catch (EmptyIntervalException e) { - logger.info(e.getMessage()); - return Collections.emptyList(); - } - - readersOfSelectedSeries.add(reader); - } - if (logger.isDebugEnabled()) { - logger.debug("Initialized {} readers for {}", readersOfSelectedSeries.size(), queryPlan); - } - - return readersOfSelectedSeries; - } - - @Override - protected IReaderByTimestamp getReaderByTimestamp( - 
PartialPath path, Set deviceMeasurements, TSDataType dataType, QueryContext context) - throws StorageEngineException, QueryProcessException { - return readerFactory.getReaderByTimestamp( - path, deviceMeasurements, dataType, context, queryPlan.isAscending(), null); - } - - @Override - protected TimeGenerator getTimeGenerator(QueryContext context, RawDataQueryPlan rawDataQueryPlan) - throws StorageEngineException { - return new ClusterTimeGenerator(context, metaGroupMember, rawDataQueryPlan, false); - } - - @Override - protected QueryDataSet needRedirect(QueryContext context, boolean hasValueFilter) - throws StorageEngineException { - if (queryPlan.isEnableRedirect()) { - if (hasValueFilter) { - // 1. check time Generator has local data - ClusterTimeGenerator clusterTimeGenerator = - new ClusterTimeGenerator(context, metaGroupMember, queryPlan, true); - if (clusterTimeGenerator.isHasLocalReader()) { - this.hasLocalReader = true; - this.endPoint = null; - } - - // 2. check data reader has local data - checkReaderHasLocalData(context, true); - } else { - // check data reader has local data - checkReaderHasLocalData(context, false); - } - - logger.debug( - "redirect queryId {}, {}, {}, {}", - context.getQueryId(), - hasLocalReader, - hasValueFilter, - endPoint); - - if (!hasLocalReader) { - // dummy dataSet - QueryDataSet dataSet = new RawQueryDataSetWithoutValueFilter(context.getQueryId()); - dataSet.setEndPoint(endPoint); - return dataSet; - } - } - return null; - } - - @SuppressWarnings({"squid:S3776", "squid:S1141"}) - private void checkReaderHasLocalData(QueryContext context, boolean hasValueFilter) - throws StorageEngineException { - Filter timeFilter = null; - if (!hasValueFilter && queryPlan.getExpression() != null) { - timeFilter = ((GlobalTimeExpression) queryPlan.getExpression()).getFilter(); - } - - // make sure the partition table is new - try { - metaGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new StorageEngineException(e); - } - - for (int i = 0; i < queryPlan.getDeduplicatedPaths().size(); i++) { - PartialPath path = queryPlan.getDeduplicatedPaths().get(i); - TSDataType dataType = queryPlan.getDeduplicatedDataTypes().get(i); - - try { - List partitionGroups = null; - if (hasValueFilter) { - partitionGroups = metaGroupMember.routeFilter(null, path); - } else { - partitionGroups = metaGroupMember.routeFilter(timeFilter, path); - } - - for (PartitionGroup partitionGroup : partitionGroups) { - if (partitionGroup.contains(metaGroupMember.getThisNode())) { - DataGroupMember dataGroupMember = - metaGroupMember.getLocalDataMember( - partitionGroup.getHeader(), - String.format( - "Query: %s, time filter: %s, queryId: %d", - path, null, context.getQueryId())); - - if (hasValueFilter) { - IReaderByTimestamp readerByTimestamp = - readerFactory.getReaderByTimestamp( - path, - queryPlan.getAllMeasurementsInDevice(path.getDevice()), - dataType, - context, - dataGroupMember, - queryPlan.isAscending(), - null); - - if (readerByTimestamp != null) { - this.hasLocalReader = true; - this.endPoint = null; - } - } else { - IPointReader pointReader = - readerFactory.getSeriesPointReader( - path, - queryPlan.getAllMeasurementsInDevice(path.getDevice()), - dataType, - timeFilter, - null, - context, - dataGroupMember, - queryPlan.isAscending(), - null); - - if (pointReader.hasNextTimeValuePair()) { - this.hasLocalReader = true; - this.endPoint = null; - pointReader.close(); - break; - } - pointReader.close(); - } - } else if (endPoint == null) { - 
endPoint = - new QueryDataSet.EndPoint( - partitionGroup.getHeader().getNode().getClientIp(), - partitionGroup.getHeader().getNode().getClientPort()); - } - } - } catch (Exception e) { - throw new StorageEngineException(e); - } - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPhysicalGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPhysicalGenerator.java deleted file mode 100644 index dc2b55ebb829..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPhysicalGenerator.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.metadata.CSchemaProcessor; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.utils.MetaUtils; -import org.apache.iotdb.db.qp.logical.Operator; -import org.apache.iotdb.db.qp.logical.crud.QueryOperator; -import org.apache.iotdb.db.qp.logical.sys.LoadConfigurationOperator.LoadConfigurationOperatorType; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.sys.LoadConfigurationPlan; -import org.apache.iotdb.db.qp.physical.sys.LoadConfigurationPlan.LoadConfigurationPlanType; -import org.apache.iotdb.db.qp.strategy.PhysicalGenerator; -import org.apache.iotdb.db.service.IoTDB; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.net.URL; -import java.util.List; -import java.util.Properties; - -public class ClusterPhysicalGenerator extends PhysicalGenerator { - - private static final Logger logger = LoggerFactory.getLogger(ClusterPhysicalGenerator.class); - - private CSchemaProcessor getCSchemaProcessor() { - return ((CSchemaProcessor) IoTDB.schemaProcessor); - } - - @Override - public List groupVectorPaths(List paths) throws MetadataException { - return MetaUtils.groupAlignedPaths(paths); - } - - @Override - public PhysicalPlan transformToPhysicalPlan(Operator operator) throws QueryProcessException { - // update storage groups before parsing query plans - if (operator instanceof QueryOperator) { - try { - getCSchemaProcessor().syncMetaLeader(); - } catch (MetadataException e) { - throw new QueryProcessException(e); - } - } - return super.transformToPhysicalPlan(operator); - } - - @Override - public PhysicalPlan generateLoadConfigurationPlan(LoadConfigurationOperatorType type) - throws 
QueryProcessException { - if (type == LoadConfigurationOperatorType.GLOBAL) { - Properties[] properties = new Properties[2]; - properties[0] = new Properties(); - URL iotdbEnginePropertiesUrl = IoTDBDescriptor.getInstance().getPropsUrl(); - if (iotdbEnginePropertiesUrl == null) { - logger.error("Fail to find the engine config file"); - throw new QueryProcessException("Fail to find config file"); - } - try (InputStream inputStream = iotdbEnginePropertiesUrl.openStream()) { - properties[0].load(inputStream); - } catch (IOException e) { - logger.error("Fail to read iotdb-engine config file {}", iotdbEnginePropertiesUrl, e); - throw new QueryProcessException("Fail to read iotdb-engine config file."); - } - String clusterPropertiesUrl = ClusterDescriptor.getInstance().getPropsUrl(); - properties[1] = new Properties(); - try (InputStream inputStream = new FileInputStream(new File(clusterPropertiesUrl))) { - properties[1].load(inputStream); - } catch (IOException e) { - logger.error("Fail to read iotdb-cluster config file {}", clusterPropertiesUrl, e); - throw new QueryProcessException("Fail to read iotdb-cluster config file."); - } - - return new LoadConfigurationPlan(LoadConfigurationPlanType.GLOBAL, properties); - } else { - return new LoadConfigurationPlan(LoadConfigurationPlanType.LOCAL); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanExecutor.java deleted file mode 100644 index 6005798d1bf1..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanExecutor.java +++ /dev/null @@ -1,923 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
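// Illustrative sketch (standalone; file paths are placeholders): the GLOBAL branch of
// generateLoadConfigurationPlan above bundles two property files, the engine configuration
// and the cluster configuration, into one two-element array so a single LOAD CONFIGURATION
// statement can update both.
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Properties;

class LoadConfigurationSketch {
  static Properties[] loadGlobalProperties(Path enginePropsFile, Path clusterPropsFile) throws IOException {
    Properties[] properties = new Properties[2];
    properties[0] = new Properties();
    try (InputStream in = Files.newInputStream(enginePropsFile)) {
      properties[0].load(in); // engine-level properties
    }
    properties[1] = new Properties();
    try (InputStream in = Files.newInputStream(clusterPropsFile)) {
      properties[1].load(in); // cluster-level properties
    }
    return properties;
  }

  public static void main(String[] args) throws IOException {
    Properties[] loaded =
        loadGlobalProperties(Path.of("conf/iotdb-engine.properties"), Path.of("conf/iotdb-cluster.properties"));
    System.out.println("engine keys: " + loaded[0].size() + ", cluster keys: " + loaded[1].size());
  }
}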
- */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.metadata.CSchemaProcessor; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.query.filter.SlotSgFilter; -import org.apache.iotdb.cluster.query.manage.QueryCoordinator; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.engine.storagegroup.DataRegion.TimePartitionFilter; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.metadata.PathNotExistException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.mnode.IStorageGroupMNode; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.qp.executor.PlanExecutor; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.DeletePlan; -import org.apache.iotdb.db.qp.physical.crud.QueryPlan; -import org.apache.iotdb.db.qp.physical.sys.AuthorPlan; -import org.apache.iotdb.db.qp.physical.sys.LoadConfigurationPlan; -import org.apache.iotdb.db.qp.physical.sys.ShowPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -public class ClusterPlanExecutor extends PlanExecutor { - - private static final Logger logger = LoggerFactory.getLogger(ClusterPlanExecutor.class); - private final MetaGroupMember metaGroupMember; - - public static final int THREAD_POOL_SIZE = 6; - public static final String LOG_FAIL_CONNECT = "Failed to connect to node: {}"; - - public ClusterPlanExecutor(MetaGroupMember metaGroupMember) throws QueryProcessException { - super(); - this.metaGroupMember = metaGroupMember; - this.queryRouter = new ClusterQueryRouter(metaGroupMember); - } - - 
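// Illustrative sketch (standalone): the fan-out pattern ClusterPlanExecutor uses below in
// getDeviceCount() and getPathCount(). Paths are bucketed by the partition group that owns
// them; groups containing this node are counted locally, each remote group gets one task in
// a per-call thread pool, and the partial counts are summed atomically before the pool is
// drained. The group ids, the counting functions, and the 30-second timeout are assumptions.
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.ToIntFunction;

class RemoteCountFanOutSketch {
  static int countAcrossGroups(
      Map<String, List<String>> pathsByRemoteGroup, // remote group id -> paths to count there
      int localCount,                               // result of counting paths held by this node
      ToIntFunction<List<String>> remoteCounter)    // issues the remote count request
      throws InterruptedException, ExecutionException {
    AtomicInteger result = new AtomicInteger(localCount);
    if (pathsByRemoteGroup.isEmpty()) {
      return result.get();
    }
    ExecutorService pool = Executors.newFixedThreadPool(pathsByRemoteGroup.size());
    try {
      List<Future<Integer>> futures = new ArrayList<>();
      for (List<String> paths : pathsByRemoteGroup.values()) {
        futures.add(pool.submit(() -> result.addAndGet(remoteCounter.applyAsInt(paths))));
      }
      for (Future<Integer> future : futures) {
        future.get(); // surface remote failures instead of silently dropping counts
      }
    } finally {
      pool.shutdown();
      pool.awaitTermination(30, TimeUnit.SECONDS);
    }
    return result.get();
  }

  public static void main(String[] args) throws Exception {
    int total = countAcrossGroups(
        Map.of("group-1", List.of("root.sg1.d1", "root.sg1.d2")), 3, paths -> paths.size());
    System.out.println(total); // 3 local + 2 "remote"
  }
}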
@Override - public QueryDataSet processQuery(PhysicalPlan queryPlan, QueryContext context) - throws IOException, StorageEngineException, QueryFilterOptimizationException, - QueryProcessException, MetadataException, InterruptedException { - if (queryPlan instanceof QueryPlan) { - logger.debug("Executing a query: {}", queryPlan); - return processDataQuery((QueryPlan) queryPlan, context); - } else if (queryPlan instanceof ShowPlan) { - try { - metaGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new QueryProcessException(e.getMessage()); - } - return processShowQuery((ShowPlan) queryPlan, context); - } else if (queryPlan instanceof AuthorPlan) { - try { - metaGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new QueryProcessException(e.getMessage()); - } - return processAuthorQuery((AuthorPlan) queryPlan); - } else { - throw new QueryProcessException(String.format("Unrecognized query plan %s", queryPlan)); - } - } - - @Override - @TestOnly - protected List getPathsName(PartialPath path) throws MetadataException { - return ((CSchemaProcessor) IoTDB.schemaProcessor).getMatchedPaths(path); - } - - @Override - protected int getDevicesNum(PartialPath path, boolean isPrefixMatch) throws MetadataException { - // adapt to prefix match of IoTDB v0.12 - return getDevicesNum(path) - + (isPrefixMatch - ? getDevicesNum(path.concatNode(IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD)) - : 0); - } - - protected int getDevicesNum(PartialPath path) throws MetadataException { - // make sure this node knows all storage groups - try { - metaGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new MetadataException(e); - } - Map> sgPathMap = IoTDB.schemaProcessor.groupPathByStorageGroup(path); - if (sgPathMap.isEmpty()) { - throw new PathNotExistException(path.getFullPath()); - } - logger.debug("The storage groups of path {} are {}", path, sgPathMap.keySet()); - int ret; - try { - ret = getDeviceCount(sgPathMap, path); - } catch (CheckConsistencyException e) { - throw new MetadataException(e); - } - logger.debug("The number of devices satisfying {} is {}", path, ret); - return ret; - } - - private int getDeviceCount(Map> sgPathMap, PartialPath queryPath) - throws CheckConsistencyException, MetadataException { - AtomicInteger result = new AtomicInteger(); - // split the paths by the data group they belong to - Map> groupPathMap = new HashMap<>(); - for (String storageGroupName : sgPathMap.keySet()) { - PartialPath pathUnderSG = new PartialPath(storageGroupName); - // find the data group that should hold the device schemas of the storage group - PartitionGroup partitionGroup = - metaGroupMember.getPartitionTable().route(storageGroupName, 0); - PartialPath targetPath; - // If storage group node length is larger than the one of queryPath, we query the device count - // of the storage group directly - if (pathUnderSG.getNodeLength() >= queryPath.getNodeLength()) { - targetPath = pathUnderSG; - } else { - // Or we replace the prefix of queryPath with the storage group as the target queryPath - String[] targetNodes = new String[queryPath.getNodeLength()]; - for (int i = 0; i < queryPath.getNodeLength(); i++) { - if (i < pathUnderSG.getNodeLength()) { - targetNodes[i] = pathUnderSG.getNodes()[i]; - } else { - targetNodes[i] = queryPath.getNodes()[i]; - } - } - targetPath = new PartialPath(targetNodes); - } - if (partitionGroup.contains(metaGroupMember.getThisNode())) { - // this 
node is a member of the group, perform a local query after synchronizing with the - // leader - metaGroupMember - .getLocalDataMember(partitionGroup.getHeader(), partitionGroup.getRaftId()) - .syncLeaderWithConsistencyCheck(false); - int localResult = getLocalDeviceCount(targetPath); - logger.debug( - "{}: get device count of {} locally, result {}", - metaGroupMember.getName(), - partitionGroup, - localResult); - result.addAndGet(localResult); - } else { - // batch the queries of the same group to reduce communication - groupPathMap - .computeIfAbsent(partitionGroup, p -> new ArrayList<>()) - .add(targetPath.getFullPath()); - } - } - if (groupPathMap.isEmpty()) { - return result.get(); - } - - ExecutorService remoteQueryThreadPool = Executors.newFixedThreadPool(groupPathMap.size()); - List> remoteFutures = new ArrayList<>(); - // query each data group separately - for (Entry> partitionGroupPathEntry : groupPathMap.entrySet()) { - PartitionGroup partitionGroup = partitionGroupPathEntry.getKey(); - List pathsToQuery = partitionGroupPathEntry.getValue(); - remoteFutures.add( - remoteQueryThreadPool.submit( - () -> { - try { - result.addAndGet(getRemoteDeviceCount(partitionGroup, pathsToQuery)); - } catch (MetadataException e) { - logger.warn( - "Cannot get remote device count of {} from {}", - pathsToQuery, - partitionGroup, - e); - } - return null; - })); - } - waitForThreadPool(remoteFutures, remoteQueryThreadPool, "getDeviceCount()"); - - return result.get(); - } - - private int getLocalDeviceCount(PartialPath path) throws MetadataException { - return IoTDB.schemaProcessor.getDevicesNum(path); - } - - private int getRemoteDeviceCount(PartitionGroup partitionGroup, List pathsToCount) - throws MetadataException { - // choose the node with lowest latency or highest throughput - List coordinatedNodes = QueryCoordinator.getINSTANCE().reorderNodes(partitionGroup); - Integer count; - for (Node node : coordinatedNodes) { - try { - count = getRemoteDeviceCountForOneNode(node, partitionGroup, pathsToCount); - logger.debug( - "{}: get device count of {} from {}, result {}", - metaGroupMember.getName(), - partitionGroup, - node, - count); - if (count != null) { - return count; - } - } catch (IOException | TException e) { - throw new MetadataException(e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new MetadataException(e); - } - } - logger.warn("Cannot get devices of {} from {}", pathsToCount, partitionGroup); - return 0; - } - - private Integer getRemoteDeviceCountForOneNode( - Node node, PartitionGroup partitionGroup, List pathsToCount) - throws IOException, TException, InterruptedException { - Integer count; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - client.setTimeout(ClusterConstant.getReadOperationTimeoutMS()); - count = SyncClientAdaptor.getDeviceCount(client, partitionGroup.getHeader(), pathsToCount); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - syncDataClient.setTimeout(ClusterConstant.getReadOperationTimeoutMS()); - count = syncDataClient.getDeviceCount(partitionGroup.getHeader(), pathsToCount); - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient 
!= null) { - syncDataClient.returnSelf(); - } - } - } - return count; - } - - @Override - protected int getPathsNum(PartialPath path, boolean isPrefixMatch) throws MetadataException { - return getNodesNumInGivenLevel(path, -1, isPrefixMatch); - } - - @Override - protected int getNodesNumInGivenLevel(PartialPath path, int level, boolean isPrefixMatch) - throws MetadataException { - int result = getNodesNumInGivenLevel(path, level); - if (isPrefixMatch) { - // adapt to prefix match of IoTDB v0.12 - result += - getNodesNumInGivenLevel(path.concatNode(IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD), level); - } - return result; - } - - protected int getNodesNumInGivenLevel(PartialPath path, int level) throws MetadataException { - // make sure this node knows all storage groups - try { - metaGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new MetadataException(e); - } - - // Here we append a ** to the path to query the storage groups which have the prefix as 'path', - // if path doesn't end with **. - // e.g. we have SG root.sg.a and root.sg.b, the query path is root.sg, we should return the map - // with key root.sg.a and root.sg.b instead of an empty one. - PartialPath wildcardPath = path; - if (!wildcardPath.getMeasurement().equals(IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD)) { - wildcardPath = wildcardPath.concatNode(IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD); - } - Map> sgPathMap = - IoTDB.schemaProcessor.groupPathByStorageGroup(wildcardPath); - if (sgPathMap.isEmpty()) { - return 0; - } - logger.debug("The storage groups of path {} are {}", path, sgPathMap.keySet()); - int ret = 0; - try { - // level >= 0 is the COUNT NODE query - if (level >= 0) { - int prefixPartIdx = 0; - for (; prefixPartIdx < path.getNodeLength(); prefixPartIdx++) { - String currentPart = path.getNodes()[prefixPartIdx]; - if (currentPart.equals(IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD)) { - break; - } else if (currentPart.equals(IoTDBConstant.ONE_LEVEL_PATH_WILDCARD)) { - // Only level equals the first * occurred level, e.g. root.sg.d1.* and level = 4, the - // query makes sense. - if (level != prefixPartIdx) { - return 0; - } - break; - } - } - // if level is less than the query path level, there's no suitable node - if (level < prefixPartIdx - 1) { - return 0; - } - Set deletedSg = new HashSet<>(); - Set matchedPath = new HashSet<>(0); - for (String sg : sgPathMap.keySet()) { - PartialPath p = new PartialPath(sg); - // if the storage group path level is larger than the query level, then the prefix must be - // a suitable node and there's no need to query children nodes later - if (p.getNodeLength() - 1 >= level) { - deletedSg.add(sg); - matchedPath.add(new PartialPath(Arrays.copyOfRange(p.getNodes(), 0, level + 1))); - } - } - for (String sg : deletedSg) { - sgPathMap.remove(sg); - } - ret += matchedPath.size(); - } - ret += getPathCount(sgPathMap, level); - } catch (CheckConsistencyException e) { - throw new MetadataException(e); - } - logger.debug("The number of paths satisfying {}@{} is {}", path, level, ret); - return ret; - } - - /** - * Split the paths by the data group they belong to and query them from the groups separately. 
- * - * @param sgPathMap the key is the storage group name and the value is the path to be queried with - * storage group added - * @param level the max depth to match the pattern, -1 means matching the whole pattern - * @return the number of paths that match the pattern at given level - */ - private int getPathCount(Map> sgPathMap, int level) - throws MetadataException, CheckConsistencyException { - AtomicInteger result = new AtomicInteger(); - // split the paths by the data group they belong to - Map> groupPathMap = new HashMap<>(); - for (Entry> sgPathEntry : sgPathMap.entrySet()) { - String storageGroupName = sgPathEntry.getKey(); - List paths = sgPathEntry.getValue(); - // find the data group that should hold the timeseries schemas of the storage group - PartitionGroup partitionGroup = - metaGroupMember.getPartitionTable().route(storageGroupName, 0); - if (partitionGroup.contains(metaGroupMember.getThisNode())) { - // this node is a member of the group, perform a local query after synchronizing with the - // leader - metaGroupMember - .getLocalDataMember(partitionGroup.getHeader(), partitionGroup.getRaftId()) - .syncLeaderWithConsistencyCheck(false); - int localResult = 0; - for (PartialPath path : paths) { - localResult += getLocalPathCount(path, level); - } - logger.debug( - "{}: get path count of {} locally, result {}", - metaGroupMember.getName(), - partitionGroup, - localResult); - result.addAndGet(localResult); - } else { - // batch the queries of the same group to reduce communication - for (PartialPath path : paths) { - groupPathMap - .computeIfAbsent(partitionGroup, p -> new ArrayList<>()) - .add(path.getFullPath()); - } - } - } - if (groupPathMap.isEmpty()) { - return result.get(); - } - // TODO: create a thread pool for each query calling. 
- ExecutorService remoteQueryThreadPool = Executors.newFixedThreadPool(groupPathMap.size()); - List> remoteFutures = new ArrayList<>(); - // query each data group separately - for (Entry> partitionGroupPathEntry : groupPathMap.entrySet()) { - PartitionGroup partitionGroup = partitionGroupPathEntry.getKey(); - List pathsToQuery = partitionGroupPathEntry.getValue(); - remoteFutures.add( - remoteQueryThreadPool.submit( - () -> { - try { - result.addAndGet(getRemotePathCount(partitionGroup, pathsToQuery, level)); - } catch (MetadataException e) { - logger.warn( - "Cannot get remote path count of {} from {}", - pathsToQuery, - partitionGroup, - e); - } - return null; - })); - } - waitForThreadPool(remoteFutures, remoteQueryThreadPool, "getPathCount()"); - - return result.get(); - } - - private int getLocalPathCount(PartialPath path, int level) throws MetadataException { - int localResult; - if (level == -1) { - localResult = IoTDB.schemaProcessor.getAllTimeseriesCount(path); - } else { - localResult = IoTDB.schemaProcessor.getNodesCountInGivenLevel(path, level); - } - return localResult; - } - - private int getRemotePathCount( - PartitionGroup partitionGroup, List pathsToQuery, int level) - throws MetadataException { - // choose the node with lowest latency or highest throughput - List coordinatedNodes = QueryCoordinator.getINSTANCE().reorderNodes(partitionGroup); - Integer count; - for (Node node : coordinatedNodes) { - try { - count = getRemotePathCountForOneNode(node, partitionGroup, pathsToQuery, level); - logger.debug( - "{}: get path count of {} from {}, result {}", - metaGroupMember.getName(), - partitionGroup, - node, - count); - if (count != null) { - return count; - } - } catch (IOException | TException e) { - throw new MetadataException(e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new MetadataException(e); - } - } - logger.warn("Cannot get paths of {} from {}", pathsToQuery, partitionGroup); - return 0; - } - - private Integer getRemotePathCountForOneNode( - Node node, PartitionGroup partitionGroup, List pathsToQuery, int level) - throws IOException, TException, InterruptedException { - Integer count; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - client.setTimeout(ClusterConstant.getReadOperationTimeoutMS()); - count = - SyncClientAdaptor.getPathCount(client, partitionGroup.getHeader(), pathsToQuery, level); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - syncDataClient.setTimeout(ClusterConstant.getReadOperationTimeoutMS()); - count = syncDataClient.getPathCount(partitionGroup.getHeader(), pathsToQuery, level); - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - return count; - } - - @Override - protected List getNodesList(PartialPath schemaPattern, int level) - throws MetadataException { - ConcurrentSkipListSet nodeSet = new ConcurrentSkipListSet<>(); - - // TODO: create a thread pool for each query calling. 
- ExecutorService pool = Executors.newFixedThreadPool(THREAD_POOL_SIZE); - List> futureList = new ArrayList<>(); - for (PartitionGroup group : metaGroupMember.getPartitionTable().getGlobalGroups()) { - futureList.add( - pool.submit( - () -> { - List paths; - paths = getNodesList(group, schemaPattern, level); - if (paths != null) { - nodeSet.addAll(paths); - } else { - logger.error( - "Fail to get node list of {}@{} from {}", schemaPattern, level, group); - } - return null; - })); - } - waitForThreadPool(futureList, pool, "getNodesList()"); - return new ArrayList<>(nodeSet); - } - - private List getNodesList(PartitionGroup group, PartialPath schemaPattern, int level) - throws CheckConsistencyException, MetadataException { - if (group.contains(metaGroupMember.getThisNode())) { - return getLocalNodesList(group, schemaPattern, level); - } else { - return getRemoteNodesList(group, schemaPattern, level); - } - } - - private List getLocalNodesList( - PartitionGroup group, PartialPath schemaPattern, int level) - throws CheckConsistencyException, MetadataException { - DataGroupMember localDataMember = metaGroupMember.getLocalDataMember(group.getHeader()); - localDataMember.syncLeaderWithConsistencyCheck(false); - try { - return IoTDB.schemaProcessor.getNodesListInGivenLevel( - schemaPattern, - level, - new SlotSgFilter( - ((SlotPartitionTable) metaGroupMember.getPartitionTable()) - .getNodeSlots(group.getHeader()))); - } catch (MetadataException e) { - logger.error( - "Cannot not get node list of {}@{} from {} locally", schemaPattern, level, group); - throw e; - } - } - - private List getRemoteNodesList( - PartitionGroup group, PartialPath schemaPattern, int level) { - List paths = null; - for (Node node : group) { - try { - paths = getRemoteNodesListForOneNode(node, group, schemaPattern, level); - if (paths != null) { - break; - } - } catch (IOException e) { - logger.error(LOG_FAIL_CONNECT, node, e); - } catch (TException e) { - logger.error("Error occurs when getting node lists in node {}.", node, e); - } catch (InterruptedException e) { - logger.error("Interrupted when getting node lists in node {}.", node, e); - Thread.currentThread().interrupt(); - } - } - return PartialPath.fromStringList(paths); - } - - private List getRemoteNodesListForOneNode( - Node node, PartitionGroup group, PartialPath schemaPattern, int level) - throws TException, InterruptedException, IOException { - List paths; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - paths = - SyncClientAdaptor.getNodeList( - client, group.getHeader(), schemaPattern.getFullPath(), level); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - paths = syncDataClient.getNodeList(group.getHeader(), schemaPattern.getFullPath(), level); - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - return paths; - } - - @Override - protected Set getNodeNextChildren(PartialPath path) throws MetadataException { - ConcurrentSkipListSet resultSet = new ConcurrentSkipListSet<>(); - List globalGroups = metaGroupMember.getPartitionTable().getGlobalGroups(); - // TODO: create a thread pool for each query calling. 
- ExecutorService pool = Executors.newFixedThreadPool(THREAD_POOL_SIZE); - List> futureList = new ArrayList<>(); - for (PartitionGroup group : globalGroups) { - futureList.add( - pool.submit( - () -> { - Set nextChildrenNodes = null; - try { - nextChildrenNodes = getChildNodeInNextLevel(group, path); - } catch (CheckConsistencyException e) { - logger.error("Fail to get next children nodes of {} from {}", path, group, e); - } - if (nextChildrenNodes != null) { - resultSet.addAll(nextChildrenNodes); - } else { - logger.error("Fail to get next children nodes of {} from {}", path, group); - } - return null; - })); - } - waitForThreadPool(futureList, pool, "getChildNodeInNextLevel()"); - return resultSet; - } - - private Set getChildNodeInNextLevel(PartitionGroup group, PartialPath path) - throws CheckConsistencyException { - if (group.contains(metaGroupMember.getThisNode())) { - return getLocalChildNodeInNextLevel(group, path); - } else { - return getRemoteChildNodeInNextLevel(group, path); - } - } - - private Set getLocalChildNodeInNextLevel(PartitionGroup group, PartialPath path) - throws CheckConsistencyException { - DataGroupMember localDataMember = - metaGroupMember.getLocalDataMember(group.getHeader(), group.getRaftId()); - localDataMember.syncLeaderWithConsistencyCheck(false); - try { - return IoTDB.schemaProcessor.getChildNodeNameInNextLevel(path); - } catch (MetadataException e) { - logger.error("Cannot not get next children nodes of {} from {} locally", path, group); - return Collections.emptySet(); - } - } - - private Set getRemoteChildNodeInNextLevel(PartitionGroup group, PartialPath path) { - Set nextChildrenNodes = null; - for (Node node : group) { - try { - nextChildrenNodes = getRemoteChildNodeInNextLevelForOneNode(node, group, path); - if (nextChildrenNodes != null) { - break; - } - } catch (IOException e) { - logger.error(LOG_FAIL_CONNECT, node, e); - } catch (TException e) { - logger.error("Error occurs when getting node lists in node {}.", node, e); - } catch (InterruptedException e) { - logger.error("Interrupted when getting node lists in node {}.", node, e); - Thread.currentThread().interrupt(); - } - } - return nextChildrenNodes; - } - - private Set getRemoteChildNodeInNextLevelForOneNode( - Node node, PartitionGroup group, PartialPath path) - throws TException, InterruptedException, IOException { - Set nextChildrenNodes; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - nextChildrenNodes = - SyncClientAdaptor.getChildNodeInNextLevel(client, group.getHeader(), path.getFullPath()); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - nextChildrenNodes = - syncDataClient.getChildNodeInNextLevel(group.getHeader(), path.getFullPath()); - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - return nextChildrenNodes; - } - - @Override - protected Set getPathNextChildren(PartialPath path) throws MetadataException { - ConcurrentSkipListSet resultSet = new ConcurrentSkipListSet<>(); - // TODO: create a thread pool for each query calling. 
- ExecutorService pool = Executors.newFixedThreadPool(THREAD_POOL_SIZE); - - List> futureList = new ArrayList<>(); - - for (PartitionGroup group : metaGroupMember.getPartitionTable().getGlobalGroups()) { - futureList.add( - pool.submit( - () -> { - Set nextChildren = null; - try { - nextChildren = getNextChildren(group, path); - } catch (CheckConsistencyException e) { - logger.error("Fail to get next children of {} from {}", path, group, e); - } - if (nextChildren != null) { - resultSet.addAll(nextChildren); - } else { - logger.error("Fail to get next children of {} from {}", path, group); - } - return null; - })); - } - waitForThreadPool(futureList, pool, "getPathNextChildren()"); - return resultSet; - } - - public static void waitForThreadPool( - List> futures, ExecutorService pool, String methodName) - throws MetadataException { - for (Future future : futures) { - try { - future.get(); - } catch (InterruptedException e) { - logger.error("Unexpected interruption when waiting for {}", methodName, e); - Thread.currentThread().interrupt(); - } catch (RuntimeException | ExecutionException e) { - throw new MetadataException(e); - } - } - - pool.shutdown(); - try { - pool.awaitTermination(ClusterConstant.getReadOperationTimeoutMS(), TimeUnit.MILLISECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Unexpected interruption when waiting for {}", methodName, e); - } - } - - private Set getNextChildren(PartitionGroup group, PartialPath path) - throws CheckConsistencyException { - if (group.contains(metaGroupMember.getThisNode())) { - return getLocalNextChildren(group, path); - } else { - return getRemoteNextChildren(group, path); - } - } - - private Set getLocalNextChildren(PartitionGroup group, PartialPath path) - throws CheckConsistencyException { - DataGroupMember localDataMember = metaGroupMember.getLocalDataMember(group.getHeader()); - localDataMember.syncLeaderWithConsistencyCheck(false); - try { - return IoTDB.schemaProcessor.getChildNodePathInNextLevel(path); - } catch (MetadataException e) { - logger.error("Cannot not get next children of {} from {} locally", path, group); - return Collections.emptySet(); - } - } - - private Set getRemoteNextChildren(PartitionGroup group, PartialPath path) { - Set nextChildren = null; - for (Node node : group) { - try { - nextChildren = getRemoteNextChildrenForOneNode(node, group, path); - if (nextChildren != null) { - break; - } - } catch (IOException e) { - logger.error(LOG_FAIL_CONNECT, node, e); - } catch (TException e) { - logger.error("Error occurs when getting node lists in node {}.", node, e); - } catch (InterruptedException e) { - logger.error("Interrupted when getting node lists in node {}.", node, e); - Thread.currentThread().interrupt(); - } - } - return nextChildren; - } - - private Set getRemoteNextChildrenForOneNode( - Node node, PartitionGroup group, PartialPath path) - throws TException, InterruptedException, IOException { - Set nextChildren; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - nextChildren = - SyncClientAdaptor.getNextChildren(client, group.getHeader(), path.getFullPath()); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - nextChildren = - 
syncDataClient.getChildNodePathInNextLevel(group.getHeader(), path.getFullPath()); - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - return nextChildren; - } - - @Override - protected List getAllStorageGroupNodes() { - try { - metaGroupMember.syncLeader(null); - } catch (CheckConsistencyException e) { - logger.warn("Failed to check consistency.", e); - } - return IoTDB.schemaProcessor.getAllStorageGroupNodes(); - } - - @Override - protected void loadConfiguration(LoadConfigurationPlan plan) throws QueryProcessException { - switch (plan.getLoadConfigurationPlanType()) { - case GLOBAL: - IoTDBDescriptor.getInstance().loadHotModifiedProps(plan.getIoTDBProperties()); - ClusterDescriptor.getInstance().loadHotModifiedProps(plan.getClusterProperties()); - break; - case LOCAL: - IoTDBDescriptor.getInstance().loadHotModifiedProps(); - ClusterDescriptor.getInstance().loadHotModifiedProps(); - break; - default: - throw new QueryProcessException( - String.format( - "Unrecognized load configuration plan type: %s", - plan.getLoadConfigurationPlanType())); - } - } - - @Override - public void delete(DeletePlan deletePlan) throws QueryProcessException { - if (deletePlan.getPaths().isEmpty()) { - logger.info("TimeSeries list to be deleted is empty."); - return; - } - for (PartialPath path : deletePlan.getPaths()) { - delete( - path, - deletePlan.getDeleteStartTime(), - deletePlan.getDeleteEndTime(), - deletePlan.getIndex(), - deletePlan.getPartitionFilter()); - } - } - - @Override - public void delete( - PartialPath path, - long startTime, - long endTime, - long planIndex, - TimePartitionFilter timePartitionFilter) - throws QueryProcessException { - try { - StorageEngine.getInstance().delete(path, startTime, endTime, planIndex, timePartitionFilter); - } catch (StorageEngineException e) { - throw new QueryProcessException(e); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanRouter.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanRouter.java deleted file mode 100644 index e657f8ad3799..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanRouter.java +++ /dev/null @@ -1,507 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.exception.UnknownLogTypeException; -import org.apache.iotdb.cluster.exception.UnsupportedPlanException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.logtypes.AddNodeLog; -import org.apache.iotdb.cluster.log.logtypes.RemoveNodeLog; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.PartitionUtils; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.metadata.LocalSchemaProcessor; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertMultiTabletsPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowsOfOneDevicePlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowsPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertTabletPlan; -import org.apache.iotdb.db.qp.physical.sys.AlterTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateAlignedTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateMultiTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.LogPlan; -import org.apache.iotdb.db.qp.physical.sys.ShowChildPathsPlan; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.utils.Binary; -import org.apache.iotdb.tsfile.utils.BitMap; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -public class ClusterPlanRouter { - - private static final Logger logger = LoggerFactory.getLogger(ClusterPlanRouter.class); - - private PartitionTable partitionTable; - - public ClusterPlanRouter(PartitionTable partitionTable) { - this.partitionTable = partitionTable; - } - - private LocalSchemaProcessor SchemaProcessor() { - return IoTDB.schemaProcessor; - } - - @TestOnly - public PartitionGroup routePlan(PhysicalPlan plan) - throws UnsupportedPlanException, MetadataException { - if (plan instanceof InsertRowPlan) { - return routePlan((InsertRowPlan) plan); - } else if (plan instanceof CreateTimeSeriesPlan) { - return routePlan((CreateTimeSeriesPlan) plan); - } else if (plan instanceof ShowChildPathsPlan) { - return routePlan((ShowChildPathsPlan) plan); - } - // the if clause can be removed after the program is stable - if (PartitionUtils.isLocalNonQueryPlan(plan)) { - logger.error("{} is a local plan. Please run it locally directly", plan); - } else if (PartitionUtils.isGlobalMetaPlan(plan) || PartitionUtils.isGlobalDataPlan(plan)) { - logger.error("{} is a global plan. Please forward it to all partitionGroups", plan); - } - if (plan.canBeSplit()) { - logger.error("{} can be split. 
Please call splitPlanAndMapToGroups", plan); - } - throw new UnsupportedPlanException(plan); - } - - private PartitionGroup routePlan(InsertRowPlan plan) throws MetadataException { - return partitionTable.partitionByPathTime(plan.getDevicePath(), plan.getTime()); - } - - private PartitionGroup routePlan(CreateTimeSeriesPlan plan) throws MetadataException { - return partitionTable.partitionByPathTime(plan.getPath(), 0); - } - - private PartitionGroup routePlan(ShowChildPathsPlan plan) { - try { - return partitionTable.route( - SchemaProcessor().getBelongedStorageGroup(plan.getPath()).getFullPath(), 0); - } catch (MetadataException e) { - // the path is too short to have no a storage group name, e.g., "root" - // so we can do it locally. - return partitionTable.getLocalGroups().get(0); - } - } - - public Map splitAndRoutePlan(PhysicalPlan plan) - throws UnsupportedPlanException, MetadataException, UnknownLogTypeException { - if (plan instanceof InsertRowsPlan) { - return splitAndRoutePlan((InsertRowsPlan) plan); - } else if (plan instanceof InsertTabletPlan) { - return splitAndRoutePlan((InsertTabletPlan) plan); - } else if (plan instanceof InsertMultiTabletsPlan) { - return splitAndRoutePlan((InsertMultiTabletsPlan) plan); - } else if (plan instanceof CreateTimeSeriesPlan) { - return splitAndRoutePlan((CreateTimeSeriesPlan) plan); - } else if (plan instanceof CreateAlignedTimeSeriesPlan) { - return splitAndRoutePlan((CreateAlignedTimeSeriesPlan) plan); - } else if (plan instanceof InsertRowPlan) { - return splitAndRoutePlan((InsertRowPlan) plan); - } else if (plan instanceof InsertRowsOfOneDevicePlan) { - return splitAndRoutePlan((InsertRowsOfOneDevicePlan) plan); - } else if (plan instanceof AlterTimeSeriesPlan) { - return splitAndRoutePlan((AlterTimeSeriesPlan) plan); - } else if (plan instanceof CreateMultiTimeSeriesPlan) { - return splitAndRoutePlan((CreateMultiTimeSeriesPlan) plan); - } - // the if clause can be removed after the program is stable - if (PartitionUtils.isLocalNonQueryPlan(plan)) { - logger.error("{} is a local plan. Please run it locally directly", plan); - } else if (PartitionUtils.isGlobalMetaPlan(plan) || PartitionUtils.isGlobalDataPlan(plan)) { - logger.error("{} is a global plan. Please forward it to all partitionGroups", plan); - } - if (!plan.canBeSplit()) { - logger.error("{} cannot be split. Please call routePlan", plan); - } - throw new UnsupportedPlanException(plan); - } - - public Map splitAndRouteChangeMembershipLog(Log log) { - Map result = new HashMap<>(); - LogPlan plan = new LogPlan(log.serialize()); - List oldRing = new ArrayList<>(partitionTable.getAllNodes()); - if (log instanceof AddNodeLog) { - oldRing.remove(((AddNodeLog) log).getNewNode()); - } else if (log instanceof RemoveNodeLog) { - if (!oldRing.contains(((RemoveNodeLog) log).getRemovedNode())) { - oldRing.add(((RemoveNodeLog) log).getRemovedNode()); - oldRing.sort(Comparator.comparingInt(Node::getNodeIdentifier)); - } - } - for (PartitionGroup partitionGroup : partitionTable.calculateGlobalGroups(oldRing)) { - // It doesn't need to notify the data group which will be removed from cluster. 
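// [Illustrative sketch, not part of the removed file] The routePlan/splitAndRoutePlan methods in
// this class all reduce to one step: compute the owning partition group for each item (usually
// from the device's storage group plus a time partition), bucket items per group with
// computeIfAbsent, and emit one sub-plan per bucket. A JDK-only rendering of that bucketing step,
// where the group is reduced to an int id and routeToGroup stands in for partitionTable.route(...):
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.ToIntFunction;

class RouteAndBucketSketch {
  /** Buckets rows by the partition-group id that a routing function assigns to each row. */
  static <T> Map<Integer, List<T>> bucketByGroup(List<T> rows, ToIntFunction<T> routeToGroup) {
    Map<Integer, List<T>> perGroup = new HashMap<>();
    for (T row : rows) {
      int group = routeToGroup.applyAsInt(row); // partitionTable.route(...) in the removed code
      perGroup.computeIfAbsent(group, g -> new ArrayList<>()).add(row);
    }
    return perGroup;
  }

  public static void main(String[] args) {
    // rows carry only a timestamp here; route by a fixed time-partition width of 10
    List<Long> times = Arrays.asList(1L, 5L, 12L, 27L);
    // groups the rows as {0=[1, 5], 1=[12], 2=[27]}
    System.out.println(bucketByGroup(times, t -> (int) (t / 10)));
  }
}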
- if (log instanceof RemoveNodeLog - && partitionGroup.getHeader().getNode().equals(((RemoveNodeLog) log).getRemovedNode())) { - continue; - } - result.put(new LogPlan(plan), partitionGroup); - } - return result; - } - - private Map splitAndRoutePlan(InsertRowPlan plan) - throws MetadataException { - PartitionGroup partitionGroup = - partitionTable.partitionByPathTime(plan.getDevicePath(), plan.getTime()); - return Collections.singletonMap(plan, partitionGroup); - } - - private Map splitAndRoutePlan(AlterTimeSeriesPlan plan) - throws MetadataException { - PartitionGroup partitionGroup = partitionTable.partitionByPathTime(plan.getPath(), 0); - return Collections.singletonMap(plan, partitionGroup); - } - - private Map splitAndRoutePlan(CreateTimeSeriesPlan plan) - throws MetadataException { - PartitionGroup partitionGroup = partitionTable.partitionByPathTime(plan.getPath(), 0); - return Collections.singletonMap(plan, partitionGroup); - } - - private Map splitAndRoutePlan(CreateAlignedTimeSeriesPlan plan) - throws MetadataException { - PartitionGroup partitionGroup = partitionTable.partitionByPathTime(plan.getPrefixPath(), 0); - return Collections.singletonMap(plan, partitionGroup); - } - - /** - * @param plan InsertMultiTabletsPlan - * @return key is InsertMultiTabletsPlan, value is the partition group the plan belongs to, all - * InsertTabletPlans in InsertMultiTabletsPlan belongs to one same storage group. - */ - private Map splitAndRoutePlan(InsertMultiTabletsPlan plan) - throws MetadataException { - /* - * the key of pgSgPathPlanMap is the partition group; the value is one map, - * the key of the map is storage group, the value is the InsertMultiTabletsPlan, - * all InsertTabletPlans in InsertMultiTabletsPlan belongs to one same storage group. - */ - Map> pgSgPathPlanMap = new HashMap<>(); - for (int i = 0; i < plan.getInsertTabletPlanList().size(); i++) { - InsertTabletPlan insertTabletPlan = plan.getInsertTabletPlanList().get(i); - Map tmpResult = splitAndRoutePlan(insertTabletPlan); - for (Map.Entry entry : tmpResult.entrySet()) { - // 1. 
handle the value returned by call splitAndRoutePlan(InsertTabletPlan) - InsertTabletPlan tmpPlan = (InsertTabletPlan) entry.getKey(); - PartitionGroup tmpPg = entry.getValue(); - // 1.1 the sg that the plan(actually calculated based on device) belongs to - PartialPath tmpSgPath = - IoTDB.schemaProcessor.getBelongedStorageGroup(tmpPlan.getDevicePath()); - Map sgPathPlanMap = pgSgPathPlanMap.get(tmpPg); - if (sgPathPlanMap == null) { - // 2.1 construct the InsertMultiTabletsPlan - List insertTabletPlanList = new ArrayList<>(); - List parentInsetTablePlanIndexList = new ArrayList<>(); - insertTabletPlanList.add(tmpPlan); - parentInsetTablePlanIndexList.add(i); - InsertMultiTabletsPlan insertMultiTabletsPlan = - new InsertMultiTabletsPlan(insertTabletPlanList, parentInsetTablePlanIndexList); - - // 2.2 construct the sgPathPlanMap - sgPathPlanMap = new HashMap<>(); - sgPathPlanMap.put(tmpSgPath, insertMultiTabletsPlan); - - // 2.3 put the sgPathPlanMap to the pgSgPathPlanMap - pgSgPathPlanMap.put(tmpPg, sgPathPlanMap); - } else { - InsertMultiTabletsPlan insertMultiTabletsPlan = sgPathPlanMap.get(tmpSgPath); - if (insertMultiTabletsPlan == null) { - List insertTabletPlanList = new ArrayList<>(); - List parentInsetTablePlanIndexList = new ArrayList<>(); - insertTabletPlanList.add(tmpPlan); - parentInsetTablePlanIndexList.add(i); - insertMultiTabletsPlan = - new InsertMultiTabletsPlan(insertTabletPlanList, parentInsetTablePlanIndexList); - - // 2.4 put the insertMultiTabletsPlan to the tmpSgPath - sgPathPlanMap.put(tmpSgPath, insertMultiTabletsPlan); - } else { - // 2.5 just add the tmpPlan to the insertMultiTabletsPlan - insertMultiTabletsPlan.addInsertTabletPlan(tmpPlan, i); - } - } - } - } - - Map result = new HashMap<>(pgSgPathPlanMap.values().size()); - for (Map.Entry> pgMapEntry : - pgSgPathPlanMap.entrySet()) { - PartitionGroup pg = pgMapEntry.getKey(); - Map sgPathPlanMap = pgMapEntry.getValue(); - // All InsertTabletPlan in InsertMultiTabletsPlan belong to the same storage group - for (Map.Entry sgPathEntry : sgPathPlanMap.entrySet()) { - result.put(sgPathEntry.getValue(), pg); - } - } - return result; - } - - /** - * @param insertRowsPlan InsertRowsPlan - * @return key is InsertRowsPlan, value is the partition group the plan belongs to, all - * InsertRowPlans in InsertRowsPlan belongs to one same storage group. 
- */ - private Map splitAndRoutePlan(InsertRowsPlan insertRowsPlan) - throws MetadataException { - Map result = new HashMap<>(); - Map groupPlanMap = new HashMap<>(); - for (int i = 0; i < insertRowsPlan.getInsertRowPlanList().size(); i++) { - InsertRowPlan rowPlan = insertRowsPlan.getInsertRowPlanList().get(i); - PartialPath storageGroup = SchemaProcessor().getBelongedStorageGroup(rowPlan.getDevicePath()); - PartitionGroup group = partitionTable.route(storageGroup.getFullPath(), rowPlan.getTime()); - if (groupPlanMap.containsKey(group)) { - InsertRowsPlan tmpPlan = groupPlanMap.get(group); - tmpPlan.addOneInsertRowPlan(rowPlan, i); - } else { - InsertRowsPlan tmpPlan = new InsertRowsPlan(); - tmpPlan.addOneInsertRowPlan(rowPlan, i); - groupPlanMap.put(group, tmpPlan); - } - } - - for (Entry entry : groupPlanMap.entrySet()) { - result.put(entry.getValue(), entry.getKey()); - } - return result; - } - - @SuppressWarnings("SuspiciousSystemArraycopy") - private Map splitAndRoutePlan(InsertTabletPlan plan) - throws MetadataException { - PartialPath storageGroup = SchemaProcessor().getBelongedStorageGroup(plan.getDevicePath()); - Map result = new HashMap<>(); - long[] times = plan.getTimes(); - if (times.length == 0) { - return Collections.emptyMap(); - } - long startTime = - (times[0] / StorageEngine.getTimePartitionInterval()) - * StorageEngine.getTimePartitionInterval(); // included - long endTime = startTime + StorageEngine.getTimePartitionInterval(); // excluded - int startLoc = 0; // included - - Map> splitMap = new HashMap<>(); - // for each List in split, they are range1.start, range1.end, range2.start, range2.end, ... - for (int i = 1; i < times.length; i++) { // times are sorted in session API. - if (times[i] >= endTime) { - // a new range. - PartitionGroup group = partitionTable.route(storageGroup.getFullPath(), startTime); - List ranges = splitMap.computeIfAbsent(group, x -> new ArrayList<>()); - ranges.add(startLoc); // included - ranges.add(i); // excluded - // next init - startLoc = i; - startTime = endTime; - endTime = - (times[i] / StorageEngine.getTimePartitionInterval() + 1) - * StorageEngine.getTimePartitionInterval(); - } - } - // the final range - PartitionGroup group = partitionTable.route(storageGroup.getFullPath(), startTime); - List ranges = splitMap.computeIfAbsent(group, x -> new ArrayList<>()); - ranges.add(startLoc); // included - ranges.add(times.length); // excluded - - List locs; - for (Map.Entry> entry : splitMap.entrySet()) { - // generate a new times and values - locs = entry.getValue(); - int count = 0; - for (int i = 0; i < locs.size(); i += 2) { - int start = locs.get(i); - int end = locs.get(i + 1); - count += end - start; - } - long[] subTimes = new long[count]; - int destLoc = 0; - Object[] values = initTabletValues(plan.getDataTypes().length, count, plan.getDataTypes()); - BitMap[] bitMaps = - plan.getBitMaps() == null ? 
null : initBitmaps(plan.getDataTypes().length, count); - for (int i = 0; i < locs.size(); i += 2) { - int start = locs.get(i); - int end = locs.get(i + 1); - System.arraycopy(plan.getTimes(), start, subTimes, destLoc, end - start); - for (int k = 0; k < values.length; k++) { - System.arraycopy(plan.getColumns()[k], start, values[k], destLoc, end - start); - if (bitMaps != null && plan.getBitMaps()[k] != null) { - BitMap.copyOfRange(plan.getBitMaps()[k], start, bitMaps[k], destLoc, end - start); - } - } - destLoc += end - start; - } - InsertTabletPlan newBatch = PartitionUtils.copy(plan, subTimes, values, bitMaps); - newBatch.setRange(locs); - newBatch.setAligned(plan.isAligned()); - result.put(newBatch, entry.getKey()); - } - return result; - } - - private Object[] initTabletValues(int columnSize, int rowSize, TSDataType[] dataTypes) { - Object[] values = new Object[columnSize]; - for (int i = 0; i < values.length; i++) { - switch (dataTypes[i]) { - case TEXT: - values[i] = new Binary[rowSize]; - break; - case FLOAT: - values[i] = new float[rowSize]; - break; - case INT32: - values[i] = new int[rowSize]; - break; - case INT64: - values[i] = new long[rowSize]; - break; - case DOUBLE: - values[i] = new double[rowSize]; - break; - case BOOLEAN: - values[i] = new boolean[rowSize]; - break; - } - } - return values; - } - - private BitMap[] initBitmaps(int columnSize, int rowSize) { - BitMap[] bitMaps = new BitMap[columnSize]; - for (int i = 0; i < columnSize; i++) { - bitMaps[i] = new BitMap(rowSize); - } - return bitMaps; - } - - private Map splitAndRoutePlan(CreateMultiTimeSeriesPlan plan) - throws MetadataException { - Map result = new HashMap<>(); - Map groupHoldPlan = new HashMap<>(); - - for (int i = 0; i < plan.getPaths().size(); i++) { - PartialPath path = plan.getPaths().get(i); - if (plan.getResults().containsKey(i)) { - continue; - } - PartitionGroup partitionGroup = partitionTable.partitionByPathTime(path, 0); - CreateMultiTimeSeriesPlan subPlan; - if (groupHoldPlan.get(partitionGroup) == null) { - subPlan = createSubPlan(plan); - groupHoldPlan.put(partitionGroup, subPlan); - } else { - subPlan = (CreateMultiTimeSeriesPlan) groupHoldPlan.get(partitionGroup); - } - - subPlan.getPaths().add(path); - subPlan.getDataTypes().add(plan.getDataTypes().get(i)); - subPlan.getEncodings().add(plan.getEncodings().get(i)); - subPlan.getCompressors().add(plan.getCompressors().get(i)); - if (plan.getAlias() != null) { - subPlan.getAlias().add(plan.getAlias().get(i)); - } - if (plan.getProps() != null) { - subPlan.getProps().add(plan.getProps().get(i)); - } - if (plan.getTags() != null) { - subPlan.getTags().add(plan.getTags().get(i)); - } - if (plan.getAttributes() != null) { - subPlan.getAttributes().add(plan.getAttributes().get(i)); - } - subPlan.getIndexes().add(i); - } - - for (Map.Entry entry : groupHoldPlan.entrySet()) { - result.put(entry.getValue(), entry.getKey()); - } - return result; - } - - private CreateMultiTimeSeriesPlan createSubPlan(CreateMultiTimeSeriesPlan plan) { - CreateMultiTimeSeriesPlan subPlan = new CreateMultiTimeSeriesPlan(); - subPlan.setPaths(new ArrayList<>()); - subPlan.setDataTypes(new ArrayList<>()); - subPlan.setEncodings(new ArrayList<>()); - subPlan.setCompressors(new ArrayList<>()); - if (plan.getAlias() != null) { - subPlan.setAlias(new ArrayList<>()); - } - if (plan.getProps() != null) { - subPlan.setProps(new ArrayList<>()); - } - if (plan.getTags() != null) { - subPlan.setTags(new ArrayList<>()); - } - if (plan.getAttributes() != null) { - 
subPlan.setAttributes(new ArrayList<>()); - } - subPlan.setIndexes(new ArrayList<>()); - return subPlan; - } - - /** - * @param plan InsertRowsOfOneDevicePlan - * @return key is InsertRowsOfOneDevicePlan, value is the partition group the plan belongs to. All - * InsertRowPlans in InsertRowsOfOneDevicePlan belong to one same storage group. - */ - private Map splitAndRoutePlan(InsertRowsOfOneDevicePlan plan) - throws MetadataException { - Map result = new HashMap<>(); - Map> groupPlanMap = new HashMap<>(); - Map> groupPlanIndexMap = new HashMap<>(); - PartialPath storageGroup = SchemaProcessor().getBelongedStorageGroup(plan.getDevicePath()); - for (int i = 0; i < plan.getRowPlans().length; i++) { - InsertRowPlan p = plan.getRowPlans()[i]; - PartitionGroup group = partitionTable.route(storageGroup.getFullPath(), p.getTime()); - List groupedPlans = - groupPlanMap.computeIfAbsent(group, k -> new ArrayList<>()); - List groupedPlanIndex = - groupPlanIndexMap.computeIfAbsent(group, k -> new ArrayList<>()); - groupedPlans.add(p); - groupedPlanIndex.add(plan.getRowPlanIndexList()[i]); - } - - for (Entry> entry : groupPlanMap.entrySet()) { - PhysicalPlan reducedPlan = - new InsertRowsOfOneDevicePlan( - plan.getDevicePath(), - entry.getValue().toArray(new InsertRowPlan[0]), - groupPlanIndexMap.get(entry.getKey()).stream().mapToInt(i -> i).toArray()); - result.put(reducedPlan, entry.getKey()); - } - return result; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanner.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanner.java deleted file mode 100644 index 5ee0126d3c5b..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterPlanner.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.Planner; -import org.apache.iotdb.db.qp.logical.Operator; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; - -public class ClusterPlanner extends Planner { - - @Override - protected PhysicalPlan generatePhysicalPlanFromOperator(Operator operator) - throws QueryProcessException { - // from logical operator to physical plan - return new ClusterPhysicalGenerator().transformToPhysicalPlan(operator); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterQueryRouter.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterQueryRouter.java deleted file mode 100644 index 19dde2f75ed7..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterQueryRouter.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.query.aggregate.ClusterAggregateExecutor; -import org.apache.iotdb.cluster.query.fill.ClusterFillExecutor; -import org.apache.iotdb.cluster.query.groupby.ClusterGroupByNoVFilterDataSet; -import org.apache.iotdb.cluster.query.groupby.ClusterGroupByVFilterDataSet; -import org.apache.iotdb.cluster.query.last.ClusterLastQueryExecutor; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.physical.crud.AggregationPlan; -import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan; -import org.apache.iotdb.db.qp.physical.crud.GroupByTimePlan; -import org.apache.iotdb.db.qp.physical.crud.LastQueryPlan; -import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan; -import org.apache.iotdb.db.qp.physical.crud.UDTFPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.dataset.groupby.GroupByWithValueFilterDataSet; -import org.apache.iotdb.db.query.dataset.groupby.GroupByWithoutValueFilterDataSet; -import org.apache.iotdb.db.query.executor.AggregationExecutor; -import org.apache.iotdb.db.query.executor.FillQueryExecutor; -import org.apache.iotdb.db.query.executor.LastQueryExecutor; -import org.apache.iotdb.db.query.executor.QueryRouter; -import org.apache.iotdb.db.query.executor.RawDataQueryExecutor; -import org.apache.iotdb.tsfile.read.expression.ExpressionType; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; - -import java.io.IOException; - -public class ClusterQueryRouter extends QueryRouter { - - private MetaGroupMember metaGroupMember; - - ClusterQueryRouter(MetaGroupMember metaGroupMember) { - 
this.metaGroupMember = metaGroupMember; - } - - @Override - protected FillQueryExecutor getFillExecutor(FillQueryPlan plan) { - return new ClusterFillExecutor(plan, metaGroupMember); - } - - @Override - protected GroupByWithoutValueFilterDataSet getGroupByWithoutValueFilterDataSet( - QueryContext context, GroupByTimePlan plan) { - return new ClusterGroupByNoVFilterDataSet(context, plan, metaGroupMember); - } - - @Override - protected GroupByWithValueFilterDataSet getGroupByWithValueFilterDataSet( - QueryContext context, GroupByTimePlan plan) { - return new ClusterGroupByVFilterDataSet(context, plan, metaGroupMember); - } - - @Override - protected AggregationExecutor getAggregationExecutor( - QueryContext context, AggregationPlan aggregationPlan) { - return new ClusterAggregateExecutor(context, aggregationPlan, metaGroupMember); - } - - @Override - protected RawDataQueryExecutor getRawDataQueryExecutor(RawDataQueryPlan queryPlan) { - return new ClusterDataQueryExecutor(queryPlan, metaGroupMember); - } - - @Override - protected LastQueryExecutor getLastQueryExecutor(LastQueryPlan lastQueryPlan) { - return new ClusterLastQueryExecutor(lastQueryPlan, metaGroupMember); - } - - @Override - public QueryDataSet udtfQuery(UDTFPlan udtfPlan, QueryContext context) - throws StorageEngineException, QueryProcessException, IOException, InterruptedException { - boolean withValueFilter = - udtfPlan.getExpression() != null - && udtfPlan.getExpression().getType() != ExpressionType.GLOBAL_TIME; - ClusterUDTFQueryExecutor clusterUDTFQueryExecutor = - new ClusterUDTFQueryExecutor(udtfPlan, metaGroupMember); - - if (udtfPlan.isAlignByTime()) { - return withValueFilter - ? clusterUDTFQueryExecutor.executeWithValueFilterAlignByTime(context) - : clusterUDTFQueryExecutor.executeWithoutValueFilterAlignByTime(context); - } else { - return withValueFilter - ? clusterUDTFQueryExecutor.executeWithValueFilterNonAlign(context) - : clusterUDTFQueryExecutor.executeWithoutValueFilterNonAlign(context); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterUDTFQueryExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterUDTFQueryExecutor.java deleted file mode 100644 index 8a32d38ba909..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/ClusterUDTFQueryExecutor.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.qp.physical.crud.UDTFPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.dataset.UDTFAlignByTimeDataSet; -import org.apache.iotdb.db.query.dataset.UDTFNonAlignDataSet; -import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp; -import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; -import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator; -import org.apache.iotdb.tsfile.utils.Pair; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -import static org.apache.iotdb.tsfile.read.query.executor.ExecutorWithTimeGenerator.markFilterdPaths; - -public class ClusterUDTFQueryExecutor extends ClusterDataQueryExecutor { - - protected final UDTFPlan udtfPlan; - protected final MetaGroupMember metaGroupMember; - - public ClusterUDTFQueryExecutor(UDTFPlan udtfPlan, MetaGroupMember metaGroupMember) { - super(udtfPlan, metaGroupMember); - this.udtfPlan = udtfPlan; - this.metaGroupMember = metaGroupMember; - } - - public QueryDataSet executeWithoutValueFilterAlignByTime(QueryContext context) - throws StorageEngineException, QueryProcessException, IOException, InterruptedException { - List readersOfSelectedSeries = initManagedSeriesReader(context); - return new UDTFAlignByTimeDataSet(context, udtfPlan, readersOfSelectedSeries); - } - - public QueryDataSet executeWithValueFilterAlignByTime(QueryContext context) - throws StorageEngineException, QueryProcessException, IOException { - // transfer to MeasurementPath to AlignedPath if it's under an aligned entity - queryPlan.setDeduplicatedPaths( - queryPlan.getDeduplicatedPaths().stream() - .map(p -> ((MeasurementPath) p).transformToExactPath()) - .collect(Collectors.toList())); - TimeGenerator timestampGenerator = getTimeGenerator(context, udtfPlan); - List cached = - markFilterdPaths( - udtfPlan.getExpression(), - new ArrayList<>(udtfPlan.getDeduplicatedPaths()), - timestampGenerator.hasOrNode()); - Pair, List>> pair = - initSeriesReaderByTimestamp(context, udtfPlan, cached, timestampGenerator.getTimeFilter()); - return new UDTFAlignByTimeDataSet( - context, udtfPlan, timestampGenerator, pair.left, pair.right, cached); - } - - public QueryDataSet executeWithoutValueFilterNonAlign(QueryContext context) - throws QueryProcessException, StorageEngineException, IOException, InterruptedException { - List readersOfSelectedSeries = initManagedSeriesReader(context); - return new UDTFNonAlignDataSet(context, udtfPlan, readersOfSelectedSeries); - } - - public QueryDataSet executeWithValueFilterNonAlign(QueryContext context) - throws QueryProcessException, StorageEngineException, IOException { - // transfer to MeasurementPath to AlignedPath if it's under an aligned entity - queryPlan.setDeduplicatedPaths( - queryPlan.getDeduplicatedPaths().stream() - .map(p -> ((MeasurementPath) p).transformToExactPath()) - .collect(Collectors.toList())); - TimeGenerator timestampGenerator = getTimeGenerator(context, udtfPlan); - List cached = - markFilterdPaths( - udtfPlan.getExpression(), - new ArrayList<>(udtfPlan.getDeduplicatedPaths()), - 
timestampGenerator.hasOrNode()); - Pair, List>> pair = - initSeriesReaderByTimestamp(context, udtfPlan, cached, timestampGenerator.getTimeFilter()); - return new UDTFNonAlignDataSet( - context, udtfPlan, timestampGenerator, pair.left, pair.right, cached); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/LocalQueryExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/LocalQueryExecutor.java deleted file mode 100644 index 4cf133e5f265..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/LocalQueryExecutor.java +++ /dev/null @@ -1,1066 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.ReaderNotFoundException; -import org.apache.iotdb.cluster.metadata.CSchemaProcessor; -import org.apache.iotdb.cluster.metadata.MetaPuller; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.query.filter.SlotTsFileFilter; -import org.apache.iotdb.cluster.query.manage.ClusterQueryManager; -import org.apache.iotdb.cluster.query.reader.ClusterReaderFactory; -import org.apache.iotdb.cluster.query.reader.mult.IMultBatchReader; -import org.apache.iotdb.cluster.rpc.thrift.GetAggrResultRequest; -import org.apache.iotdb.cluster.rpc.thrift.GroupByRequest; -import org.apache.iotdb.cluster.rpc.thrift.LastQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.MeasurementSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.MultSeriesQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PreviousFillRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaResp; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.SingleSeriesQueryRequest; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.utils.ClusterUtils; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.sys.ShowDevicesPlan; -import org.apache.iotdb.db.qp.physical.sys.ShowTimeSeriesPlan; -import 
org.apache.iotdb.db.query.aggregation.AggregateResult; -import org.apache.iotdb.db.query.aggregation.AggregationType; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.dataset.ShowDevicesResult; -import org.apache.iotdb.db.query.dataset.ShowTimeSeriesResult; -import org.apache.iotdb.db.query.executor.AggregationExecutor; -import org.apache.iotdb.db.query.executor.LastQueryExecutor; -import org.apache.iotdb.db.query.executor.fill.PreviousFill; -import org.apache.iotdb.db.query.executor.groupby.GroupByExecutor; -import org.apache.iotdb.db.query.executor.groupby.impl.LocalGroupByExecutor; -import org.apache.iotdb.db.query.factory.AggregateResultFactory; -import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.rpc.TSStatusCode; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.common.BatchData; -import org.apache.iotdb.tsfile.read.expression.IExpression; -import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; -import org.apache.iotdb.tsfile.read.filter.factory.FilterFactory; -import org.apache.iotdb.tsfile.read.reader.IBatchReader; -import org.apache.iotdb.tsfile.utils.Pair; -import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.apache.iotdb.cluster.utils.ClusterQueryUtils.getAssembledPathFromRequest; - -public class LocalQueryExecutor { - - private static final Logger logger = LoggerFactory.getLogger(LocalQueryExecutor.class); - public static final String DEBUG_SHOW_QUERY_ID = "{}: local queryId for {}#{} is {}"; - private DataGroupMember dataGroupMember; - private ClusterReaderFactory readerFactory; - private String name; - private ClusterQueryManager queryManager; - - public LocalQueryExecutor(DataGroupMember dataGroupMember) { - this.dataGroupMember = dataGroupMember; - this.readerFactory = new ClusterReaderFactory(dataGroupMember.getMetaGroupMember()); - this.name = dataGroupMember.getName(); - this.queryManager = dataGroupMember.getQueryManager(); - } - - private CSchemaProcessor getCSchemaProcessor() { - return ((CSchemaProcessor) IoTDB.schemaProcessor); - } - - /** Return the data of the reader whose id is "readerId", using timestamps in "timeBuffer". 
*/ - public ByteBuffer fetchSingleSeriesByTimestamps(long readerId, long[] timestamps, int length) - throws ReaderNotFoundException, IOException { - IReaderByTimestamp reader = dataGroupMember.getQueryManager().getReaderByTimestamp(readerId); - if (reader == null) { - throw new ReaderNotFoundException(readerId); - } - Object[] values = reader.getValuesInTimestamps(timestamps, length); - if (values != null) { - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - - SerializeUtils.serializeObjects(values, dataOutputStream); - return ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - } else { - return ByteBuffer.allocate(0); - } - } - - /** - * Fetch a batch from the reader whose id is "readerId". - * - * @param readerId - */ - public ByteBuffer fetchSingleSeries(long readerId) throws ReaderNotFoundException, IOException { - IBatchReader reader = dataGroupMember.getQueryManager().getReader(readerId); - if (reader == null) { - throw new ReaderNotFoundException(readerId); - } - - if (reader.hasNextBatch()) { - BatchData batchData = reader.nextBatch(); - - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - - SerializeUtils.serializeBatchData(batchData, dataOutputStream); - logger.debug( - "{}: Send results of reader {}, size:{}", - dataGroupMember.getName(), - readerId, - batchData.length()); - return ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - } else { - return ByteBuffer.allocate(0); - } - } - - /** - * Fetch a batch from the reader whose id is "readerId". - * - * @param readerId reader id - * @param paths mult series path - */ - public Map fetchMultSeries(long readerId, List paths) - throws ReaderNotFoundException, IOException { - IMultBatchReader reader = - (IMultBatchReader) dataGroupMember.getQueryManager().getReader(readerId); - if (reader == null) { - throw new ReaderNotFoundException(readerId); - } - - Map pathByteBuffers = Maps.newHashMap(); - - for (String path : paths) { - ByteBuffer byteBuffer; - if (reader.hasNextBatch(path)) { - BatchData batchData = reader.nextBatch(path); - - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - - SerializeUtils.serializeBatchData(batchData, dataOutputStream); - logger.debug( - "{}: Send results of reader {}, size:{}", - dataGroupMember.getName(), - readerId, - batchData.length()); - byteBuffer = ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - } else { - byteBuffer = ByteBuffer.allocate(0); - } - pathByteBuffers.put(path, byteBuffer); - } - return pathByteBuffers; - } - - /** - * Create an IBatchReader of a path, register it in the query manager to get a reader id for it - * and send the id back to the requester. If the reader does not have any data, an id of -1 will - * be returned. 
- * - * @param request - */ - public long querySingleSeries(SingleSeriesQueryRequest request) - throws CheckConsistencyException, QueryProcessException, StorageEngineException, IOException, - MetadataException { - logger.debug( - "{}: {} is querying {}, queryId: {}", - name, - request.getRequester(), - request.getPath(), - request.getQueryId()); - dataGroupMember.syncLeaderWithConsistencyCheck(false); - - MeasurementPath path = - getAssembledPathFromRequest(request.getPath(), (byte) request.getDataTypeOrdinal()); - // The request is routed to this node since this node contains the data and - // metadata of the designated timeseries. Because of which, the following metadata access will - // not trigger an RPC. - path.setMeasurementSchema(IoTDB.schemaProcessor.getSeriesSchema(path)); - TSDataType dataType = TSDataType.values()[request.getDataTypeOrdinal()]; - Filter timeFilter = null; - Filter valueFilter = null; - if (request.isSetTimeFilterBytes()) { - timeFilter = FilterFactory.deserialize(request.timeFilterBytes); - } - if (request.isSetValueFilterBytes()) { - valueFilter = FilterFactory.deserialize(request.valueFilterBytes); - } - Set deviceMeasurements = request.getDeviceMeasurements(); - - // the same query from a requester correspond to a context here - RemoteQueryContext queryContext = - queryManager.getQueryContext(request.getRequester(), request.getQueryId()); - logger.debug( - DEBUG_SHOW_QUERY_ID, - name, - request.getQueryId(), - request.getPath(), - queryContext.getQueryId()); - IBatchReader batchReader = - readerFactory.getSeriesBatchReader( - path, - deviceMeasurements, - dataType, - timeFilter, - valueFilter, - queryContext, - dataGroupMember, - request.ascending, - request.requiredSlots); - - // if the reader contains no data, send a special id of -1 to prevent the requester from - // meaninglessly fetching data - if (batchReader != null && batchReader.hasNextBatch()) { - long readerId = queryManager.registerReader(batchReader); - queryContext.registerLocalReader(readerId); - logger.debug( - "{}: Build a reader of {} for {}#{}, readerId: {}", - name, - path, - request.getRequester(), - request.getQueryId(), - readerId); - return readerId; - } else { - logger.debug( - "{}: There is no data of {} for {}#{}", - name, - path, - request.getRequester(), - request.getQueryId()); - - if (batchReader != null) { - batchReader.close(); - } - return -1; - } - } - - /** - * Create an IBatchReader of a path, register it in the query manager to get a reader id for it - * and send the id back to the requester. If the reader does not have any data, an id of -1 will - * be returned. - * - * @param request - */ - public long queryMultSeries(MultSeriesQueryRequest request) - throws CheckConsistencyException, QueryProcessException, StorageEngineException, IOException, - MetadataException { - logger.debug( - "{}: {} is querying {}, queryId: {}", - name, - request.getRequester(), - request.getPath(), - request.getQueryId()); - dataGroupMember.syncLeaderWithConsistencyCheck(false); - - List paths = Lists.newArrayList(); - List dataTypes = Lists.newArrayList(); - for (int i = 0; i < request.getPath().size(); i++) { - MeasurementPath path = - getAssembledPathFromRequest( - request.getPath().get(i), request.getDataTypeOrdinal().get(i).byteValue()); - // The request is routed to this node since this node contains the data and - // metadata of the designated timeseries. Because of which, the following metadata access will - // not trigger an RPC. 
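// [Illustrative sketch, not part of the removed file] querySingleSeries()/queryMultSeries() in
// this class only ship a reader id back to the coordinator: the reader itself is parked in the
// local query manager, and -1 is the "no data here" sentinel that stops the requester from issuing
// follow-up fetches. A minimal registry with the same contract, using only the JDK:
import java.util.Iterator;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

class ReaderRegistrySketch {
  private final AtomicLong nextId = new AtomicLong();
  private final ConcurrentMap<Long, Iterator<?>> readers = new ConcurrentHashMap<>();

  /** Registers a reader and returns its id, or -1 if it has nothing to read. */
  long register(Iterator<?> reader) {
    if (reader == null || !reader.hasNext()) {
      return -1L; // requester skips remote fetches entirely
    }
    long id = nextId.incrementAndGet();
    readers.put(id, reader);
    return id;
  }

  Iterator<?> get(long readerId) {
    return readers.get(readerId); // a null here maps to ReaderNotFoundException in the removed code
  }

  void release(long readerId) {
    readers.remove(readerId); // called when the query ends
  }
}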
- path.setMeasurementSchema(IoTDB.schemaProcessor.getSeriesSchema(path)); - paths.add(path); - dataTypes.add(TSDataType.values()[request.getDataTypeOrdinal().get(i)]); - } - Filter timeFilter = null; - Filter valueFilter = null; - if (request.isSetTimeFilterBytes()) { - timeFilter = FilterFactory.deserialize(request.timeFilterBytes); - } - if (request.isSetValueFilterBytes()) { - valueFilter = FilterFactory.deserialize(request.valueFilterBytes); - } - Map> deviceMeasurements = request.getDeviceMeasurements(); - - // the same query from a requester correspond to a context here - RemoteQueryContext queryContext = - queryManager.getQueryContext(request.getRequester(), request.getQueryId()); - logger.debug( - DEBUG_SHOW_QUERY_ID, - name, - request.getQueryId(), - request.getPath(), - queryContext.getQueryId()); - IBatchReader batchReader = - readerFactory.getMultSeriesBatchReader( - paths, - deviceMeasurements, - dataTypes, - timeFilter, - valueFilter, - queryContext, - dataGroupMember, - request.ascending); - - // if the reader contains no data, send a special id of -1 to prevent the requester from - // meaninglessly fetching data - if (batchReader != null && batchReader.hasNextBatch()) { - long readerId = queryManager.registerReader(batchReader); - queryContext.registerLocalReader(readerId); - logger.debug( - "{}: Build a reader of {} for {}#{}, readerId: {}", - name, - paths, - request.getRequester(), - request.getQueryId(), - readerId); - return readerId; - } else { - logger.debug( - "{}: There is no data of {} for {}#{}", - name, - paths, - request.getRequester(), - request.getQueryId()); - - if (batchReader != null) { - batchReader.close(); - } - return -1; - } - } - - /** - * Send the timeseries schemas of some prefix paths to the requester. The schemas will be sent in - * the form of a list of MeasurementSchema, but notice the measurements in them are the full - * paths. - * - * @param request - */ - public PullSchemaResp queryTimeSeriesSchema(PullSchemaRequest request) - throws CheckConsistencyException, MetadataException { - // try to synchronize with the leader first in case that some schema logs are accepted but - // not committed yet - dataGroupMember.syncLeaderWithConsistencyCheck(false); - - // collect local timeseries schemas and send to the requester - // the measurements in them are the full paths. - List prefixPaths = request.getPrefixPaths(); - List timeseriesSchemas = new ArrayList<>(); - collectTimeseriesSchema(prefixPaths, timeseriesSchemas); - if (logger.isDebugEnabled()) { - logger.debug( - "{}: Collected {} schemas for {} and other {} paths", - name, - timeseriesSchemas.size(), - prefixPaths.get(0), - prefixPaths.size() - 1); - } - - PullSchemaResp resp = new PullSchemaResp(); - // serialize the schemas - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - try { - dataOutputStream.writeInt(timeseriesSchemas.size()); - for (TimeseriesSchema timeseriesSchema : timeseriesSchemas) { - timeseriesSchema.serializeTo(dataOutputStream); - } - } catch (IOException ignored) { - // unreachable for we are using a ByteArrayOutputStream - } - resp.setSchemaBytes(byteArrayOutputStream.toByteArray()); - return resp; - } - - /** - * Send the timeseries schemas of some prefix paths to the requester. The schemas will be sent in - * the form of a list of MeasurementSchema, but notice the measurements in them are the full - * paths. 
- * - * @param request - */ - public PullSchemaResp queryMeasurementSchema(PullSchemaRequest request) // pullMeasurementSchemas - throws CheckConsistencyException, MetadataException { - // try to synchronize with the leader first in case that some schema logs are accepted but - // not committed yet - dataGroupMember.syncLeaderWithConsistencyCheck(false); - - // collect local timeseries schemas and send to the requester - // the measurements in them are the full paths. - List prefixPaths = request.getPrefixPaths(); - List measurementSchemas = new ArrayList<>(); - - collectSeries(prefixPaths, measurementSchemas); - if (logger.isDebugEnabled()) { - logger.debug( - "{}: Collected {} schemas for {} and other {} paths", - name, - measurementSchemas.size(), - prefixPaths.get(0), - prefixPaths.size() - 1); - } - - PullSchemaResp resp = new PullSchemaResp(); - // serialize the schemas - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - try { - dataOutputStream.writeInt(measurementSchemas.size()); - for (IMeasurementSchema timeseriesSchema : measurementSchemas) { - timeseriesSchema.partialSerializeTo(dataOutputStream); - } - } catch (IOException ignored) { - // unreachable for we are using a ByteArrayOutputStream - } - resp.setSchemaBytes(byteArrayOutputStream.toByteArray()); - return resp; - } - - private void collectSeries(List prefixPaths, List measurementSchemas) - throws MetadataException { - // Due to add/remove node, some slots may in the state of PULLING, which will not contains the - // corresponding schemas. - // In this case, we need to pull series from previous holder. - Map> prePartitionGroupPathMap = new HashMap<>(); - - RaftNode header = dataGroupMember.getHeader(); - Map slotPreviousHolderMap = - ((SlotPartitionTable) dataGroupMember.getMetaGroupMember().getPartitionTable()) - .getPreviousNodeMap() - .get(header); - - for (String prefixPath : prefixPaths) { - int slot = - ClusterUtils.getSlotByPathTimeWithSync( - new PartialPath(prefixPath), dataGroupMember.getMetaGroupMember()); - if (dataGroupMember.getSlotManager().checkSlotInMetaMigrationStatus(slot) - && slotPreviousHolderMap.containsKey(slot)) { - prePartitionGroupPathMap - .computeIfAbsent(slotPreviousHolderMap.get(slot), s -> new ArrayList<>()) - .add(new PartialPath(prefixPath)); - } else { - // getCSchemaProcessor() - // .collectMeasurementSchema(new PartialPath(prefixPath), measurementSchemas); - } - } - - if (prePartitionGroupPathMap.isEmpty()) { - return; - } - for (Map.Entry> partitionGroupListEntry : - prePartitionGroupPathMap.entrySet()) { - PartitionGroup partitionGroup = partitionGroupListEntry.getKey(); - List paths = partitionGroupListEntry.getValue(); - MetaPuller.getInstance().pullMeasurementSchemas(partitionGroup, paths, measurementSchemas); - } - } - - private void collectTimeseriesSchema( - List prefixPaths, List timeseriesSchemas) throws MetadataException { - // Due to add/remove node, some slots may in the state of PULLING, which will not contains the - // corresponding schemas. - // In this case, we need to pull series from previous holder. 
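// [Illustrative sketch, not part of the removed file] queryTimeSeriesSchema() and
// queryMeasurementSchema() above return schemas as one byte blob: an int count followed by each
// serialized entry, built with a DataOutputStream over a ByteArrayOutputStream (which is why the
// IOException can be ignored), while fetchSingleSeries() uses an empty buffer as the "nothing
// left" sentinel. A self-contained version of that framing for plain strings:
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;

class CountPrefixedBlobSketch {
  /** Serializes entries as [count][utf entry]...; an empty buffer signals "no data". */
  static ByteBuffer serialize(List<String> entries) {
    if (entries.isEmpty()) {
      return ByteBuffer.allocate(0);
    }
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    try {
      out.writeInt(entries.size());
      for (String entry : entries) {
        out.writeUTF(entry);
      }
    } catch (IOException ignored) {
      // not reachable for an in-memory stream, mirroring the ignored IOException above
    }
    return ByteBuffer.wrap(bytes.toByteArray());
  }
}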
- Map> prePartitionGroupPathMap = new HashMap<>(); - - RaftNode header = dataGroupMember.getHeader(); - Map slotPreviousHolderMap = - ((SlotPartitionTable) dataGroupMember.getMetaGroupMember().getPartitionTable()) - .getPreviousNodeMap() - .get(header); - - for (String prefixPath : prefixPaths) { - int slot = - ClusterUtils.getSlotByPathTimeWithSync( - new PartialPath(prefixPath), dataGroupMember.getMetaGroupMember()); - if (dataGroupMember.getSlotManager().checkSlotInMetaMigrationStatus(slot) - && slotPreviousHolderMap.containsKey(slot)) { - prePartitionGroupPathMap - .computeIfAbsent(slotPreviousHolderMap.get(slot), s -> new ArrayList<>()) - .add(prefixPath); - } else { - // getCSchemaProcessor() - // .collectTimeseriesSchema(new PartialPath(prefixPath), timeseriesSchemas); - } - } - - if (prePartitionGroupPathMap.isEmpty()) { - return; - } - for (Map.Entry> partitionGroupListEntry : - prePartitionGroupPathMap.entrySet()) { - PartitionGroup partitionGroup = partitionGroupListEntry.getKey(); - List paths = partitionGroupListEntry.getValue(); - MetaPuller.getInstance().pullTimeSeriesSchemas(partitionGroup, paths, timeseriesSchemas); - } - } - - /** - * Create an IReaderByTime of a path, register it in the query manager to get a reader id for it - * and send the id back to the requester. If the reader does not have any data, an id of -1 will - * be returned. - */ - public long querySingleSeriesByTimestamp(SingleSeriesQueryRequest request) - throws CheckConsistencyException, QueryProcessException, StorageEngineException, - MetadataException { - logger.debug( - "{}: {} is querying {} by timestamp, queryId: {}", - name, - request.getRequester(), - request.getPath(), - request.getQueryId()); - dataGroupMember.syncLeaderWithConsistencyCheck(false); - - MeasurementPath path = - getAssembledPathFromRequest(request.getPath(), (byte) request.getDataTypeOrdinal()); - // The request is routed to this node since this node contains the data and - // metadata of the designated timeseries. Because of which, the following metadata access will - // not trigger an RPC. 
- path.setMeasurementSchema(IoTDB.schemaProcessor.getSeriesSchema(path)); - TSDataType dataType = TSDataType.values()[request.dataTypeOrdinal]; - Set deviceMeasurements = request.getDeviceMeasurements(); - - RemoteQueryContext queryContext = - queryManager.getQueryContext(request.getRequester(), request.getQueryId()); - logger.debug( - DEBUG_SHOW_QUERY_ID, - name, - request.getQueryId(), - request.getPath(), - queryContext.getQueryId()); - IReaderByTimestamp readerByTimestamp = - readerFactory.getReaderByTimestamp( - path, - deviceMeasurements, - dataType, - queryContext, - dataGroupMember, - request.ascending, - request.requiredSlots); - if (readerByTimestamp != null) { - long readerId = queryManager.registerReaderByTime(readerByTimestamp); - queryContext.registerLocalReader(readerId); - - logger.debug( - "{}: Build a readerByTimestamp of {} for {}, readerId: {}", - name, - path, - request.getRequester(), - readerId); - return readerId; - } else { - logger.debug( - "{}: There is no data {} for {}#{}", - name, - path, - request.getRequester(), - request.getQueryId()); - return -1; - } - } - - public ByteBuffer getAllMeasurementSchema(MeasurementSchemaRequest request) - throws CheckConsistencyException, IOException, MetadataException { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - - ShowTimeSeriesPlan plan = (ShowTimeSeriesPlan) PhysicalPlan.Factory.create(request.planBinary); - List allTimeseriesSchema; - RemoteQueryContext queryContext = - queryManager.getQueryContext(request.getRequester(), request.getQueryId()); - allTimeseriesSchema = getCSchemaProcessor().showLocalTimeseries(plan, queryContext); - - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - try (DataOutputStream dataOutputStream = new DataOutputStream(outputStream)) { - dataOutputStream.writeInt(allTimeseriesSchema.size()); - for (ShowTimeSeriesResult result : allTimeseriesSchema) { - result.serialize(outputStream); - } - } - return ByteBuffer.wrap(outputStream.toByteArray()); - } - - public ByteBuffer getDevices(ByteBuffer planBuffer) - throws CheckConsistencyException, IOException, MetadataException { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - ShowDevicesPlan plan = (ShowDevicesPlan) PhysicalPlan.Factory.create(planBuffer); - List allDevicesResult = getCSchemaProcessor().getLocalDevices(plan); - - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - try (DataOutputStream dataOutputStream = new DataOutputStream(outputStream)) { - dataOutputStream.writeInt(allDevicesResult.size()); - for (ShowDevicesResult result : allDevicesResult) { - result.serialize(outputStream); - } - } - return ByteBuffer.wrap(outputStream.toByteArray()); - } - - /** - * Execute aggregations over the given path and return the results to the requester. 
- * - * @param request - */ - public List getAggrResult(GetAggrResultRequest request) - throws StorageEngineException, QueryProcessException, IOException { - logger.debug( - "{}: {} is querying {} by aggregation, queryId: {}", - name, - request.getRequestor(), - request.getPath(), - request.getQueryId()); - - List aggregations = request.getAggregations(); - TSDataType dataType = TSDataType.values()[request.getDataTypeOrdinal()]; - PartialPath path; - try { - path = new MeasurementPath(request.getPath(), dataType); - } catch (IllegalPathException e) { - logger.error( - "{}: aggregation has error path: {}, queryId: {}", - name, - request.getPath(), - request.getQueryId()); - throw new QueryProcessException(e); - } - Filter timeFilter = null; - if (request.isSetTimeFilterBytes()) { - timeFilter = FilterFactory.deserialize(request.timeFilterBytes); - } - RemoteQueryContext queryContext = - queryManager.getQueryContext(request.getRequestor(), request.queryId); - Set deviceMeasurements = request.getDeviceMeasurements(); - boolean ascending = request.ascending; - - // do the aggregations locally - List results; - results = - getAggrResult( - aggregations, deviceMeasurements, dataType, path, timeFilter, queryContext, ascending); - logger.trace("{}: aggregation results {}, queryId: {}", name, results, request.getQueryId()); - - // serialize and send the results - List resultBuffers = new ArrayList<>(); - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - for (AggregateResult result : results) { - try { - result.serializeTo(byteArrayOutputStream); - } catch (IOException e) { - // ignore since we are using a ByteArrayOutputStream - } - resultBuffers.add(ByteBuffer.wrap(byteArrayOutputStream.toByteArray())); - byteArrayOutputStream.reset(); - } - return resultBuffers; - } - - /** - * Execute "aggregation" over "path" with "timeFilter". This method currently requires strong - * consistency. Only data managed by this group will be used for aggregation. 
- * - * @param aggregations aggregation names in SQLConstant - * @param dataType - * @param path - * @param timeFilter nullable - * @param context - * @return - * @throws IOException - * @throws StorageEngineException - * @throws QueryProcessException - */ - public List getAggrResult( - List aggregations, - Set allSensors, - TSDataType dataType, - PartialPath path, - Filter timeFilter, - QueryContext context, - boolean ascending) - throws IOException, StorageEngineException, QueryProcessException { - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new QueryProcessException(e.getMessage()); - } - - List results = new ArrayList<>(); - List ascResults = new ArrayList<>(); - List descResults = new ArrayList<>(); - for (String aggregation : aggregations) { - AggregateResult ar = - AggregateResultFactory.getAggrResultByName(aggregation, dataType, ascending); - if (ar.isAscending()) { - ascResults.add(ar); - } else { - descResults.add(ar); - } - results.add(ar); - } - List nodeSlots = - ((SlotPartitionTable) dataGroupMember.getMetaGroupMember().getPartitionTable()) - .getNodeSlots(dataGroupMember.getHeader()); - AggregationExecutor.aggregateOneSeries( - path, - allSensors, - context, - timeFilter, - dataType, - ascResults, - descResults, - new SlotTsFileFilter(nodeSlots), - ascending); - return results; - } - - /** - * Check if the given measurements are registered or not - * - * @param timeseriesList - */ - public List getUnregisteredTimeseries(List timeseriesList) - throws CheckConsistencyException { - dataGroupMember.syncLeaderWithConsistencyCheck(true); - - List result = new ArrayList<>(); - for (String seriesPath : timeseriesList) { - try { - List path = - getCSchemaProcessor().getMeasurementPaths(new PartialPath(seriesPath)); - if (path.size() != 1) { - throw new MetadataException( - String.format("Timeseries number of the name [%s] is not 1.", seriesPath)); - } - } catch (MetadataException e) { - result.add(seriesPath); - } - } - return result; - } - - /** - * Create a local GroupByExecutor that will run aggregations of "aggregationTypes" over "path" - * with "timeFilter". The method currently requires strong consistency. 
- * - * @param path - * @param dataType - * @param timeFilter nullable - * @param aggregationTypes ordinals of AggregationType - * @param context - * @return - * @throws StorageEngineException - */ - public LocalGroupByExecutor getGroupByExecutor( - PartialPath path, - Set deviceMeasurements, - TSDataType dataType, - Filter timeFilter, - List aggregationTypes, - QueryContext context, - boolean ascending) - throws StorageEngineException, QueryProcessException { - // pull the newest data - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new StorageEngineException(e); - } - - List nodeSlots = - ((SlotPartitionTable) dataGroupMember.getMetaGroupMember().getPartitionTable()) - .getNodeSlots(dataGroupMember.getHeader()); - LocalGroupByExecutor executor = - new LocalGroupByExecutor( - path, - deviceMeasurements, - context, - timeFilter, - new SlotTsFileFilter(nodeSlots), - ascending); - for (Integer aggregationType : aggregationTypes) { - executor.addAggregateResult( - AggregateResultFactory.getAggrResultByType( - AggregationType.values()[aggregationType], dataType, ascending)); - } - return executor; - } - - /** - * Create a local GroupByExecutor that will run aggregations of "aggregationTypes" over "path" - * with "timeFilter", register it in the query manager to generate the executor id, and send it - * back to the requester. - * - * @param request - */ - public long getGroupByExecutor(GroupByRequest request) - throws QueryProcessException, StorageEngineException { - List aggregationTypeOrdinals = request.getAggregationTypeOrdinals(); - TSDataType dataType = TSDataType.values()[request.getDataTypeOrdinal()]; - PartialPath path; - try { - path = new MeasurementPath(request.getPath(), dataType); - } catch (IllegalPathException e) { - throw new QueryProcessException(e); - } - Filter timeFilter = null; - if (request.isSetTimeFilterBytes()) { - timeFilter = FilterFactory.deserialize(request.timeFilterBytes); - } - long queryId = request.getQueryId(); - logger.debug( - "{}: {} is querying {} using group by, queryId: {}", - name, - request.getRequestor(), - path, - queryId); - Set deviceMeasurements = request.getDeviceMeasurements(); - boolean ascending = request.ascending; - - RemoteQueryContext queryContext = queryManager.getQueryContext(request.getRequestor(), queryId); - LocalGroupByExecutor executor = - getGroupByExecutor( - path, - deviceMeasurements, - dataType, - timeFilter, - aggregationTypeOrdinals, - queryContext, - ascending); - boolean isEmpty; - try { - isEmpty = executor.isEmpty(); - } catch (IOException e) { - logger.error("Something wrong happened", e); - throw new QueryProcessException(e, TSStatusCode.INTERNAL_SERVER_ERROR.ordinal()); - } - if (!isEmpty) { - long executorId = queryManager.registerGroupByExecutor(executor); - logger.debug( - "{}: Build a GroupByExecutor of {} for {}, executorId: {}", - name, - path, - request.getRequestor(), - executor); - queryContext.registerLocalGroupByExecutor(executorId); - return executorId; - } else { - logger.debug( - "{}: There is no data {} for {}#{}", - name, - path, - request.getRequestor(), - request.getQueryId()); - return -1; - } - } - - /** - * Fetch the aggregation results between [startTime, endTime] of the executor whose id is - * "executorId". This method currently requires strong consistency. 
- * - * @param executorId - * @param startTime - * @param endTime - */ - public List<ByteBuffer> getGroupByResult(long executorId, long startTime, long endTime) - throws ReaderNotFoundException, IOException, QueryProcessException { - GroupByExecutor executor = queryManager.getGroupByExecutor(executorId); - if (executor == null) { - throw new ReaderNotFoundException(executorId); - } - List<AggregateResult> results = executor.calcResult(startTime, endTime); - List<ByteBuffer> resultBuffers = new ArrayList<>(); - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - for (AggregateResult result : results) { - result.serializeTo(byteArrayOutputStream); - resultBuffers.add(ByteBuffer.wrap(byteArrayOutputStream.toByteArray())); - byteArrayOutputStream.reset(); - } - logger.debug( - "{}: Send results of group by executor {}, size:{}", name, executor, resultBuffers.size()); - return resultBuffers; - } - - /** - * Returns a non-null ByteBuffer as the thrift response, since Thrift does not allow null - * objects. If the ByteBuffer data equals <0, null>, it means that the NextNotNullValue is null. - */ - public ByteBuffer peekNextNotNullValue(long executorId, long startTime, long endTime) - throws ReaderNotFoundException, IOException { - GroupByExecutor executor = queryManager.getGroupByExecutor(executorId); - if (executor == null) { - throw new ReaderNotFoundException(executorId); - } - Pair<Long, Object> pair = executor.peekNextNotNullValue(startTime, endTime); - if (pair == null) { - pair = new Pair<>(0L, null); - } - ByteBuffer resultBuffer; - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - try (DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream)) { - dataOutputStream.writeLong(pair.left); - SerializeUtils.serializeObject(pair.right, dataOutputStream); - resultBuffer = ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - } - logger.debug( - "{}: Send results of group by executor {}, size:{}", name, executor, resultBuffer.limit()); - return resultBuffer; - } - - public ByteBuffer previousFill(PreviousFillRequest request) - throws QueryProcessException, StorageEngineException, IOException, IllegalPathException { - TSDataType dataType = TSDataType.values()[request.getDataTypeOrdinal()]; - PartialPath path = new MeasurementPath(request.getPath(), dataType); - long queryId = request.getQueryId(); - long queryTime = request.getQueryTime(); - long beforeRange = request.getBeforeRange(); - Node requester = request.getRequester(); - Set<String> deviceMeasurements = request.getDeviceMeasurements(); - RemoteQueryContext queryContext = queryManager.getQueryContext(requester, queryId); - - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - TimeValuePair timeValuePair = - localPreviousFill(path, dataType, queryTime, beforeRange, deviceMeasurements, queryContext); - SerializeUtils.serializeTVPair(timeValuePair, dataOutputStream); - return ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - } - - /** - * Perform a local previous fill and return the fill result. 
- * - * @param path - * @param dataType - * @param queryTime - * @param beforeRange - * @param deviceMeasurements - * @param context - * @return - * @throws QueryProcessException - * @throws StorageEngineException - * @throws IOException - */ - public TimeValuePair localPreviousFill( - PartialPath path, - TSDataType dataType, - long queryTime, - long beforeRange, - Set deviceMeasurements, - QueryContext context) - throws QueryProcessException, StorageEngineException, IOException { - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new QueryProcessException(e.getMessage()); - } - - PreviousFill previousFill = new PreviousFill(dataType, queryTime, beforeRange); - previousFill.configureFill(path, dataType, queryTime, deviceMeasurements, context); - return previousFill.getFillResult(); - } - - public int getPathCount(List pathsToQuery, int level) - throws CheckConsistencyException, MetadataException { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - - int count = 0; - for (String s : pathsToQuery) { - if (level == -1) { - count += getCSchemaProcessor().getAllTimeseriesCount(new PartialPath(s)); - } else { - count += getCSchemaProcessor().getNodesCountInGivenLevel(new PartialPath(s), level); - } - } - return count; - } - - public int getDeviceCount(List pathsToQuery) - throws CheckConsistencyException, MetadataException { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - - int count = 0; - for (String s : pathsToQuery) { - count += getCSchemaProcessor().getDevicesNum(new PartialPath(s)); - } - return count; - } - - @SuppressWarnings("java:S1135") // ignore todos - public ByteBuffer last(LastQueryRequest request) - throws CheckConsistencyException, QueryProcessException, IOException, StorageEngineException, - MetadataException { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - - RemoteQueryContext queryContext = - queryManager.getQueryContext(request.getRequestor(), request.getQueryId()); - List seriesPaths = new ArrayList<>(); - for (String path : request.getPaths()) { - PartialPath partialPath = new PartialPath(path); - seriesPaths.add( - new MeasurementPath(partialPath, IoTDB.schemaProcessor.getSeriesSchema(partialPath))); - } - List dataTypes = new ArrayList<>(request.dataTypeOrdinals.size()); - for (Integer dataTypeOrdinal : request.dataTypeOrdinals) { - dataTypes.add(TSDataType.values()[dataTypeOrdinal]); - } - - IExpression expression = null; - if (request.isSetFilterBytes()) { - Filter filter = FilterFactory.deserialize(request.filterBytes); - expression = new GlobalTimeExpression(filter); - } - - List timeValuePairs = - LastQueryExecutor.calculateLastPairForSeriesLocally( - seriesPaths, dataTypes, queryContext, expression, request.getDeviceMeasurements()); - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - for (TimeValuePair timeValuePair : timeValuePairs) { - SerializeUtils.serializeTVPair(timeValuePair, dataOutputStream); - } - return ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/RemoteQueryContext.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/RemoteQueryContext.java deleted file mode 100644 index 0f05411f7e6a..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/RemoteQueryContext.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) 
under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.db.query.context.QueryContext; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentSkipListSet; - -public class RemoteQueryContext extends QueryContext { - /** The remote nodes that are queried in this query, grouped by the header nodes. */ - private Map> queriedNodesMap = new HashMap<>(); - /** The readers constructed locally to respond a remote query. */ - private Set localReaderIds = new ConcurrentSkipListSet<>(); - - /** The readers constructed locally to respond a remote query. */ - private Set localGroupByExecutorIds = new ConcurrentSkipListSet<>(); - - public RemoteQueryContext(long jobId) { - super(jobId); - } - - public RemoteQueryContext( - long jobId, boolean debug, long startTime, String statement, long timeout) { - super(jobId, debug, startTime, statement, timeout); - } - - public void registerRemoteNode(Node node, RaftNode header) { - queriedNodesMap.computeIfAbsent(header, n -> new HashSet<>()).add(node); - } - - public void registerLocalReader(long readerId) { - localReaderIds.add(readerId); - } - - public void registerLocalGroupByExecutor(long executorId) { - localGroupByExecutorIds.add(executorId); - } - - public Set getLocalReaderIds() { - return localReaderIds; - } - - public Set getLocalGroupByExecutorIds() { - return localGroupByExecutorIds; - } - - public Map> getQueriedNodesMap() { - return queriedNodesMap; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/aggregate/ClusterAggregateExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/aggregate/ClusterAggregateExecutor.java deleted file mode 100644 index 434825d90190..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/aggregate/ClusterAggregateExecutor.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.aggregate; - -import org.apache.iotdb.cluster.query.reader.ClusterReaderFactory; -import org.apache.iotdb.cluster.query.reader.ClusterTimeGenerator; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.physical.crud.AggregationPlan; -import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan; -import org.apache.iotdb.db.query.aggregation.AggregateResult; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.executor.AggregationExecutor; -import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; -import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; - -public class ClusterAggregateExecutor extends AggregationExecutor { - - private MetaGroupMember metaMember; - private ClusterReaderFactory readerFactory; - private ClusterAggregator aggregator; - - /** - * constructor. - * - * @param aggregationPlan - */ - public ClusterAggregateExecutor( - QueryContext context, AggregationPlan aggregationPlan, MetaGroupMember metaMember) { - super(context, aggregationPlan); - this.metaMember = metaMember; - this.readerFactory = new ClusterReaderFactory(metaMember); - this.aggregator = new ClusterAggregator(metaMember); - } - - @Override - protected void aggregateOneSeries( - PartialPath seriesPath, - List indexes, - Set allMeasurementsInDevice, - Filter timeFilter) - throws StorageEngineException { - TSDataType tsDataType = dataTypes.get(indexes.get(0)); - List aggregationNames = new ArrayList<>(); - - for (int i : indexes) { - aggregationNames.add(aggregations.get(i)); - } - List aggregateResult = - aggregator.getAggregateResult( - seriesPath, - allMeasurementsInDevice, - aggregationNames, - tsDataType, - timeFilter, - context, - ascending); - int rstIndex = 0; - for (int i : indexes) { - aggregateResultList[i] = aggregateResult.get(rstIndex++); - } - } - - @Override - protected TimeGenerator getTimeGenerator(QueryContext context, RawDataQueryPlan rawDataQueryPlan) - throws StorageEngineException { - return new ClusterTimeGenerator(context, metaMember, rawDataQueryPlan, false); - } - - @Override - protected IReaderByTimestamp getReaderByTime( - PartialPath path, RawDataQueryPlan dataQueryPlan, TSDataType dataType, QueryContext context) - throws StorageEngineException, QueryProcessException { - return readerFactory.getReaderByTimestamp( - path, - dataQueryPlan.getAllMeasurementsInDevice(path.getDevice()), - dataType, - context, - dataQueryPlan.isAscending(), - null); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/aggregate/ClusterAggregator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/aggregate/ClusterAggregator.java deleted file mode 100644 index 5750873396cc..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/aggregate/ClusterAggregator.java +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.aggregate; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.EmptyIntervalException; -import org.apache.iotdb.cluster.exception.RequestTimeOutException; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.query.LocalQueryExecutor; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.query.manage.QueryCoordinator; -import org.apache.iotdb.cluster.rpc.thrift.GetAggrResultRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.query.aggregation.AggregateResult; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.common.Path; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; - -import org.apache.thrift.TApplicationException; -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Set; - -@SuppressWarnings("java:S107") -public class ClusterAggregator { - - private static final Logger logger = LoggerFactory.getLogger(ClusterAggregator.class); - private MetaGroupMember metaGroupMember; - - public ClusterAggregator(MetaGroupMember metaGroupMember) { - this.metaGroupMember = metaGroupMember; - } - - /** - * Perform "aggregations" over "path" in some data groups and merge the results. The groups to be - * queried is determined by "timeFilter". 
- * - * @param timeFilter nullable, when null, all groups will be queried - * @param ascending - */ - public List getAggregateResult( - PartialPath path, - Set deviceMeasurements, - List aggregations, - TSDataType dataType, - Filter timeFilter, - QueryContext context, - boolean ascending) - throws StorageEngineException { - // make sure the partition table is new - try { - metaGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new StorageEngineException(e); - } - // find groups to be queried using timeFilter and path - List partitionGroups; - try { - partitionGroups = metaGroupMember.routeFilter(timeFilter, path); - } catch (EmptyIntervalException e) { - logger.info(e.getMessage()); - partitionGroups = Collections.emptyList(); - } - logger.debug( - "{}: Sending aggregation query of {} to {} groups", - metaGroupMember.getName(), - path, - partitionGroups.size()); - List results = null; - // get the aggregation result of each group and merge them - for (PartitionGroup partitionGroup : partitionGroups) { - List groupResult = - getAggregateResult( - path, - deviceMeasurements, - aggregations, - dataType, - timeFilter, - partitionGroup, - context, - ascending); - if (results == null) { - // the first results - results = groupResult; - } else { - for (int i = 0; i < results.size(); i++) { - results.get(i).merge(groupResult.get(i)); - } - } - } - return results; - } - - /** - * Perform "aggregations" over "path" in "partitionGroup". If the local node is the member of the - * group, do it locally, otherwise pull the results from a remote node. - * - * @param timeFilter nullable - */ - private List getAggregateResult( - PartialPath path, - Set deviceMeasurements, - List aggregations, - TSDataType dataType, - Filter timeFilter, - PartitionGroup partitionGroup, - QueryContext context, - boolean ascending) - throws StorageEngineException { - if (!partitionGroup.contains(metaGroupMember.getThisNode())) { - return getRemoteAggregateResult( - path, - deviceMeasurements, - aggregations, - dataType, - timeFilter, - partitionGroup, - context, - ascending); - } else { - // perform the aggregations locally - DataGroupMember dataMember = - metaGroupMember.getLocalDataMember( - partitionGroup.getHeader(), partitionGroup.getRaftId()); - LocalQueryExecutor localQueryExecutor = new LocalQueryExecutor(dataMember); - try { - logger.debug( - "{}: querying aggregation {} of {} in {} locally", - metaGroupMember.getName(), - aggregations, - path, - partitionGroup.getHeader()); - List aggrResult = - localQueryExecutor.getAggrResult( - aggregations, deviceMeasurements, dataType, path, timeFilter, context, ascending); - logger.debug( - "{}: queried aggregation {} of {} in {} locally are {}", - metaGroupMember.getName(), - aggregations, - path, - partitionGroup.getHeader(), - aggrResult); - return aggrResult; - } catch (IOException | QueryProcessException e) { - throw new StorageEngineException(e); - } - } - } - - /** - * Perform "aggregations" over "path" in a remote data group "partitionGroup". Query one node in - * the group to get the results. 
- * - * @param timeFilter nullable - */ - private List getRemoteAggregateResult( - Path path, - Set deviceMeasurements, - List aggregations, - TSDataType dataType, - Filter timeFilter, - PartitionGroup partitionGroup, - QueryContext context, - boolean ascending) - throws StorageEngineException { - - GetAggrResultRequest request = new GetAggrResultRequest(); - request.setPath(path.getFullPath()); - request.setAggregations(aggregations); - request.setDataTypeOrdinal(dataType.ordinal()); - request.setQueryId(context.getQueryId()); - request.setRequestor(metaGroupMember.getThisNode()); - request.setHeader(partitionGroup.getHeader()); - request.setDeviceMeasurements(deviceMeasurements); - request.setAscending(ascending); - if (timeFilter != null) { - request.setTimeFilterBytes(SerializeUtils.serializeFilter(timeFilter)); - } - - // put nodes with lowest delay at first - List reorderedNodes = QueryCoordinator.getINSTANCE().reorderNodes(partitionGroup); - for (Node node : reorderedNodes) { - logger.debug( - "{}: querying aggregation {} of {} from {} of {}", - metaGroupMember.getName(), - aggregations, - path, - node, - partitionGroup.getHeader()); - - try { - List resultBuffers = getRemoteAggregateResult(node, request); - if (resultBuffers != null) { - List results = new ArrayList<>(resultBuffers.size()); - for (ByteBuffer resultBuffer : resultBuffers) { - AggregateResult result = AggregateResult.deserializeFrom(resultBuffer); - results.add(result); - } - // register the queried node to release resources when the query ends - ((RemoteQueryContext) context).registerRemoteNode(node, partitionGroup.getHeader()); - logger.debug( - "{}: queried aggregation {} of {} from {} of {} are {}", - metaGroupMember.getName(), - aggregations, - path, - node, - partitionGroup.getHeader(), - results); - return results; - } - } catch (TApplicationException e) { - logger.error( - metaGroupMember.getName() + " query aggregation error " + path + " from " + node, e); - throw new StorageEngineException(e.getMessage()); - } catch (TException | IOException e) { - logger.error( - metaGroupMember.getName() + " cannot query aggregation " + path + " from " + node, e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error(metaGroupMember.getName() + " query interrupted " + path + " from " + node, e); - } - } - throw new StorageEngineException( - new RequestTimeOutException("Query aggregate: " + path + " in " + partitionGroup)); - } - - private List getRemoteAggregateResult(Node node, GetAggrResultRequest request) - throws IOException, TException, InterruptedException { - List resultBuffers = null; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - // each buffer is an AggregationResult - resultBuffers = SyncClientAdaptor.getAggrResult(client, request); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - resultBuffers = syncDataClient.getAggrResult(request); - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - return resultBuffers; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterFillExecutor.java 
b/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterFillExecutor.java deleted file mode 100644 index c0fe38d6f8c2..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterFillExecutor.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.fill; - -import org.apache.iotdb.cluster.query.reader.ClusterReaderFactory; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.executor.FillQueryExecutor; -import org.apache.iotdb.db.query.executor.fill.IFill; -import org.apache.iotdb.db.query.executor.fill.LinearFill; -import org.apache.iotdb.db.query.executor.fill.PreviousFill; -import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.utils.TsPrimitiveType; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; - -public class ClusterFillExecutor extends FillQueryExecutor { - - private MetaGroupMember metaGroupMember; - private ClusterReaderFactory clusterReaderFactory; - - public ClusterFillExecutor(FillQueryPlan plan, MetaGroupMember metaGroupMember) { - super(plan); - this.metaGroupMember = metaGroupMember; - this.clusterReaderFactory = new ClusterReaderFactory(metaGroupMember); - } - - @Override - protected IFill configureFill( - IFill fill, - PartialPath path, - TSDataType dataType, - long queryTime, - Set deviceMeasurements, - QueryContext context) - throws QueryProcessException, StorageEngineException { - if (fill instanceof LinearFill) { - IFill clusterFill = new ClusterLinearFill((LinearFill) fill, metaGroupMember); - clusterFill.configureFill(path, dataType, queryTime, deviceMeasurements, context); - return clusterFill; - } else if (fill instanceof PreviousFill) { - IFill clusterFill = new ClusterPreviousFill((PreviousFill) fill, metaGroupMember); - clusterFill.configureFill(path, dataType, queryTime, deviceMeasurements, context); - return clusterFill; - } else { - fill.configureFill(path, dataType, queryTime, deviceMeasurements, context); - return fill; - } - } - - @Override - protected List getTimeValuePairs(QueryContext context) - throws QueryProcessException, StorageEngineException, IOException { - List ret = new ArrayList<>(selectedSeries.size()); - - for (int i = 0; i < 
selectedSeries.size(); i++) { - PartialPath path = selectedSeries.get(i); - TSDataType dataType = dataTypes.get(i); - IReaderByTimestamp reader = - clusterReaderFactory.getReaderByTimestamp( - path, - plan.getAllMeasurementsInDevice(path.getDevice()), - dataTypes.get(i), - context, - plan.isAscending(), - null); - - Object[] results = reader.getValuesInTimestamps(new long[] {queryTime}, 1); - if (results != null && results[0] != null) { - ret.add(new TimeValuePair(queryTime, TsPrimitiveType.getByType(dataType, results[0]))); - } else { - ret.add(null); - } - } - - return ret; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterLinearFill.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterLinearFill.java deleted file mode 100644 index 4a2cd737a4ae..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterLinearFill.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.fill; - -import org.apache.iotdb.cluster.query.aggregate.ClusterAggregator; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.constant.SQLConstant; -import org.apache.iotdb.db.query.aggregation.AggregateResult; -import org.apache.iotdb.db.query.executor.fill.LinearFill; -import org.apache.iotdb.tsfile.read.TimeValuePair; - -import java.util.Arrays; -import java.util.List; - -/** - * ClusterLinearFill overrides the dataReader in LinearFill so that it can read data from the whole - * cluster instead of only the local node. 
- */ -public class ClusterLinearFill extends LinearFill { - - private MetaGroupMember metaGroupMember; - private ClusterAggregator aggregator; - private static final List AGGREGATION_NAMES = - Arrays.asList(SQLConstant.MIN_TIME, SQLConstant.FIRST_VALUE); - - ClusterLinearFill(LinearFill fill, MetaGroupMember metaGroupMember) { - super( - fill.getDataType(), fill.getQueryStartTime(), fill.getBeforeRange(), fill.getAfterRange()); - this.metaGroupMember = metaGroupMember; - this.aggregator = new ClusterAggregator(metaGroupMember); - } - - @Override - protected TimeValuePair calculatePrecedingPoint() - throws QueryProcessException, StorageEngineException { - // calculate the preceding point can be viewed as a previous fill - ClusterPreviousFill clusterPreviousFill = - new ClusterPreviousFill(dataType, queryStartTime, beforeRange, metaGroupMember); - clusterPreviousFill.configureFill( - seriesPath, dataType, queryStartTime, deviceMeasurements, context); - return clusterPreviousFill.getFillResult(); - } - - @Override - protected TimeValuePair calculateSucceedingPoint() throws StorageEngineException { - - List aggregateResult = - aggregator.getAggregateResult( - seriesPath, - deviceMeasurements, - AGGREGATION_NAMES, - dataType, - afterFilter, - context, - true); - AggregateResult minTimeResult = aggregateResult.get(0); - AggregateResult firstValueResult = aggregateResult.get(1); - - return convertToResult(minTimeResult, firstValueResult); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterPreviousFill.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterPreviousFill.java deleted file mode 100644 index b98b0297c7f7..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/ClusterPreviousFill.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.fill; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.QueryTimeOutException; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PreviousFillRequest; -import org.apache.iotdb.cluster.server.handlers.caller.PreviousFillHandler; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.executor.fill.PreviousFill; -import org.apache.iotdb.db.utils.TimeValuePairUtils.Intervals; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.TimeValuePair; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -public class ClusterPreviousFill extends PreviousFill { - - private static final Logger logger = LoggerFactory.getLogger(ClusterPreviousFill.class); - private MetaGroupMember metaGroupMember; - private TimeValuePair fillResult; - private static final String PREVIOUS_FILL_EXCEPTION_LOGGER_FORMAT = - "{}: Cannot perform previous fill of {} to {}"; - - ClusterPreviousFill(PreviousFill fill, MetaGroupMember metaGroupMember) { - super(fill.getDataType(), fill.getQueryStartTime(), fill.getBeforeRange()); - this.metaGroupMember = metaGroupMember; - } - - ClusterPreviousFill( - TSDataType dataType, long queryTime, long beforeRange, MetaGroupMember metaGroupMember) { - super(dataType, queryTime, beforeRange); - this.metaGroupMember = metaGroupMember; - } - - @Override - public void configureFill( - PartialPath path, - TSDataType dataType, - long queryTime, - Set deviceMeasurements, - QueryContext context) - throws QueryProcessException, StorageEngineException { - fillResult = - performPreviousFill( - path, dataType, queryTime, getBeforeRange(), deviceMeasurements, context); - } - - @Override - public TimeValuePair getFillResult() { - return fillResult; - } - - private TimeValuePair performPreviousFill( - PartialPath path, - TSDataType dataType, - long queryTime, - long beforeRange, - Set deviceMeasurements, - QueryContext context) - throws StorageEngineException, QueryProcessException { - // make sure the partition table is new - try { - metaGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new StorageEngineException(e); - } - // find the groups that should be queried using the time range - Intervals intervals = new Intervals(); - long lowerBound = beforeRange == -1 ? 
Long.MIN_VALUE : queryTime - beforeRange; - intervals.addInterval(lowerBound, queryTime); - List partitionGroups = metaGroupMember.routeIntervals(intervals, path); - if (logger.isDebugEnabled()) { - logger.debug( - "{}: Sending data query of {} to {} groups", - metaGroupMember.getName(), - path, - partitionGroups.size()); - } - CountDownLatch latch = new CountDownLatch(partitionGroups.size()); - PreviousFillHandler handler = new PreviousFillHandler(latch); - // TODO: create a thread pool for each query calling. - ExecutorService fillService = Executors.newFixedThreadPool(partitionGroups.size()); - PreviousFillArguments arguments = - new PreviousFillArguments(path, dataType, queryTime, beforeRange, deviceMeasurements); - - for (PartitionGroup partitionGroup : partitionGroups) { - fillService.submit(() -> performPreviousFill(arguments, context, partitionGroup, handler)); - } - fillService.shutdown(); - try { - boolean terminated = - fillService.awaitTermination( - ClusterConstant.getReadOperationTimeoutMS(), TimeUnit.MILLISECONDS); - if (!terminated) { - logger.warn("Executor service termination timed out"); - } - } catch (InterruptedException e) { - throw new QueryProcessException(e.getMessage()); - } - return handler.getResult(); - } - - private void performPreviousFill( - PreviousFillArguments arguments, - QueryContext context, - PartitionGroup group, - PreviousFillHandler fillHandler) { - if (group.contains(metaGroupMember.getThisNode())) { - localPreviousFill(arguments, context, group, fillHandler); - } else { - remotePreviousFill(arguments, context, group, fillHandler); - } - } - - private void localPreviousFill( - PreviousFillArguments arguments, - QueryContext context, - PartitionGroup group, - PreviousFillHandler fillHandler) { - DataGroupMember localDataMember = - metaGroupMember.getLocalDataMember(group.getHeader(), group.getRaftId()); - try { - fillHandler.onComplete( - localDataMember - .getLocalQueryExecutor() - .localPreviousFill( - arguments.getPath(), - arguments.getDataType(), - arguments.getQueryTime(), - arguments.getBeforeRange(), - arguments.getDeviceMeasurements(), - context)); - } catch (QueryProcessException | StorageEngineException | IOException e) { - fillHandler.onError(e); - } - } - - private void remotePreviousFill( - PreviousFillArguments arguments, - QueryContext context, - PartitionGroup group, - PreviousFillHandler fillHandler) { - PreviousFillRequest request = - new PreviousFillRequest( - arguments.getPath().getFullPath(), - arguments.getQueryTime(), - arguments.getBeforeRange(), - context.getQueryId(), - metaGroupMember.getThisNode(), - group.getHeader(), - arguments.getDataType().ordinal(), - arguments.getDeviceMeasurements()); - - for (Node node : group) { - ByteBuffer byteBuffer; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - byteBuffer = remoteAsyncPreviousFill(node, request, arguments); - } else { - byteBuffer = remoteSyncPreviousFill(node, request, arguments); - } - - if (byteBuffer != null) { - fillHandler.onComplete(byteBuffer); - return; - } - } - - fillHandler.onError( - new QueryTimeOutException( - String.format( - "PreviousFill %s@%d range: %d", - arguments.getPath().getFullPath(), - arguments.getQueryTime(), - arguments.getBeforeRange()))); - } - - private ByteBuffer remoteAsyncPreviousFill( - Node node, PreviousFillRequest request, PreviousFillArguments arguments) { - ByteBuffer byteBuffer = null; - try { - AsyncDataClient asyncDataClient = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, 
ClusterConstant.getReadOperationTimeoutMS()); - byteBuffer = SyncClientAdaptor.previousFill(asyncDataClient, request); - } catch (IOException e) { - logger.warn("{}: Cannot connect to {} during previous fill", metaGroupMember, node); - } catch (Exception e) { - logger.error( - PREVIOUS_FILL_EXCEPTION_LOGGER_FORMAT, metaGroupMember, arguments.getPath(), node, e); - } - return byteBuffer; - } - - private ByteBuffer remoteSyncPreviousFill( - Node node, PreviousFillRequest request, PreviousFillArguments arguments) { - ByteBuffer byteBuffer = null; - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - byteBuffer = syncDataClient.previousFill(request); - - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - logger.error( - PREVIOUS_FILL_EXCEPTION_LOGGER_FORMAT, - metaGroupMember.getName(), - arguments.getPath(), - node, - e); - } catch (Exception e) { - logger.error( - PREVIOUS_FILL_EXCEPTION_LOGGER_FORMAT, - metaGroupMember.getName(), - arguments.getPath(), - node, - e); - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - return byteBuffer; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/PreviousFillArguments.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/PreviousFillArguments.java deleted file mode 100644 index b28df07f5d0d..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/fill/PreviousFillArguments.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.fill; - -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; - -import java.util.Set; - -/** - * PreviousFillParameter records necessary parameters of a previous fill over a single timeseries, - * avoiding the corresponding method call having too many arguments and increasing flexibility. 
- */ -public class PreviousFillArguments { - private PartialPath path; - private TSDataType dataType; - private long queryTime; - private long beforeRange; - private Set deviceMeasurements; - - public PreviousFillArguments( - PartialPath path, - TSDataType dataType, - long queryTime, - long beforeRange, - Set deviceMeasurements) { - this.path = path; - this.dataType = dataType; - this.queryTime = queryTime; - this.beforeRange = beforeRange; - this.deviceMeasurements = deviceMeasurements; - } - - public PartialPath getPath() { - return path; - } - - public TSDataType getDataType() { - return dataType; - } - - public long getQueryTime() { - return queryTime; - } - - public long getBeforeRange() { - return beforeRange; - } - - public Set getDeviceMeasurements() { - return deviceMeasurements; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/filter/SlotSgFilter.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/filter/SlotSgFilter.java deleted file mode 100644 index 4e8f67d81e8d..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/filter/SlotSgFilter.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.filter; - -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.db.metadata.LocalSchemaProcessor.StorageGroupFilter; - -import java.util.List; - -public class SlotSgFilter implements StorageGroupFilter { - - private List slots; - - public SlotSgFilter(List slots) { - this.slots = slots; - } - - @Override - public boolean satisfy(String storageGroup) { - return satisfy(storageGroup, slots); - } - - private static boolean satisfy(String storageGroup, List nodeSlots) { - int slot = - SlotPartitionTable.getSlotStrategy() - .calculateSlotByPartitionNum(storageGroup, 0, ClusterConstant.SLOT_NUM); - return nodeSlots.contains(slot); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/filter/SlotTsFileFilter.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/filter/SlotTsFileFilter.java deleted file mode 100644 index b71e32160a88..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/filter/SlotTsFileFilter.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.filter; - -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.db.engine.storagegroup.TsFileResource; -import org.apache.iotdb.db.query.filter.TsFileFilter; -import org.apache.iotdb.tsfile.utils.FilePathUtils; -import org.apache.iotdb.tsfile.utils.Pair; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -public class SlotTsFileFilter implements TsFileFilter { - - private static final Logger logger = LoggerFactory.getLogger(SlotTsFileFilter.class); - private Set slots; - - public SlotTsFileFilter(Set slots) { - this.slots = slots; - } - - public SlotTsFileFilter(List slots) { - this.slots = new HashSet<>(slots); - } - - @Override - public boolean fileNotSatisfy(TsFileResource resource) { - return fileNotInSlots(resource, slots); - } - - private static boolean fileNotInSlots(TsFileResource resource, Set nodeSlots) { - Pair sgNameAndPartitionIdPair = - FilePathUtils.getLogicalSgNameAndTimePartitionIdPair( - resource.getTsFile().getAbsolutePath()); - int slot = - SlotPartitionTable.getSlotStrategy() - .calculateSlotByPartitionNum( - sgNameAndPartitionIdPair.left, - sgNameAndPartitionIdPair.right, - ClusterConstant.SLOT_NUM); - boolean contained = nodeSlots.contains(slot); - logger.debug( - "The slot of {} is {}, contained: {}", resource.getTsFile().getPath(), slot, contained); - return !contained; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/ClusterGroupByNoVFilterDataSet.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/ClusterGroupByNoVFilterDataSet.java deleted file mode 100644 index e356cb9d5df2..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/ClusterGroupByNoVFilterDataSet.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.groupby; - -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.qp.physical.crud.GroupByTimePlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.dataset.groupby.GroupByWithoutValueFilterDataSet; -import org.apache.iotdb.db.query.executor.groupby.GroupByExecutor; -import org.apache.iotdb.db.query.filter.TsFileFilter; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; - -import java.util.ArrayList; -import java.util.Set; - -@SuppressWarnings("common-java:DuplicatedBlocks") -public class ClusterGroupByNoVFilterDataSet extends GroupByWithoutValueFilterDataSet { - - private MetaGroupMember metaGroupMember; - - public ClusterGroupByNoVFilterDataSet( - QueryContext context, GroupByTimePlan groupByPlan, MetaGroupMember metaGroupMember) { - initQueryDataSetFields( - new ArrayList<>(groupByPlan.getDeduplicatedPaths()), - groupByPlan.getDeduplicatedDataTypes(), - groupByPlan.isAscending()); - initGroupByTimeDataSetFields(context, groupByPlan); - - this.metaGroupMember = metaGroupMember; - } - - @Override - protected GroupByExecutor getGroupByExecutor( - PartialPath path, - Set deviceMeasurements, - QueryContext context, - Filter timeFilter, - TsFileFilter fileFilter, - boolean ascending) { - return new MergeGroupByExecutor( - path, deviceMeasurements, context, timeFilter, metaGroupMember, ascending); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/ClusterGroupByVFilterDataSet.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/ClusterGroupByVFilterDataSet.java deleted file mode 100644 index f3db01153de4..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/ClusterGroupByVFilterDataSet.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.groupby; - -import org.apache.iotdb.cluster.query.reader.ClusterReaderFactory; -import org.apache.iotdb.cluster.query.reader.ClusterTimeGenerator; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.physical.crud.GroupByTimePlan; -import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.dataset.groupby.GroupByWithValueFilterDataSet; -import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp; -import org.apache.iotdb.tsfile.read.query.timegenerator.TimeGenerator; - -import java.util.ArrayList; - -@SuppressWarnings("common-java:DuplicatedBlocks") -public class ClusterGroupByVFilterDataSet extends GroupByWithValueFilterDataSet { - - private MetaGroupMember metaGroupMember; - private ClusterReaderFactory readerFactory; - - public ClusterGroupByVFilterDataSet( - QueryContext context, GroupByTimePlan groupByPlan, MetaGroupMember metaGroupMember) { - initQueryDataSetFields( - new ArrayList<>(groupByPlan.getDeduplicatedPaths()), - groupByPlan.getDeduplicatedDataTypes(), - groupByPlan.isAscending()); - initGroupByTimeDataSetFields(context, groupByPlan); - - this.timeStampFetchSize = IoTDBDescriptor.getInstance().getConfig().getBatchSize(); - this.metaGroupMember = metaGroupMember; - this.readerFactory = new ClusterReaderFactory(metaGroupMember); - } - - @Override - protected TimeGenerator getTimeGenerator(QueryContext context, RawDataQueryPlan rawDataQueryPlan) - throws StorageEngineException { - return new ClusterTimeGenerator(context, metaGroupMember, rawDataQueryPlan, false); - } - - @Override - protected IReaderByTimestamp getReaderByTime( - PartialPath path, RawDataQueryPlan dataQueryPlan, QueryContext context) - throws StorageEngineException, QueryProcessException { - return readerFactory.getReaderByTimestamp( - path, - dataQueryPlan.getAllMeasurementsInDevice(path.getDevice()), - path.getSeriesType(), - context, - dataQueryPlan.isAscending(), - null); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/MergeGroupByExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/MergeGroupByExecutor.java deleted file mode 100644 index 3a2bec08525e..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/MergeGroupByExecutor.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.groupby; - -import org.apache.iotdb.cluster.query.reader.ClusterReaderFactory; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.query.aggregation.AggregateResult; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.executor.groupby.GroupByExecutor; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; -import org.apache.iotdb.tsfile.utils.Pair; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; - -public class MergeGroupByExecutor implements GroupByExecutor { - - private static final Logger logger = LoggerFactory.getLogger(MergeGroupByExecutor.class); - - private List results = new ArrayList<>(); - private List aggregationTypes = new ArrayList<>(); - private PartialPath path; - private Set deviceMeasurements; - private TSDataType dataType; - private QueryContext context; - private Filter timeFilter; - private ClusterReaderFactory readerFactory; - private boolean ascending; - - private List groupByExecutors; - - MergeGroupByExecutor( - PartialPath path, - Set deviceMeasurements, - QueryContext context, - Filter timeFilter, - MetaGroupMember metaGroupMember, - boolean ascending) { - this.path = path; - this.deviceMeasurements = deviceMeasurements; - this.dataType = path.getSeriesType(); - this.context = context; - this.timeFilter = timeFilter; - this.readerFactory = new ClusterReaderFactory(metaGroupMember); - this.ascending = ascending; - } - - @Override - public void addAggregateResult(AggregateResult aggrResult) { - results.add(aggrResult); - aggregationTypes.add(aggrResult.getAggregationType().ordinal()); - } - - private void resetAggregateResults() { - for (AggregateResult result : results) { - result.reset(); - } - } - - @Override - public List calcResult(long curStartTime, long curEndTime) - throws QueryProcessException, IOException { - if (groupByExecutors == null) { - initExecutors(); - } - resetAggregateResults(); - for (GroupByExecutor groupByExecutor : groupByExecutors) { - List subResults = groupByExecutor.calcResult(curStartTime, curEndTime); - for (int i = 0; i < subResults.size(); i++) { - results.get(i).merge(subResults.get(i)); - } - } - logger.debug( - "Aggregation result of {}@[{}, {}] is {}", path, curStartTime, curEndTime, results); - return results; - } - - @Override - public Pair peekNextNotNullValue(long nextStartTime, long nextEndTime) - throws IOException { - if (groupByExecutors == null) { - try { - initExecutors(); - } catch (QueryProcessException e) { - throw new IOException(e); - } - } - - Pair result = null; - for (GroupByExecutor groupByExecutor : groupByExecutors) { - Pair pair = groupByExecutor.peekNextNotNullValue(nextStartTime, nextEndTime); - if (pair == null) { - continue; - } - if (result == null || result.left > pair.left) { - result = pair; - } - } - logger.debug( - "peekNextNotNullValue result of {}@[{}, {}] is {}", - path, - nextStartTime, - nextEndTime, - results); - return result; - } - - private void initExecutors() throws QueryProcessException { - try { - groupByExecutors = - readerFactory.getGroupByExecutors( - path, deviceMeasurements, dataType, context, 
timeFilter, aggregationTypes, ascending); - } catch (StorageEngineException e) { - throw new QueryProcessException(e); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/RemoteGroupByExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/RemoteGroupByExecutor.java deleted file mode 100644 index 57cfc0a5c6aa..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/groupby/RemoteGroupByExecutor.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.groupby; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.query.aggregation.AggregateResult; -import org.apache.iotdb.db.query.executor.groupby.GroupByExecutor; -import org.apache.iotdb.tsfile.utils.Pair; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; - -public class RemoteGroupByExecutor implements GroupByExecutor { - - private static final Logger logger = LoggerFactory.getLogger(RemoteGroupByExecutor.class); - - private final long executorId; - private final Node source; - private final RaftNode header; - - private final List results = new ArrayList<>(); - - public RemoteGroupByExecutor(long executorId, Node source, RaftNode header) { - this.executorId = executorId; - this.source = source; - this.header = header; - } - - @Override - public void addAggregateResult(AggregateResult aggrResult) { - results.add(aggrResult); - } - - private void resetAggregateResults() { - for (AggregateResult result : results) { - result.reset(); - } - } - - @Override - public List calcResult(long curStartTime, long curEndTime) throws IOException { - - List aggrBuffers; - try { - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(source, ClusterConstant.getReadOperationTimeoutMS()); - aggrBuffers = - SyncClientAdaptor.getGroupByResult( - client, header, executorId, curStartTime, curEndTime); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(source, 
ClusterConstant.getReadOperationTimeoutMS()); - aggrBuffers = - syncDataClient.getGroupByResult(header, executorId, curStartTime, curEndTime); - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new IOException(e); - } catch (Exception e) { - throw new IOException(e); - } - resetAggregateResults(); - if (aggrBuffers != null) { - for (int i = 0; i < aggrBuffers.size(); i++) { - AggregateResult result = AggregateResult.deserializeFrom(aggrBuffers.get(i)); - results.get(i).merge(result); - } - } - logger.debug( - "Fetched group by result from {} of [{}, {}]: {}", - source, - curStartTime, - curEndTime, - results); - return results; - } - - @Override - public Pair peekNextNotNullValue(long nextStartTime, long nextEndTime) - throws IOException { - ByteBuffer aggrBuffer; - try { - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(source, ClusterConstant.getReadOperationTimeoutMS()); - aggrBuffer = - SyncClientAdaptor.peekNextNotNullValue( - client, header, executorId, nextStartTime, nextEndTime); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(source, ClusterConstant.getReadOperationTimeoutMS()); - aggrBuffer = - syncDataClient.peekNextNotNullValue(header, executorId, nextStartTime, nextEndTime); - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - } catch (TException e) { - throw new IOException(e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new IOException(e); - } - - Pair result = null; - if (aggrBuffer != null) { - long time = aggrBuffer.getLong(); - Object o = SerializeUtils.deserializeObject(aggrBuffer); - if (o != null) { - result = new Pair<>(time, o); - } - } - logger.debug( - "Fetched peekNextNotNullValue from {} of [{}, {}]: {}", - source, - nextStartTime, - nextEndTime, - result); - return result; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/last/ClusterLastQueryExecutor.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/last/ClusterLastQueryExecutor.java deleted file mode 100644 index d116987e04e0..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/last/ClusterLastQueryExecutor.java +++ /dev/null @@ -1,296 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.last; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.rpc.thrift.LastQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.physical.crud.LastQueryPlan; -import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.executor.LastQueryExecutor; -import org.apache.iotdb.rpc.TSStatusCode; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.expression.IExpression; -import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; - -public class ClusterLastQueryExecutor extends LastQueryExecutor { - - private static final Logger logger = LoggerFactory.getLogger(ClusterLastQueryExecutor.class); - private MetaGroupMember metaGroupMember; - - private static ExecutorService lastQueryPool = - IoTDBThreadPoolFactory.newFixedThreadPool( - Runtime.getRuntime().availableProcessors(), "ClusterLastQuery"); - - public ClusterLastQueryExecutor(LastQueryPlan lastQueryPlan, MetaGroupMember metaGroupMember) { - super(lastQueryPlan); - this.metaGroupMember = metaGroupMember; - } - - @Override - protected List calculateLastPairForSeries( - List seriesPaths, - List dataTypes, - QueryContext context, - IExpression expression, - RawDataQueryPlan lastQueryPlan) - throws QueryProcessException, IOException { - return calculateLastPairsForSeries(seriesPaths, dataTypes, context, expression, lastQueryPlan); - } - - private List calculateLastPairsForSeries( - List seriesPaths, - List dataTypes, - QueryContext context, - IExpression expression, - RawDataQueryPlan lastQueryPlan) - throws IOException, QueryProcessException { - // calculate the global last from all data groups - try { - metaGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new IOException(e); - } - List results = new ArrayList<>(seriesPaths.size()); - for (int i = 0; i < 
seriesPaths.size(); i++) { - results.add(null); - } - List globalGroups = metaGroupMember.getPartitionTable().getGlobalGroups(); - List>> groupFutures = new ArrayList<>(globalGroups.size()); - List dataTypeOrdinals = new ArrayList<>(dataTypes.size()); - for (TSDataType dataType : dataTypes) { - dataTypeOrdinals.add(dataType.ordinal()); - } - for (PartitionGroup globalGroup : globalGroups) { - GroupLastTask task = - new GroupLastTask( - globalGroup, - seriesPaths, - dataTypes, - context, - expression, - lastQueryPlan, - dataTypeOrdinals); - groupFutures.add(lastQueryPool.submit(task)); - } - for (Future> groupFuture : groupFutures) { - try { - // merge results from each group - List timeValuePairs = groupFuture.get(); - for (int i = 0; i < timeValuePairs.size(); i++) { - TimeValuePair currentResult = results.get(i); - TimeValuePair newResult = timeValuePairs.get(i); - if (currentResult == null - || newResult != null && newResult.getTimestamp() > currentResult.getTimestamp()) { - results.add(i, newResult); - } - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("Query last of {} interrupted", seriesPaths); - } catch (ExecutionException e) { - throw new QueryProcessException(e, TSStatusCode.QUERY_PROCESS_ERROR.getStatusCode()); - } - } - return results; - } - - class GroupLastTask implements Callable> { - - private PartitionGroup group; - private List seriesPaths; - private List dataTypes; - private List dataTypeOrdinals; - private QueryContext queryContext; - private RawDataQueryPlan queryPlan; - private IExpression expression; - - GroupLastTask( - PartitionGroup group, - List seriesPaths, - List dataTypes, - QueryContext context, - IExpression expression, - RawDataQueryPlan lastQueryPlan, - List dataTypeOrdinals) { - this.group = group; - this.seriesPaths = seriesPaths; - this.dataTypes = dataTypes; - this.queryContext = context; - this.queryPlan = lastQueryPlan; - this.expression = expression; - this.dataTypeOrdinals = dataTypeOrdinals; - } - - @Override - public List call() throws Exception { - return calculateSeriesLast(group, seriesPaths, queryContext); - } - - private List calculateSeriesLast( - PartitionGroup group, List seriesPaths, QueryContext context) - throws QueryProcessException, StorageEngineException, IOException { - if (group.contains(metaGroupMember.getThisNode())) { - return calculateSeriesLastLocally(group, seriesPaths, context); - } else { - return calculateSeriesLastRemotely(group, seriesPaths, context); - } - } - - private List calculateSeriesLastLocally( - PartitionGroup group, List seriesPaths, QueryContext context) - throws StorageEngineException, QueryProcessException, IOException { - DataGroupMember localDataMember = - metaGroupMember.getLocalDataMember(group.getHeader(), group.getRaftId()); - try { - localDataMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new QueryProcessException(e.getMessage()); - } - return calculateLastPairForSeriesLocally( - seriesPaths, dataTypes, context, expression, queryPlan.getDeviceToMeasurements()); - } - - private List calculateSeriesLastRemotely( - PartitionGroup group, List seriesPaths, QueryContext context) { - for (Node node : group) { - try { - ByteBuffer buffer; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - buffer = lastAsync(node, context); - } else { - buffer = lastSync(node, context); - } - if (buffer == null) { - continue; - } - - List timeValuePairs = new ArrayList<>(); - for (int i = 0; i < 
seriesPaths.size(); i++) { - timeValuePairs.add(SerializeUtils.deserializeTVPair(buffer)); - } - List results = new ArrayList<>(); - for (int i = 0; i < seriesPaths.size(); i++) { - TimeValuePair pair = timeValuePairs.get(i); - results.add(pair); - } - return results; - } catch (IOException | TException e) { - logger.warn("Query last of {} from {} errored", group, seriesPaths, e); - return Collections.emptyList(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("Query last of {} from {} interrupted", group, seriesPaths, e); - return Collections.emptyList(); - } - } - return Collections.emptyList(); - } - - private ByteBuffer lastAsync(Node node, QueryContext context) - throws IOException, TException, InterruptedException { - ByteBuffer buffer; - AsyncDataClient asyncDataClient = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - if (asyncDataClient == null) { - return null; - } - Filter timeFilter = - (expression == null) ? null : ((GlobalTimeExpression) expression).getFilter(); - buffer = - SyncClientAdaptor.last( - asyncDataClient, - seriesPaths, - dataTypeOrdinals, - timeFilter, - context, - queryPlan.getDeviceToMeasurements(), - group.getHeader()); - return buffer; - } - - private ByteBuffer lastSync(Node node, QueryContext context) throws IOException, TException { - ByteBuffer res; - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - LastQueryRequest lastQueryRequest = - new LastQueryRequest( - PartialPath.toStringList(seriesPaths), - dataTypeOrdinals, - context.getQueryId(), - queryPlan.getDeviceToMeasurements(), - group.getHeader(), - syncDataClient.getNode()); - Filter timeFilter = - (expression == null) ? null : ((GlobalTimeExpression) expression).getFilter(); - if (timeFilter != null) { - lastQueryRequest.setFilterBytes(SerializeUtils.serializeFilter(timeFilter)); - } - res = syncDataClient.last(lastQueryRequest); - } catch (IOException | TException e) { - // the connection may be broken, close it to avoid it being reused - if (syncDataClient != null) { - syncDataClient.close(); - } - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - return res; - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manage/ClusterQueryManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manage/ClusterQueryManager.java deleted file mode 100644 index 7d9edabaf9a7..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manage/ClusterQueryManager.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
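ClusterLastQueryExecutor above asks every data group for its local LAST pairs and then keeps, per series, the pair with the greatest timestamp. A simplified sketch of that merge step (LastPair is a stand-in for the tsfile TimeValuePair; this is not the deleted method verbatim):

import java.util.List;

class LastPair {
  final long timestamp;
  final Object value;

  LastPair(long timestamp, Object value) {
    this.timestamp = timestamp;
    this.value = value;
  }
}

class LastMerger {
  // For each series keep the newest pair across groups; a null entry means the group had no data.
  static void merge(List<LastPair> global, List<LastPair> fromGroup) {
    for (int i = 0; i < fromGroup.size(); i++) {
      LastPair current = global.get(i);
      LastPair candidate = fromGroup.get(i);
      if (candidate != null && (current == null || candidate.timestamp > current.timestamp)) {
        global.set(i, candidate);
      }
    }
  }
}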
- */ - -package org.apache.iotdb.cluster.query.manage; - -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.db.query.executor.groupby.GroupByExecutor; -import org.apache.iotdb.db.query.reader.series.IAggregateReader; -import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp; -import org.apache.iotdb.tsfile.read.reader.IBatchReader; - -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicLong; - -public class ClusterQueryManager { - - private AtomicLong idAtom = new AtomicLong(); - private Map> queryContextMap = new ConcurrentHashMap<>(); - private Map seriesReaderMap = new ConcurrentHashMap<>(); - private Map seriesReaderByTimestampMap = new ConcurrentHashMap<>(); - private Map aggrReaderMap = new ConcurrentHashMap<>(); - private Map groupByExecutorMap = new ConcurrentHashMap<>(); - - public synchronized RemoteQueryContext getQueryContext(Node node, long queryId) { - Map nodeContextMap = - queryContextMap.computeIfAbsent(node, n -> new HashMap<>()); - return nodeContextMap.computeIfAbsent( - queryId, - qId -> new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true))); - } - - public long registerReader(IBatchReader reader) { - long newReaderId = idAtom.incrementAndGet(); - seriesReaderMap.put(newReaderId, reader); - return newReaderId; - } - - public long registerReaderByTime(IReaderByTimestamp readerByTimestamp) { - long newReaderId = idAtom.incrementAndGet(); - seriesReaderByTimestampMap.put(newReaderId, readerByTimestamp); - return newReaderId; - } - - public synchronized void endQuery(Node node, long queryId) throws StorageEngineException { - Map nodeContextMap = queryContextMap.get(node); - if (nodeContextMap == null) { - return; - } - RemoteQueryContext remoteQueryContext = nodeContextMap.remove(queryId); - if (remoteQueryContext == null) { - return; - } - // release file resources - QueryResourceManager.getInstance().endQuery(remoteQueryContext.getQueryId()); - - // remove the readers from the cache - Set readerIds = remoteQueryContext.getLocalReaderIds(); - for (long readerId : readerIds) { - seriesReaderMap.remove(readerId); - seriesReaderByTimestampMap.remove(readerId); - } - - Set localGroupByExecutorIds = remoteQueryContext.getLocalGroupByExecutorIds(); - for (Long localGroupByExecutorId : localGroupByExecutorIds) { - groupByExecutorMap.remove(localGroupByExecutorId); - } - } - - public IBatchReader getReader(long readerId) { - return seriesReaderMap.get(readerId); - } - - public IReaderByTimestamp getReaderByTimestamp(long readerId) { - return seriesReaderByTimestampMap.get(readerId); - } - - IAggregateReader getAggrReader(long readerId) { - return aggrReaderMap.get(readerId); - } - - public void endAllQueries() throws StorageEngineException { - for (Map contextMap : queryContextMap.values()) { - for (RemoteQueryContext context : contextMap.values()) { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - seriesReaderByTimestampMap.clear(); - seriesReaderMap.clear(); - aggrReaderMap.clear(); - } - - long registerAggrReader(IAggregateReader aggregateReader) { - long newReaderId = idAtom.incrementAndGet(); - aggrReaderMap.put(newReaderId, aggregateReader); - return newReaderId; - } - - public long 
registerGroupByExecutor(GroupByExecutor groupByExecutor) { - long newExecutorId = idAtom.incrementAndGet(); - groupByExecutorMap.put(newExecutorId, groupByExecutor); - return newExecutorId; - } - - public GroupByExecutor getGroupByExecutor(long executorId) { - return groupByExecutorMap.get(executorId); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manage/ClusterSessionManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manage/ClusterSessionManager.java deleted file mode 100644 index dd4e27bb998c..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manage/ClusterSessionManager.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.manage; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.coordinator.Coordinator; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.server.handlers.caller.GenericHandler; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.query.control.SessionManager; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicReference; - -public class ClusterSessionManager extends SessionManager { - - protected ClusterSessionManager() { - // singleton - } - - private static final Logger logger = LoggerFactory.getLogger(ClusterSessionManager.class); - - /** - * The Coordinator of the local node. Through this node ClientServer queries data and meta from - * the cluster and performs data manipulations to the cluster. - */ - private Coordinator coordinator; - - public void setCoordinator(Coordinator coordinator) { - this.coordinator = coordinator; - } - - /** - * queryId -> queryContext map. When a query ends either normally or accidentally, the resources - * used by the query can be found in the context and then released. 
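The map documented above is the hook for remote cleanup: every cluster query registers a context under its query id, and when the query ends the context reveals which remote nodes still hold readers that must be told to end the query. A minimal sketch of that bookkeeping, with the node type and the end-query callback left abstract:

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.BiConsumer;

class QueryContextTracker<N> {

  static class Context<N> {
    // Remote nodes that served this query and therefore hold releasable readers.
    final Set<N> queriedNodes = ConcurrentHashMap.newKeySet();
  }

  private final Map<Long, Context<N>> contexts = new ConcurrentHashMap<>();

  void put(long queryId, Context<N> context) {
    contexts.put(queryId, context);
  }

  // Called when a query ends, normally or not: notify every node that served it.
  void release(long queryId, BiConsumer<Long, N> endQueryOnNode) {
    Context<N> context = contexts.remove(queryId);
    if (context == null) {
      return;
    }
    for (N node : context.queriedNodes) {
      endQueryOnNode.accept(queryId, node); // e.g. an endQuery RPC to that node
    }
  }
}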
- */ - private Map queryContextMap = new ConcurrentHashMap<>(); - - public void putContext(long queryId, RemoteQueryContext context) { - queryContextMap.put(queryId, context); - } - - @Override - public void releaseQueryResource(long queryId) throws StorageEngineException { - super.releaseQueryResource(queryId); - this.releaseRemoteQueryResource(queryId); - } - - /** Release remote resources used by a query. */ - public void releaseRemoteQueryResource(long queryId) { - // release resources remotely - RemoteQueryContext context = queryContextMap.remove(queryId); - if (context != null) { - // release the resources in every queried node - for (Entry> headerEntry : context.getQueriedNodesMap().entrySet()) { - RaftNode header = headerEntry.getKey(); - Set queriedNodes = headerEntry.getValue(); - for (Node queriedNode : queriedNodes) { - releaseQueryResourceForOneNode(queryId, header, queriedNode); - } - } - } - } - - protected void releaseQueryResourceForOneNode(long queryId, RaftNode header, Node queriedNode) { - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - GenericHandler handler = new GenericHandler<>(queriedNode, new AtomicReference<>()); - try { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(queriedNode, ClusterConstant.getReadOperationTimeoutMS()); - client.endQuery(header, coordinator.getThisNode(), queryId, handler); - } catch (IOException | TException e) { - logger.error("Cannot end query {} in {}", queryId, queriedNode); - } - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(queriedNode, ClusterConstant.getReadOperationTimeoutMS()); - syncDataClient.endQuery(header, coordinator.getThisNode(), queryId); - } catch (IOException | TException e) { - // the connection may be broken, close it to avoid it being reused - if (syncDataClient != null) { - syncDataClient.close(); - } - logger.error("Cannot end query {} in {}", queryId, queriedNode); - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - } - - public static ClusterSessionManager getInstance() { - return ClusterSessionManagerHolder.INSTANCE; - } - - private static class ClusterSessionManagerHolder { - - private ClusterSessionManagerHolder() {} - - private static final ClusterSessionManager INSTANCE = new ClusterSessionManager(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/manage/QueryCoordinator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/manage/QueryCoordinator.java deleted file mode 100644 index 21ec9f92addf..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/manage/QueryCoordinator.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.manage; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.monitor.NodeStatus; -import org.apache.iotdb.cluster.server.monitor.NodeStatusManager; - -import java.util.ArrayList; -import java.util.Comparator; -import java.util.List; - -/** - * QueryCoordinator records the spec and load of each node, deciding the order of replicas that - * should be queried - */ -public class QueryCoordinator { - - // a status is considered stale if it is older than one minute and should be updated - private static final QueryCoordinator INSTANCE = new QueryCoordinator(); - private static final NodeStatusManager STATUS_MANAGER = NodeStatusManager.getINSTANCE(); - - private final Comparator nodeComparator = Comparator.comparing(this::getNodeStatus); - - private QueryCoordinator() { - // singleton class - } - - public static QueryCoordinator getINSTANCE() { - return INSTANCE; - } - - /** - * Reorder the given nodes based on their status, the nodes that are more suitable (have low delay - * or load) are placed first. This won't change the order of the original list. - * - * @param nodes - * @return - */ - public List reorderNodes(List nodes) { - List reordered = new ArrayList<>(nodes); - reordered.sort(nodeComparator); - return reordered; - } - - public NodeStatus getNodeStatus(Node node) { - return STATUS_MANAGER.getNodeStatus(node, true); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ClusterReaderFactory.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ClusterReaderFactory.java deleted file mode 100644 index 28fb6dae1ae7..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ClusterReaderFactory.java +++ /dev/null @@ -1,1229 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
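QueryCoordinator above decides which replica to contact first by sorting a copy of the group's node list with a status-based comparator. The shape of that operation, with the node type and scoring left abstract:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

class ReplicaOrdering {
  // Return the nodes sorted by the supplied status comparator (e.g. lowest observed
  // latency first) without mutating the caller's list.
  static <N> List<N> reorder(List<N> nodes, Comparator<N> byStatus) {
    List<N> reordered = new ArrayList<>(nodes);
    reordered.sort(byStatus);
    return reordered;
  }
}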
- */ - -package org.apache.iotdb.cluster.query.reader; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.EmptyIntervalException; -import org.apache.iotdb.cluster.exception.RequestTimeOutException; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.query.LocalQueryExecutor; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.query.filter.SlotTsFileFilter; -import org.apache.iotdb.cluster.query.groupby.RemoteGroupByExecutor; -import org.apache.iotdb.cluster.query.manage.QueryCoordinator; -import org.apache.iotdb.cluster.query.reader.mult.AbstractMultPointReader; -import org.apache.iotdb.cluster.query.reader.mult.MultBatchReader; -import org.apache.iotdb.cluster.query.reader.mult.MultDataSourceInfo; -import org.apache.iotdb.cluster.query.reader.mult.MultEmptyReader; -import org.apache.iotdb.cluster.query.reader.mult.MultSeriesRawDataPointReader; -import org.apache.iotdb.cluster.query.reader.mult.RemoteMultSeriesReader; -import org.apache.iotdb.cluster.rpc.thrift.GroupByRequest; -import org.apache.iotdb.cluster.rpc.thrift.MultSeriesQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.SingleSeriesQueryRequest; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.utils.ClusterQueryUtils; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.engine.querycontext.QueryDataSource; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.metadata.utils.ResourceByPathUtils; -import org.apache.iotdb.db.query.aggregation.AggregationType; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.db.query.executor.groupby.GroupByExecutor; -import org.apache.iotdb.db.query.externalsort.adapter.ByTimestampReaderAdapter; -import org.apache.iotdb.db.query.factory.AggregateResultFactory; -import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp; -import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader; -import org.apache.iotdb.db.query.reader.series.SeriesRawDataBatchReader; -import org.apache.iotdb.db.query.reader.series.SeriesRawDataPointReader; -import org.apache.iotdb.db.query.reader.series.SeriesReader; -import org.apache.iotdb.db.query.reader.series.SeriesReaderByTimestamp; -import org.apache.iotdb.db.query.reader.universal.PriorityMergeReader; -import org.apache.iotdb.rpc.TSStatusCode; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.common.Path; -import org.apache.iotdb.tsfile.read.filter.TimeFilter; -import 
org.apache.iotdb.tsfile.read.filter.basic.Filter; -import org.apache.iotdb.tsfile.read.reader.IBatchReader; -import org.apache.iotdb.tsfile.read.reader.IPointReader; - -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; -import org.apache.thrift.TApplicationException; -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; - -import static org.apache.iotdb.cluster.utils.ClusterQueryUtils.getPathStrListForRequest; - -@SuppressWarnings("java:S107") -public class ClusterReaderFactory { - - private static final Logger logger = LoggerFactory.getLogger(ClusterReaderFactory.class); - private final MetaGroupMember metaGroupMember; - - public ClusterReaderFactory(MetaGroupMember metaGroupMember) { - this.metaGroupMember = metaGroupMember; - } - - public void syncMetaGroup() throws CheckConsistencyException { - metaGroupMember.syncLeaderWithConsistencyCheck(false); - } - - /** - * Create an IReaderByTimestamp that can read the data of "path" by timestamp in the whole - * cluster. This will query every group and merge the result from them. - */ - public IReaderByTimestamp getReaderByTimestamp( - PartialPath path, - Set deviceMeasurements, - TSDataType dataType, - QueryContext context, - boolean ascending, - Set requiredSlots) - throws StorageEngineException, QueryProcessException { - // get all data groups - List partitionGroups; - try { - partitionGroups = metaGroupMember.routeFilter(null, path); - } catch (EmptyIntervalException e) { - logger.warn(e.getMessage()); - partitionGroups = Collections.emptyList(); - } - logger.debug( - "{}: Sending query of {} to {} groups", - metaGroupMember.getName(), - path, - partitionGroups.size()); - List readers = new ArrayList<>(partitionGroups.size()); - for (PartitionGroup partitionGroup : partitionGroups) { - // query each group to get a reader in that group - IReaderByTimestamp readerByTimestamp = - getSeriesReaderByTime( - partitionGroup, - path, - deviceMeasurements, - context, - dataType, - ascending, - requiredSlots); - readers.add(readerByTimestamp); - } - // merge the readers - return new MergedReaderByTime(readers); - } - - /** - * Create a IReaderByTimestamp that read data of "path" by timestamp in the given group. If the - * local node is a member of that group, query locally. Otherwise create a remote reader pointing - * to one node in that group. 
- */ - private IReaderByTimestamp getSeriesReaderByTime( - PartitionGroup partitionGroup, - PartialPath path, - Set deviceMeasurements, - QueryContext context, - TSDataType dataType, - boolean ascending, - Set requiredSlots) - throws StorageEngineException, QueryProcessException { - if (partitionGroup.contains(metaGroupMember.getThisNode())) { - // the target storage group contains this node, perform a local query - DataGroupMember dataGroupMember = - metaGroupMember.getLocalDataMember( - partitionGroup.getHeader(), partitionGroup.getRaftId()); - if (logger.isDebugEnabled()) { - logger.debug( - "{}: creating a local reader for {}#{}", - metaGroupMember.getName(), - path.getFullPath(), - context.getQueryId()); - } - return getReaderByTimestamp( - path, deviceMeasurements, dataType, context, dataGroupMember, ascending, requiredSlots); - } else { - return getRemoteReaderByTimestamp( - path, deviceMeasurements, dataType, partitionGroup, context, ascending, requiredSlots); - } - } - - /** - * Create a IReaderByTimestamp that read data of "path" by timestamp in the given group that does - * not contain the local node. Send a request to one node in that group to build a reader and use - * that reader's id to build a remote reader. - */ - private IReaderByTimestamp getRemoteReaderByTimestamp( - Path path, - Set deviceMeasurements, - TSDataType dataType, - PartitionGroup partitionGroup, - QueryContext context, - boolean ascending, - Set requiredSlots) - throws StorageEngineException { - SingleSeriesQueryRequest request = - constructSingleQueryRequest( - null, - null, - dataType, - path, - deviceMeasurements, - partitionGroup, - context, - ascending, - requiredSlots); - - // reorder the nodes by their communication delays - List reorderedNodes = QueryCoordinator.getINSTANCE().reorderNodes(partitionGroup); - DataSourceInfo dataSourceInfo = - new DataSourceInfo( - partitionGroup, dataType, request, (RemoteQueryContext) context, reorderedNodes); - - // try building a reader from one of the nodes - boolean hasClient = dataSourceInfo.hasNextDataClient(true, Long.MIN_VALUE); - if (hasClient) { - return new RemoteSeriesReaderByTimestamp(dataSourceInfo); - } else if (dataSourceInfo.isNoData()) { - return new EmptyReader(); - } - - throw new StorageEngineException( - new RequestTimeOutException("Query by timestamp: " + path + " in " + partitionGroup)); - } - - /** - * Create a MultSeriesReader that can read the data of "path" with filters in the whole cluster. - * The data groups that should be queried will be determined by the timeFilter, then for each - * group a series reader will be created, and finally all such readers will be merged into one. 
- * - * @param paths all path - * @param deviceMeasurements device to measurements - * @param dataTypes data type - * @param timeFilter time filter - * @param valueFilter value filter - * @param context query context - * @param ascending asc or aesc - */ - public List getMultSeriesReader( - List paths, - Map> deviceMeasurements, - List dataTypes, - Filter timeFilter, - Filter valueFilter, - QueryContext context, - boolean ascending) - throws StorageEngineException, EmptyIntervalException, QueryProcessException { - - Map> partitionGroupListMap = Maps.newHashMap(); - for (PartialPath partialPath : paths) { - List partitionGroups = metaGroupMember.routeFilter(timeFilter, partialPath); - partitionGroups.forEach( - partitionGroup -> - partitionGroupListMap - .computeIfAbsent(partitionGroup, n -> new ArrayList<>()) - .add(partialPath)); - } - - List multPointReaders = Lists.newArrayList(); - - // different path of the same partition group are constructed as a AbstractMultPointReader - // if be local partition, constructed a MultBatchReader - // if be a remote partition, constructed a RemoteMultSeriesReader - for (Map.Entry> entityPartitionGroup : - partitionGroupListMap.entrySet()) { - List partialPaths = entityPartitionGroup.getValue(); - Map> partitionGroupDeviceMeasurements = Maps.newHashMap(); - List partitionGroupTSDataType = Lists.newArrayList(); - partialPaths.forEach( - partialPath -> { - Set measurements = - deviceMeasurements.getOrDefault(partialPath.getDevice(), Collections.emptySet()); - partitionGroupDeviceMeasurements.put(partialPath.getFullPath(), measurements); - partitionGroupTSDataType.add(dataTypes.get(paths.lastIndexOf(partialPath))); - }); - - AbstractMultPointReader abstractMultPointReader = - getMultSeriesReader( - entityPartitionGroup.getKey(), - partialPaths, - partitionGroupTSDataType, - partitionGroupDeviceMeasurements, - timeFilter, - valueFilter, - context, - ascending); - multPointReaders.add(abstractMultPointReader); - } - return multPointReaders; - } - - /** - * Query one node in "partitionGroup" for data of "path" with "timeFilter" and "valueFilter". If - * "partitionGroup" contains the local node, a local reader will be returned. Otherwise a remote - * reader will be returned. 
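getMultSeriesReader above first routes every queried path to the data groups that may hold its data, so that all paths landing in the same group can be served through a single reader. A sketch of that bucketing step, with the group and path types and the routing function left abstract:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

class PathBucketing {
  // Group paths by the partition groups returned by the router; a path may belong
  // to several groups, mirroring metaGroupMember.routeFilter(timeFilter, path).
  static <G, P> Map<G, List<P>> byPartitionGroup(List<P> paths, Function<P, List<G>> route) {
    Map<G, List<P>> buckets = new HashMap<>();
    for (P path : paths) {
      for (G group : route.apply(path)) {
        buckets.computeIfAbsent(group, g -> new ArrayList<>()).add(path);
      }
    }
    return buckets;
  }
}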
- * - * @param timeFilter nullable - * @param valueFilter nullable - */ - private AbstractMultPointReader getMultSeriesReader( - PartitionGroup partitionGroup, - List partialPaths, - List dataTypes, - Map> deviceMeasurements, - Filter timeFilter, - Filter valueFilter, - QueryContext context, - boolean ascending) - throws StorageEngineException, QueryProcessException { - if (partitionGroup.contains(metaGroupMember.getThisNode())) { - // the target storage group contains this node, perform a local query - DataGroupMember dataGroupMember = - metaGroupMember.getLocalDataMember( - partitionGroup.getHeader(), - String.format( - "Query: %s, time filter: %s, queryId: %d", - partialPaths, timeFilter, context.getQueryId())); - Map partialPathPointReaderMap = Maps.newHashMap(); - for (int i = 0; i < partialPaths.size(); i++) { - PartialPath partialPath = partialPaths.get(i); - IPointReader seriesPointReader = - getSeriesPointReader( - partialPath, - deviceMeasurements.get(partialPath.getFullPath()), - dataTypes.get(i), - timeFilter, - valueFilter, - context, - dataGroupMember, - ascending, - null); - partialPathPointReaderMap.put(partialPath.getFullPath(), seriesPointReader); - } - - if (logger.isDebugEnabled()) { - logger.debug( - "{}: creating a local reader for {}#{} of {}", - metaGroupMember.getName(), - partialPaths, - context.getQueryId(), - partitionGroup.getHeader()); - } - return new MultSeriesRawDataPointReader(partialPathPointReaderMap); - } else { - return getRemoteMultSeriesPointReader( - timeFilter, - valueFilter, - dataTypes, - partialPaths, - deviceMeasurements, - partitionGroup, - context, - ascending); - } - } - - /** - * Create a ManagedSeriesReader that can read the data of "path" with filters in the whole - * cluster. The data groups that should be queried will be determined by the timeFilter, then for - * each group a series reader will be created, and finally all such readers will be merged into - * one. - * - * @param timeFilter nullable, when null, all data groups will be queried - * @param valueFilter nullable - */ - public ManagedSeriesReader getSeriesReader( - PartialPath path, - Set deviceMeasurements, - TSDataType dataType, - Filter timeFilter, - Filter valueFilter, - QueryContext context, - boolean ascending) - throws StorageEngineException, EmptyIntervalException { - // find the groups that should be queried using the timeFilter - List partitionGroups = metaGroupMember.routeFilter(timeFilter, path); - logger.debug( - "{}: Sending data query of {} to {} groups", - metaGroupMember.getName(), - path, - partitionGroups.size()); - PriorityMergeReader mergeReader; - if (ascending) { - mergeReader = new ManagedPriorityMergeReader(dataType); - } else { - mergeReader = new ManagedDescPriorityMergeReader(dataType); - } - try { - // build a reader for each group and merge them - for (PartitionGroup partitionGroup : partitionGroups) { - IPointReader seriesReader = - getSeriesReader( - partitionGroup, - path, - deviceMeasurements, - timeFilter, - valueFilter, - context, - dataType, - ascending, - null); - mergeReader.addReader(seriesReader, 0); - } - } catch (IOException | QueryProcessException e) { - throw new StorageEngineException(e); - } - // The instance of merge reader is either ManagedPriorityMergeReader or - // ManagedDescPriorityMergeReader, which is safe to cast type. - return (ManagedSeriesReader) mergeReader; - } - - /** - * Query one node in "partitionGroup" for data of "path" with "timeFilter" and "valueFilter". 
If - * "partitionGroup" contains the local node, a local reader will be returned. Otherwise a remote - * reader will be returned. - * - * @param timeFilter nullable - * @param valueFilter nullable - */ - private IPointReader getSeriesReader( - PartitionGroup partitionGroup, - PartialPath path, - Set deviceMeasurements, - Filter timeFilter, - Filter valueFilter, - QueryContext context, - TSDataType dataType, - boolean ascending, - Set requiredSlots) - throws IOException, StorageEngineException, QueryProcessException { - if (partitionGroup.contains(metaGroupMember.getThisNode())) { - // the target storage group contains this node, perform a local query - DataGroupMember dataGroupMember = - metaGroupMember.getLocalDataMember( - partitionGroup.getHeader(), - String.format( - "Query: %s, time filter: %s, queryId: %d", - path, timeFilter, context.getQueryId())); - IPointReader seriesPointReader = - getSeriesPointReader( - path, - deviceMeasurements, - dataType, - timeFilter, - valueFilter, - context, - dataGroupMember, - ascending, - requiredSlots); - if (logger.isDebugEnabled()) { - logger.debug( - "{}: creating a local reader for {}#{} of {}, empty: {}", - metaGroupMember.getName(), - path.getFullPath(), - context.getQueryId(), - partitionGroup.getHeader(), - !seriesPointReader.hasNextTimeValuePair()); - } - return seriesPointReader; - } else { - return getRemoteSeriesPointReader( - timeFilter, - valueFilter, - dataType, - path, - deviceMeasurements, - partitionGroup, - context, - ascending, - requiredSlots); - } - } - - /** - * Create an IPointReader of "path" with “timeFilter” and "valueFilter". A synchronization with - * the leader will be performed according to consistency level - * - * @param path series path - * @param dataType data type - * @param timeFilter nullable - * @param valueFilter nullable - * @param context query context - * @return reader - * @throws StorageEngineException encounter exception - */ - public IPointReader getSeriesPointReader( - PartialPath path, - Set allSensors, - TSDataType dataType, - Filter timeFilter, - Filter valueFilter, - QueryContext context, - DataGroupMember dataGroupMember, - boolean ascending, - Set requiredSlots) - throws StorageEngineException, QueryProcessException { - // pull the newest data - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new StorageEngineException(e); - } - return new SeriesRawDataPointReader( - getSeriesReader( - path, - allSensors, - dataType, - timeFilter, - valueFilter, - context, - dataGroupMember.getHeader(), - ascending, - requiredSlots)); - } - - /** - * Create a SeriesReader of "path" with “timeFilter” and "valueFilter". The consistency is not - * guaranteed here and only data slots managed by the member will be queried. - * - * @param path series path - * @param dataType data type - * @param timeFilter nullable - * @param valueFilter nullable - * @param context query context - * @return reader for series - * @throws StorageEngineException encounter exception - */ - private SeriesReader getSeriesReader( - PartialPath path, - Set allSensors, - TSDataType dataType, - Filter timeFilter, - Filter valueFilter, - QueryContext context, - RaftNode header, - boolean ascending, - Set requiredSlots) - throws StorageEngineException, QueryProcessException { - // If requiredSlots is null, it means that this node should provide data of all slots about - // required paths. 
- if (requiredSlots == null) { - List nodeSlots = - ((SlotPartitionTable) metaGroupMember.getPartitionTable()).getNodeSlots(header); - requiredSlots = new HashSet<>(nodeSlots); - } - // If requiredSlots is not null, it means that this node should provide partial data as previous - // holder, in order to assist the new holder to read the complete data. - QueryDataSource queryDataSource = - QueryResourceManager.getInstance().getQueryDataSource(path, context, timeFilter, ascending); - valueFilter = queryDataSource.updateFilterUsingTTL(valueFilter); - return ResourceByPathUtils.getResourceInstance(path) - .createSeriesReader( - allSensors, - dataType, - context, - queryDataSource, - timeFilter, - valueFilter, - new SlotTsFileFilter(requiredSlots), - ascending); - } - - /** - * Query a remote node in "partitionGroup" to get the reader of "path" with "timeFilter" and - * "valueFilter". Firstly, a request will be sent to that node to construct a reader there, then - * the id of the reader will be returned so that we can fetch data from that node using the reader - * id. - * - * @param timeFilter nullable - * @param valueFilter nullable - */ - private AbstractMultPointReader getRemoteMultSeriesPointReader( - Filter timeFilter, - Filter valueFilter, - List dataType, - List paths, - Map> deviceMeasurements, - PartitionGroup partitionGroup, - QueryContext context, - boolean ascending) - throws StorageEngineException { - MultSeriesQueryRequest request = - constructMultQueryRequest( - timeFilter, - valueFilter, - dataType, - paths, - deviceMeasurements, - partitionGroup, - context, - ascending); - - // reorder the nodes such that the nodes that suit the query best (have lowest latency or - // highest throughput) will be put to the front - List orderedNodes = QueryCoordinator.getINSTANCE().reorderNodes(partitionGroup); - - MultDataSourceInfo dataSourceInfo = - new MultDataSourceInfo( - partitionGroup, paths, dataType, request, (RemoteQueryContext) context, orderedNodes); - - boolean hasClient = dataSourceInfo.hasNextDataClient(Long.MIN_VALUE); - if (hasClient) { - return new RemoteMultSeriesReader(dataSourceInfo); - } else if (dataSourceInfo.isNoData()) { - // there is no satisfying data on the remote node - Set fullPaths = Sets.newHashSet(); - dataSourceInfo - .getPartialPaths() - .forEach(partialPath -> fullPaths.add(partialPath.getFullPath())); - return new MultEmptyReader(fullPaths); - } - throw new StorageEngineException( - new RequestTimeOutException("Query multi-series: " + paths + " in " + partitionGroup)); - } - - /** - * Query a remote node in "partitionGroup" to get the reader of "path" with "timeFilter" and - * "valueFilter". Firstly, a request will be sent to that node to construct a reader there, then - * the id of the reader will be returned so that we can fetch data from that node using the reader - * id. 
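The remote readers built here all follow the same two-step handshake described above: one request constructs a reader on a node of the target group and returns its id, and later calls pull data by that id. The interface below only illustrates that shape; it is not the cluster's actual Thrift data service.

import java.nio.ByteBuffer;

interface RemoteReaderClient {
  // Build a reader on the remote node from a serialized query request and return its id.
  long buildReader(ByteBuffer serializedRequest) throws Exception;

  // Fetch the next batch for a previously built reader; an empty buffer means exhausted.
  ByteBuffer fetchBatch(long readerId) throws Exception;

  // Release every reader the node still holds for this query id.
  void endQuery(long queryId) throws Exception;
}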
- * - * @param timeFilter nullable - * @param valueFilter nullable - */ - private IPointReader getRemoteSeriesPointReader( - Filter timeFilter, - Filter valueFilter, - TSDataType dataType, - Path path, - Set deviceMeasurements, - PartitionGroup partitionGroup, - QueryContext context, - boolean ascending, - Set requiredSlots) - throws StorageEngineException { - SingleSeriesQueryRequest request = - constructSingleQueryRequest( - timeFilter, - valueFilter, - dataType, - path, - deviceMeasurements, - partitionGroup, - context, - ascending, - requiredSlots); - - // reorder the nodes such that the nodes that suit the query best (have lowest latenct or - // highest throughput) will be put to the front - List orderedNodes = QueryCoordinator.getINSTANCE().reorderNodes(partitionGroup); - - DataSourceInfo dataSourceInfo = - new DataSourceInfo( - partitionGroup, dataType, request, (RemoteQueryContext) context, orderedNodes); - - boolean hasClient = dataSourceInfo.hasNextDataClient(false, Long.MIN_VALUE); - if (hasClient) { - return new RemoteSimpleSeriesReader(dataSourceInfo); - } else if (dataSourceInfo.isNoData()) { - // there is no satisfying data on the remote node - return new EmptyReader(); - } - - throw new StorageEngineException( - new RequestTimeOutException("Query " + path + " in " + partitionGroup)); - } - - private MultSeriesQueryRequest constructMultQueryRequest( - Filter timeFilter, - Filter valueFilter, - List dataTypes, - List paths, - Map> deviceMeasurements, - PartitionGroup partitionGroup, - QueryContext context, - boolean ascending) { - MultSeriesQueryRequest request = new MultSeriesQueryRequest(); - if (timeFilter != null) { - request.setTimeFilterBytes(SerializeUtils.serializeFilter(timeFilter)); - } - if (valueFilter != null) { - request.setValueFilterBytes(SerializeUtils.serializeFilter(valueFilter)); - } - - List fullPaths = Lists.newArrayList(); - paths.forEach(path -> fullPaths.add(getPathStrListForRequest(path))); - - List dataTypeOrdinals = Lists.newArrayList(); - dataTypes.forEach(dataType -> dataTypeOrdinals.add(dataType.ordinal())); - - request.setPath(fullPaths); - request.setHeader(partitionGroup.getHeader()); - request.setQueryId(context.getQueryId()); - request.setRequester(metaGroupMember.getThisNode()); - request.setDataTypeOrdinal(dataTypeOrdinals); - request.setDeviceMeasurements(deviceMeasurements); - request.setAscending(ascending); - return request; - } - - private SingleSeriesQueryRequest constructSingleQueryRequest( - Filter timeFilter, - Filter valueFilter, - TSDataType dataType, - Path path, - Set deviceMeasurements, - PartitionGroup partitionGroup, - QueryContext context, - boolean ascending, - Set requiredSlots) { - SingleSeriesQueryRequest request = new SingleSeriesQueryRequest(); - if (timeFilter != null) { - request.setTimeFilterBytes(SerializeUtils.serializeFilter(timeFilter)); - } - if (valueFilter != null) { - request.setValueFilterBytes(SerializeUtils.serializeFilter(valueFilter)); - } - request.setPath(ClusterQueryUtils.getPathStrListForRequest(path)); - request.setHeader(partitionGroup.getHeader()); - request.setQueryId(context.getQueryId()); - request.setRequester(metaGroupMember.getThisNode()); - request.setDataTypeOrdinal(dataType.ordinal()); - request.setDeviceMeasurements(deviceMeasurements); - request.setAscending(ascending); - request.setRequiredSlots(requiredSlots); - return request; - } - - /** - * Get GroupByExecutors the will executor the aggregations of "aggregationTypes" over "path". 
- * First, the groups to be queried will be determined by the timeFilter. Then for group, a local - * or remote GroupByExecutor will be created and finally all such executors will be returned. - * - * @param timeFilter nullable - */ - public List getGroupByExecutors( - PartialPath path, - Set deviceMeasurements, - TSDataType dataType, - QueryContext context, - Filter timeFilter, - List aggregationTypes, - boolean ascending) - throws StorageEngineException, QueryProcessException { - // make sure the partition table is new - try { - metaGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new QueryProcessException(e.getMessage()); - } - // find out the groups that should be queried - List partitionGroups; - try { - partitionGroups = metaGroupMember.routeFilter(timeFilter, path); - } catch (EmptyIntervalException e) { - logger.info(e.getMessage()); - partitionGroups = Collections.emptyList(); - } - if (logger.isDebugEnabled()) { - logger.debug( - "{}: Sending group by query of {} to {} groups", - metaGroupMember.getName(), - path, - partitionGroups.size()); - } - // create an executor for each group - List executors = new ArrayList<>(); - for (PartitionGroup partitionGroup : partitionGroups) { - GroupByExecutor groupByExecutor = - getGroupByExecutor( - path, - deviceMeasurements, - partitionGroup, - timeFilter, - context, - dataType, - aggregationTypes, - ascending); - executors.add(groupByExecutor); - } - return executors; - } - - /** - * Get a GroupByExecutor that will run "aggregationTypes" over "path" within "partitionGroup". If - * the local node is a member of the group, a local executor will be created. Otherwise a remote - * executor will be created. - * - * @param timeFilter nullable - */ - private GroupByExecutor getGroupByExecutor( - PartialPath path, - Set deviceMeasurements, - PartitionGroup partitionGroup, - Filter timeFilter, - QueryContext context, - TSDataType dataType, - List aggregationTypes, - boolean ascending) - throws StorageEngineException, QueryProcessException { - if (partitionGroup.contains(metaGroupMember.getThisNode())) { - // the target storage group contains this node, perform a local query - DataGroupMember dataGroupMember = - metaGroupMember.getLocalDataMember( - partitionGroup.getHeader(), partitionGroup.getRaftId()); - LocalQueryExecutor localQueryExecutor = new LocalQueryExecutor(dataGroupMember); - logger.debug( - "{}: creating a local group by executor for {}#{}", - metaGroupMember.getName(), - path.getFullPath(), - context.getQueryId()); - return localQueryExecutor.getGroupByExecutor( - path, deviceMeasurements, dataType, timeFilter, aggregationTypes, context, ascending); - } else { - return getRemoteGroupByExecutor( - timeFilter, - aggregationTypes, - dataType, - path, - deviceMeasurements, - partitionGroup, - context, - ascending); - } - } - - /** - * Get a GroupByExecutor that will run "aggregationTypes" over "path" within a remote group - * "partitionGroup". Send a request to one node in the group to create an executor there and use - * the return executor id to fetch result later. 
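// A minimal sketch (not IoTDB code) of the dispatch rule described above: the time filter is
// first routed to the partition groups it touches, then each group gets a local executor when
// this node is a member of the group and a remote executor otherwise. Group and the factory
// parameters are hypothetical stand-ins, not the real MetaGroupMember API.
final class GroupByDispatchSketch {

  static <G, E> java.util.List<E> buildExecutors(
      java.util.List<G> routedGroups,
      java.util.function.Predicate<G> containsThisNode,
      java.util.function.Function<G, E> localFactory,
      java.util.function.Function<G, E> remoteFactory) {
    java.util.List<E> executors = new java.util.ArrayList<>();
    for (G group : routedGroups) {
      executors.add(
          containsThisNode.test(group) ? localFactory.apply(group) : remoteFactory.apply(group));
    }
    return executors; // the caller combines the partial aggregates of all groups
  }
}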
- * - * @param timeFilter nullable - */ - private GroupByExecutor getRemoteGroupByExecutor( - Filter timeFilter, - List aggregationTypes, - TSDataType dataType, - Path path, - Set deviceMeasurements, - PartitionGroup partitionGroup, - QueryContext context, - boolean ascending) - throws StorageEngineException { - GroupByRequest request = new GroupByRequest(); - if (timeFilter != null) { - request.setTimeFilterBytes(SerializeUtils.serializeFilter(timeFilter)); - } - request.setPath(path.getFullPath()); - request.setHeader(partitionGroup.getHeader()); - request.setQueryId(context.getQueryId()); - request.setAggregationTypeOrdinals(aggregationTypes); - request.setDataTypeOrdinal(dataType.ordinal()); - request.setRequestor(metaGroupMember.getThisNode()); - request.setDeviceMeasurements(deviceMeasurements); - request.setAscending(ascending); - - // select a node with lowest latency or highest throughput with high priority - List orderedNodes = QueryCoordinator.getINSTANCE().reorderNodes(partitionGroup); - for (Node node : orderedNodes) { - // query a remote node - logger.debug("{}: querying group by {} from {}", metaGroupMember.getName(), path, node); - - try { - Long executorId = getRemoteGroupByExecutorId(node, request); - - if (executorId == null) { - continue; - } - - if (executorId != -1) { - logger.debug( - "{}: get an executorId {} for {}@{} from {}", - metaGroupMember.getName(), - executorId, - aggregationTypes, - path, - node); - // create a remote executor with the return id - RemoteGroupByExecutor remoteGroupByExecutor = - new RemoteGroupByExecutor(executorId, node, partitionGroup.getHeader()); - for (Integer aggregationType : aggregationTypes) { - remoteGroupByExecutor.addAggregateResult( - AggregateResultFactory.getAggrResultByType( - AggregationType.values()[aggregationType], dataType, ascending)); - } - return remoteGroupByExecutor; - } else { - // an id of -1 means there is no satisfying data on the remote node, create an empty - // reader tp reduce further communication - logger.debug("{}: no data for {} from {}", metaGroupMember.getName(), path, node); - return new EmptyReader(); - } - } catch (TApplicationException e) { - logger.error(metaGroupMember.getName() + ": Cannot query " + path + " from " + node, e); - throw new StorageEngineException(e.getMessage()); - } catch (TException | IOException e) { - logger.error(metaGroupMember.getName() + ": Cannot query " + path + " from " + node, e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error(metaGroupMember.getName() + ": Cannot query " + path + " from " + node, e); - } finally { - // record the queried node to release resources later - ((RemoteQueryContext) context).registerRemoteNode(node, partitionGroup.getHeader()); - } - } - throw new StorageEngineException( - new RequestTimeOutException("Query " + path + " in " + partitionGroup)); - } - - private Long getRemoteGroupByExecutorId(Node node, GroupByRequest request) - throws IOException, TException, InterruptedException { - Long executorId; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - executorId = SyncClientAdaptor.getGroupByExecutor(client, request); - } else { - SyncDataClient syncDataClient = null; - try { - syncDataClient = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - executorId = 
syncDataClient.getGroupByExecutor(request); - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - return executorId; - } - - public IBatchReader getSeriesBatchReader( - PartialPath path, - Set allSensors, - TSDataType dataType, - Filter timeFilter, - Filter valueFilter, - QueryContext context, - DataGroupMember dataGroupMember, - boolean ascending, - Set requiredSlots) - throws StorageEngineException, QueryProcessException, IOException { - return getSeriesBatchReader( - path, - allSensors, - dataType, - timeFilter, - valueFilter, - context, - dataGroupMember, - ascending, - requiredSlots, - true); - } - - /** - * Create an IBatchReader of "path" with “timeFilter” and "valueFilter". A synchronization with - * the leader will be performed according to consistency level - * - * @param path - * @param dataType - * @param timeFilter nullable - * @param valueFilter nullable - * @param context - * @return an IBatchReader or null if there is no satisfying data - * @throws StorageEngineException - */ - public IBatchReader getSeriesBatchReader( - PartialPath path, - Set allSensors, - TSDataType dataType, - Filter timeFilter, - Filter valueFilter, - QueryContext context, - DataGroupMember dataGroupMember, - boolean ascending, - Set requiredSlots, - boolean syncLeader) - throws StorageEngineException, QueryProcessException, IOException { - if (syncLeader) { - // pull the newest data - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new StorageEngineException(e); - } - } - - // find the groups that should be queried due to data migration. - Map> holderSlotMap = dataGroupMember.getPreviousHolderSlotMap(); - - // If requiredSlots is not null, it means that this data group is the previous holder of - // required slots, which is no need to merge other resource, - if (requiredSlots == null && !holderSlotMap.isEmpty()) { - // merge remote reader and local reader - PriorityMergeReader mergeReader; - if (ascending) { - mergeReader = new ManagedPriorityMergeReader(dataType); - } else { - mergeReader = new ManagedDescPriorityMergeReader(dataType); - } - - // add local reader - IPointReader seriesPointReader = - getSeriesPointReader( - path, - allSensors, - dataType, - timeFilter, - valueFilter, - context, - dataGroupMember, - ascending, - null); - mergeReader.addReader(seriesPointReader, 1); - - // add previous holder reader due to in the stage of data migration - logger.debug( - "{}: Sending data query of {} to {} groups due to data is in the state of data migration", - metaGroupMember.getName(), - path, - holderSlotMap.size()); - for (Entry> entry : holderSlotMap.entrySet()) { - IPointReader seriesReader = - getSeriesReader( - entry.getKey(), - path, - allSensors, - timeFilter, - valueFilter, - context, - dataType, - ascending, - entry.getValue()); - mergeReader.addReader(seriesReader, 0); - } - - // The instance of merge reader is either ManagedPriorityMergeReader or - // ManagedDescPriorityMergeReader, which is safe to cast type. 
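// A minimal sketch (not IoTDB code) of the priority merge used above while a slot is being
// migrated: the local reader is registered with priority 1 and every previous-holder reader
// with priority 0, so when two sources report the same timestamp the local value is kept and
// the stale copy is dropped. Point is a hypothetical stand-in for TimeValuePair.
final class MigrationPriorityMergeSketch {

  static final class Point {
    final long time;
    final Object value;

    Point(long time, Object value) {
      this.time = time;
      this.value = value;
    }
  }

  private static final class Source {
    final java.util.Iterator<Point> points;
    final int priority;
    Point head;

    Source(java.util.Iterator<Point> points, int priority) {
      this.points = points;
      this.priority = priority;
      this.head = points.next(); // only constructed for non-empty sources
    }
  }

  static java.util.List<Point> merge(
      java.util.Iterator<Point> localAscending,
      java.util.List<java.util.Iterator<Point>> previousHoldersAscending) {
    java.util.PriorityQueue<Source> heap =
        new java.util.PriorityQueue<>(
            java.util.Comparator.comparingLong((Source s) -> s.head.time)
                .thenComparingInt(s -> -s.priority)); // equal time: higher priority first
    if (localAscending.hasNext()) {
      heap.add(new Source(localAscending, 1));
    }
    for (java.util.Iterator<Point> holder : previousHoldersAscending) {
      if (holder.hasNext()) {
        heap.add(new Source(holder, 0));
      }
    }
    java.util.List<Point> merged = new java.util.ArrayList<>();
    long lastTime = Long.MIN_VALUE;
    boolean any = false;
    while (!heap.isEmpty()) {
      Source top = heap.poll();
      if (!any || top.head.time != lastTime) { // skip lower-priority duplicates of a timestamp
        merged.add(top.head);
        lastTime = top.head.time;
        any = true;
      }
      if (top.points.hasNext()) {
        top.head = top.points.next();
        heap.add(top);
      }
    }
    return merged;
  }
}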
- return (IBatchReader) mergeReader; - } else { - // just local reader is enough - SeriesReader seriesReader = - getSeriesReader( - path, - allSensors, - dataType, - timeFilter, - valueFilter, - context, - dataGroupMember.getHeader(), - ascending, - requiredSlots); - if (seriesReader.isEmpty()) { - return null; - } - return new SeriesRawDataBatchReader(seriesReader); - } - } - - /** - * Create an IBatchReader of "path" with “timeFilter” and "valueFilter". A synchronization with - * the leader will be performed according to consistency level - * - * @param paths - * @param dataTypes - * @param timeFilter nullable - * @param valueFilter nullable - * @param context - * @return an IBatchReader or null if there is no satisfying data - * @throws StorageEngineException - */ - public IBatchReader getMultSeriesBatchReader( - List paths, - Map> allSensors, - List dataTypes, - Filter timeFilter, - Filter valueFilter, - QueryContext context, - DataGroupMember dataGroupMember, - boolean ascending) - throws StorageEngineException, QueryProcessException, IOException { - // pull the newest data - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new StorageEngineException(e); - } - - Map partialPathBatchReaderMap = Maps.newHashMap(); - - for (int i = 0; i < paths.size(); i++) { - PartialPath partialPath = paths.get(i); - IBatchReader batchReader = - getSeriesBatchReader( - partialPath, - allSensors.get(partialPath.getFullPath()), - dataTypes.get(i), - timeFilter, - valueFilter, - context, - dataGroupMember, - ascending, - null, - false); - partialPathBatchReaderMap.put(partialPath.getFullPath(), batchReader); - } - return new MultBatchReader(partialPathBatchReaderMap); - } - - /** - * Create an IReaderByTimestamp of "path". A synchronization with the leader will be performed - * according to consistency level - * - * @param path - * @param dataType - * @param context - * @return an IReaderByTimestamp or null if there is no satisfying data - * @throws StorageEngineException - */ - public IReaderByTimestamp getReaderByTimestamp( - PartialPath path, - Set allSensors, - TSDataType dataType, - QueryContext context, - DataGroupMember dataGroupMember, - boolean ascending, - Set requiredSlots) - throws StorageEngineException, QueryProcessException { - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - } catch (CheckConsistencyException e) { - throw new StorageEngineException(e); - } - - // find the groups that should be queried due to data migration. - // when a slot is in the status of PULLING or PULLING_WRITABLE, the read of it should merge - // result to guarantee integrity. 
- Map> holderSlotMap = dataGroupMember.getPreviousHolderSlotMap(); - Filter timeFilter = TimeFilter.defaultTimeFilter(ascending); - try { - // If requiredSlots is not null, it means that this data group is the previous holder of - // required slots, which is no need to merge other resource, - if (requiredSlots == null && !holderSlotMap.isEmpty()) { - // merge remote reader and local reader - PriorityMergeReader mergeReader; - if (ascending) { - mergeReader = new ManagedPriorityMergeReader(dataType); - } else { - mergeReader = new ManagedDescPriorityMergeReader(dataType); - } - - // add local reader - IPointReader seriesPointReader = - getSeriesPointReader( - path, - allSensors, - dataType, - timeFilter, - null, - context, - dataGroupMember, - ascending, - null); - mergeReader.addReader(seriesPointReader, 1); - - // add previous holder reader due to in the stage of data migration - logger.debug( - "{}: Sending data query of {} to {} groups due to data is in the state of data migration", - metaGroupMember.getName(), - path, - holderSlotMap.size()); - for (Entry> entry : holderSlotMap.entrySet()) { - IPointReader seriesReader = - getSeriesReader( - entry.getKey(), - path, - allSensors, - timeFilter, - null, - context, - dataType, - ascending, - entry.getValue()); - mergeReader.addReader(seriesReader, 0); - } - - // The instance of merge reader is either ManagedPriorityMergeReader or - // ManagedDescPriorityMergeReader, which is safe to cast type. - return new ByTimestampReaderAdapter(mergeReader); - } else { - // just local reader is enough - SeriesReader seriesReader = - getSeriesReader( - path, - allSensors, - dataType, - timeFilter, - null, - context, - dataGroupMember.getHeader(), - ascending, - requiredSlots); - - if (seriesReader.isEmpty()) { - return null; - } - - return new SeriesReaderByTimestamp(seriesReader, ascending); - } - } catch (IOException e) { - throw new QueryProcessException(e, TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ClusterTimeGenerator.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ClusterTimeGenerator.java deleted file mode 100644 index ac22ad23a679..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ClusterTimeGenerator.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.reader; - -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader; -import org.apache.iotdb.db.query.timegenerator.ServerTimeGenerator; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.expression.ExpressionType; -import org.apache.iotdb.tsfile.read.expression.IBinaryExpression; -import org.apache.iotdb.tsfile.read.expression.IExpression; -import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; -import org.apache.iotdb.tsfile.read.query.timegenerator.node.AndNode; -import org.apache.iotdb.tsfile.read.query.timegenerator.node.LeafNode; -import org.apache.iotdb.tsfile.read.query.timegenerator.node.Node; -import org.apache.iotdb.tsfile.read.query.timegenerator.node.OrNode; -import org.apache.iotdb.tsfile.read.reader.IBatchReader; -import org.apache.iotdb.tsfile.read.reader.IPointReader; - -import java.io.IOException; -import java.util.List; - -public class ClusterTimeGenerator extends ServerTimeGenerator { - - private ClusterReaderFactory readerFactory; - private boolean hasLocalReader = false; - private QueryDataSet.EndPoint endPoint = null; - - /** Constructor of EngineTimeGenerator. 
*/ - public ClusterTimeGenerator( - QueryContext context, - MetaGroupMember metaGroupMember, - RawDataQueryPlan rawDataQueryPlan, - boolean onlyCheckLocalData) - throws StorageEngineException { - super(context); - this.queryPlan = rawDataQueryPlan; - this.readerFactory = new ClusterReaderFactory(metaGroupMember); - try { - readerFactory.syncMetaGroup(); - if (onlyCheckLocalData) { - whetherHasLocalDataGroup( - queryPlan.getExpression(), metaGroupMember, queryPlan.isAscending()); - } else { - constructNode(queryPlan.getExpression()); - } - } catch (IOException | CheckConsistencyException e) { - throw new StorageEngineException(e); - } - } - - @TestOnly - public ClusterTimeGenerator( - QueryContext context, - MetaGroupMember metaGroupMember, - ClusterReaderFactory clusterReaderFactory, - RawDataQueryPlan rawDataQueryPlan, - boolean onlyCheckLocalData) - throws StorageEngineException { - super(context); - this.queryPlan = rawDataQueryPlan; - this.readerFactory = clusterReaderFactory; - try { - readerFactory.syncMetaGroup(); - if (onlyCheckLocalData) { - whetherHasLocalDataGroup( - queryPlan.getExpression(), metaGroupMember, queryPlan.isAscending()); - } else { - constructNode(queryPlan.getExpression()); - } - } catch (IOException | CheckConsistencyException e) { - throw new StorageEngineException(e); - } - } - - @Override - protected IBatchReader generateNewBatchReader(SingleSeriesExpression expression) - throws IOException { - Filter filter = expression.getFilter(); - Filter timeFilter = getTimeFilter(filter); - PartialPath path = (PartialPath) expression.getSeriesPath(); - TSDataType dataType; - ManagedSeriesReader mergeReader; - try { - dataType = IoTDB.schemaProcessor.getSeriesType(path); - mergeReader = - readerFactory.getSeriesReader( - path, - queryPlan.getAllMeasurementsInDevice(path.getDevice()), - dataType, - timeFilter, - filter, - context, - queryPlan.isAscending()); - } catch (Exception e) { - throw new IOException(e); - } - return mergeReader; - } - - public boolean isHasLocalReader() { - return hasLocalReader; - } - - @Override - public String toString() { - return super.toString() + ", has local reader:" + hasLocalReader; - } - - public void whetherHasLocalDataGroup( - IExpression expression, MetaGroupMember metaGroupMember, boolean isAscending) - throws IOException { - this.hasLocalReader = false; - constructNode(expression, metaGroupMember, isAscending); - } - - private Node constructNode( - IExpression expression, MetaGroupMember metaGroupMember, boolean isAscending) - throws IOException { - if (expression.getType() == ExpressionType.SERIES) { - SingleSeriesExpression singleSeriesExp = (SingleSeriesExpression) expression; - checkHasLocalReader(singleSeriesExp, metaGroupMember); - return new LeafNode(null); - } else { - Node leftChild = - constructNode(((IBinaryExpression) expression).getLeft(), metaGroupMember, isAscending); - Node rightChild = - constructNode(((IBinaryExpression) expression).getRight(), metaGroupMember, isAscending); - - if (expression.getType() == ExpressionType.OR) { - return new OrNode(leftChild, rightChild, isAscending); - } else if (expression.getType() == ExpressionType.AND) { - return new AndNode(leftChild, rightChild, isAscending); - } - throw new UnSupportedDataTypeException( - "Unsupported ExpressionType when construct OperatorNode: " + expression.getType()); - } - } - - private void checkHasLocalReader( - SingleSeriesExpression expression, MetaGroupMember metaGroupMember) throws IOException { - Filter filter = expression.getFilter(); - Filter 
timeFilter = getTimeFilter(filter); - PartialPath path = (PartialPath) expression.getSeriesPath(); - TSDataType dataType; - try { - dataType = IoTDB.schemaProcessor.getSeriesType(path); - - List partitionGroups = metaGroupMember.routeFilter(null, path); - for (PartitionGroup partitionGroup : partitionGroups) { - if (partitionGroup.contains(metaGroupMember.getThisNode())) { - DataGroupMember dataGroupMember = - metaGroupMember.getLocalDataMember( - partitionGroup.getHeader(), - String.format( - "Query: %s, time filter: %s, queryId: %d", path, null, context.getQueryId())); - - IPointReader pointReader = - readerFactory.getSeriesPointReader( - path, - queryPlan.getAllMeasurementsInDevice(path.getDevice()), - dataType, - timeFilter, - filter, - context, - dataGroupMember, - queryPlan.isAscending(), - null); - - if (pointReader.hasNextTimeValuePair()) { - this.hasLocalReader = true; - this.endPoint = null; - pointReader.close(); - break; - } - pointReader.close(); - } else if (endPoint == null) { - endPoint = - new QueryDataSet.EndPoint( - partitionGroup.getHeader().getNode().getClientIp(), - partitionGroup.getHeader().getNode().getClientPort()); - } - } - } catch (Exception e) { - throw new IOException(e); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/DataSourceInfo.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/DataSourceInfo.java deleted file mode 100644 index 3d1a2dd9e505..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/DataSourceInfo.java +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.reader; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.SingleSeriesQueryRequest; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.filter.TimeFilter; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; -import org.apache.iotdb.tsfile.read.filter.factory.FilterFactory; -import org.apache.iotdb.tsfile.read.filter.operator.AndFilter; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; - -/** - * provide client which could connect to all nodes of the partitionGroup. Notice: methods like - * getter should be called only after nextDataClient() has been called - */ -public class DataSourceInfo { - - private static final Logger logger = LoggerFactory.getLogger(DataSourceInfo.class); - - private long readerId; - private Node curSource; - private PartitionGroup partitionGroup; - private TSDataType dataType; - private SingleSeriesQueryRequest request; - private RemoteQueryContext context; - private List nodes; - private int curPos; - private boolean isNoData = false; - private boolean isNoClient = false; - - public DataSourceInfo( - PartitionGroup group, - TSDataType dataType, - SingleSeriesQueryRequest request, - RemoteQueryContext context, - List nodes) { - this.readerId = -1; - this.partitionGroup = group; - this.dataType = dataType; - this.request = request; - this.context = context; - this.nodes = nodes; - // set to the last node so after nextDataClient() is called it will scan from the first node - this.curPos = nodes.size() - 1; - this.curSource = nodes.get(curPos); - } - - public boolean hasNextDataClient(boolean byTimestamp, long timestamp) { - if (this.nodes.isEmpty()) { - this.isNoData = false; - return false; - } - - int nextNodePos = (this.curPos + 1) % this.nodes.size(); - while (true) { - Node node = nodes.get(nextNodePos); - logger.debug("querying {} from {} of {}", request.path, node, partitionGroup.getHeader()); - try { - Long newReaderId = getReaderId(node, byTimestamp, timestamp); - if (newReaderId != null) { - logger.debug("get a readerId {} for {} from {}", newReaderId, request.path, node); - if (newReaderId != -1) { - this.readerId = newReaderId; - this.curSource = node; - this.curPos = nextNodePos; - return true; - } else { - // the id being -1 means there is no satisfying data on the remote node, create an - // empty reader to reduce further communication - this.isNoClient = true; - this.isNoData = true; - return false; - } - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Cannot query {} from {}", this.request.path, node, e); - } catch (Exception e) { - logger.error("Cannot query {} from {}", this.request.path, node, e); - } finally { - // register the node so the remote resources can be released - 
context.registerRemoteNode(node, partitionGroup.getHeader()); - } - nextNodePos = (nextNodePos + 1) % this.nodes.size(); - if (nextNodePos == this.curPos) { - // has iterate over all nodes - isNoClient = true; - break; - } - } - // all nodes are failed - this.isNoData = false; - return false; - } - - private Long getReaderId(Node node, boolean byTimestamp, long timestamp) - throws InterruptedException, TException, IOException { - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - return applyForReaderIdAsync(node, byTimestamp, timestamp); - } - return applyForReaderIdSync(node, byTimestamp, timestamp); - } - - private Long applyForReaderIdAsync(Node node, boolean byTimestamp, long timestamp) - throws IOException, TException, InterruptedException { - Long newReaderId; - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - if (byTimestamp) { - newReaderId = SyncClientAdaptor.querySingleSeriesByTimestamp(client, request); - } else { - newReaderId = SyncClientAdaptor.querySingleSeries(client, request, timestamp); - } - return newReaderId; - } - - private Long applyForReaderIdSync(Node node, boolean byTimestamp, long timestamp) - throws IOException, TException { - - Long newReaderId; - SyncDataClient client = null; - try { - client = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - if (byTimestamp) { - newReaderId = client.querySingleSeriesByTimestamp(request); - } else { - Filter newFilter; - // add timestamp to as a timeFilter to skip the data which has been read - if (request.isSetTimeFilterBytes()) { - Filter timeFilter = FilterFactory.deserialize(request.timeFilterBytes); - newFilter = new AndFilter(timeFilter, TimeFilter.gt(timestamp)); - } else { - newFilter = TimeFilter.gt(timestamp); - } - request.setTimeFilterBytes(SerializeUtils.serializeFilter(newFilter)); - newReaderId = client.querySingleSeries(request); - } - return newReaderId; - } catch (IOException | TException e) { - // the connection may be broken, close it to avoid it being reused - if (client != null) { - client.close(); - } - throw e; - } finally { - if (client != null) { - client.returnSelf(); - } - } - } - - public long getReaderId() { - return this.readerId; - } - - public TSDataType getDataType() { - return this.dataType; - } - - public RaftNode getHeader() { - return partitionGroup.getHeader(); - } - - Node getCurrentNode() { - return this.curSource; - } - - AsyncDataClient getCurAsyncClient(int timeout) throws IOException { - return isNoClient - ? null - : ClusterIoTDB.getInstance().getAsyncDataClient(this.curSource, timeout); - } - - SyncDataClient getCurSyncClient(int timeout) throws IOException { - return isNoClient - ? null - : ClusterIoTDB.getInstance().getSyncDataClient(this.curSource, timeout); - } - - public boolean isNoData() { - return this.isNoData; - } - - private boolean isNoClient() { - return this.isNoClient; - } - - @Override - public String toString() { - return "DataSourceInfo{" - + "readerId=" - + readerId - + ", curSource=" - + curSource - + ", partitionGroup=" - + partitionGroup - + ", request=" - + request - + '}'; - } - - /** - * Check if there is still any available client and there is still any left data. - * - * @return true if there is an available client and data to read, false all data has been read. - * @throws IOException if all clients are unavailable. 
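// A minimal sketch (not IoTDB code) of the contract documented above: "no client" records that
// every replica of the group has been exhausted, while "no data" records that a replica
// explicitly answered that nothing satisfies the query. Only "no client" without "no data" is
// an error; "no client" together with "no data" simply ends the read with an empty result.
final class SourceStateSketch {

  private boolean noClient;
  private boolean noData;

  void markNoData() { // a replica returned reader id -1
    noClient = true;
    noData = true;
  }

  void markAllNodesFailed() { // every replica threw or timed out
    noClient = true;
    noData = false;
  }

  /** true: keep reading, false: finished with no data, IOException: replicas unreachable. */
  boolean canRead() throws java.io.IOException {
    if (noClient) {
      if (!noData) {
        throw new java.io.IOException("no available client.");
      }
      return false;
    }
    return true;
  }
}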
- */ - boolean checkCurClient() throws IOException { - if (isNoClient()) { - if (!isNoData()) { - throw new IOException("no available client."); - } else { - // no data - return false; - } - } - - return true; - } - - boolean switchNode(boolean byTimestamp, long timeOffset) throws IOException { - boolean hasClient = hasNextDataClient(byTimestamp, timeOffset); - logger.info("Client failed, changed to {}", curSource); - if (!hasClient) { - if (!isNoData()) { - throw new IOException("no available client."); - } else { - // no data - return false; - } - } - return true; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/EmptyReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/EmptyReader.java deleted file mode 100644 index 5b7785b7dd5b..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/EmptyReader.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.reader; - -import org.apache.iotdb.db.query.aggregation.AggregateResult; -import org.apache.iotdb.db.query.executor.groupby.GroupByExecutor; -import org.apache.iotdb.db.query.reader.series.BaseManagedSeriesReader; -import org.apache.iotdb.db.query.reader.series.IAggregateReader; -import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp; -import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader; -import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.common.BatchData; -import org.apache.iotdb.tsfile.read.reader.IPointReader; -import org.apache.iotdb.tsfile.utils.Pair; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -/** A placeholder when the remote node does not contain satisfying data of a series. 
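// A minimal sketch (not IoTDB code) of why an always-empty reader is returned here instead of
// null: callers can register it in a merge or iterate it like any other reader without special
// null handling. PointSource is a hypothetical stand-in for IPointReader.
final class EmptyReaderSketch {

  interface PointSource {
    boolean hasNext();

    long next();
  }

  /** The null object: permanently exhausted, safe to mix with real sources. */
  static final PointSource EMPTY =
      new PointSource() {
        @Override
        public boolean hasNext() {
          return false;
        }

        @Override
        public long next() {
          throw new java.util.NoSuchElementException();
        }
      };

  static long countPoints(Iterable<PointSource> sources) {
    long count = 0;
    for (PointSource source : sources) { // EMPTY participates without any special case
      while (source.hasNext()) {
        source.next();
        count++;
      }
    }
    return count;
  }
}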
*/ -public class EmptyReader extends BaseManagedSeriesReader - implements ManagedSeriesReader, - IAggregateReader, - IPointReader, - GroupByExecutor, - IReaderByTimestamp { - - private List aggregationResults = new ArrayList<>(); - - @Override - public boolean hasNextBatch() { - return false; - } - - @Override - public BatchData nextBatch() { - return null; - } - - @Override - public boolean hasNextTimeValuePair() { - return false; - } - - @Override - public TimeValuePair nextTimeValuePair() { - return null; - } - - @Override - public TimeValuePair currentTimeValuePair() { - return null; - } - - @Override - public void close() { - // do nothing - } - - @Override - public boolean hasNextFile() { - return false; - } - - @Override - public boolean canUseCurrentFileStatistics() { - return false; - } - - @Override - public Statistics currentFileStatistics() { - return null; - } - - @Override - public void skipCurrentFile() { - // do nothing - } - - @Override - public boolean hasNextChunk() { - return false; - } - - @Override - public boolean canUseCurrentChunkStatistics() { - return false; - } - - @Override - public Statistics currentChunkStatistics() { - return null; - } - - @Override - public void skipCurrentChunk() { - // do nothing - } - - @Override - public boolean hasNextPage() { - return false; - } - - @Override - public boolean canUseCurrentPageStatistics() { - return false; - } - - @Override - public Statistics currentPageStatistics() { - return null; - } - - @Override - public void skipCurrentPage() { - // do nothing - } - - @Override - public BatchData nextPage() { - return null; - } - - @Override - public boolean isAscending() { - return false; - } - - @Override - public void addAggregateResult(AggregateResult aggrResult) { - aggregationResults.add(aggrResult); - } - - @Override - public List calcResult(long curStartTime, long curEndTime) { - return aggregationResults; - } - - @Override - public Object[] getValuesInTimestamps(long[] timestamps, int length) throws IOException { - return null; - } - - @Override - public boolean readerIsEmpty() { - return false; - } - - @Override - public Pair peekNextNotNullValue(long nextStartTime, long nextEndTime) { - return null; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedDescPriorityMergeReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedDescPriorityMergeReader.java deleted file mode 100644 index 653dea4dc6f4..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedDescPriorityMergeReader.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.reader; - -import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader; -import org.apache.iotdb.db.query.reader.universal.DescPriorityMergeReader; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.common.BatchData; -import org.apache.iotdb.tsfile.read.reader.IPointReader; - -import java.io.IOException; -import java.util.NoSuchElementException; - -@SuppressWarnings("common-java:DuplicatedBlocks") -public class ManagedDescPriorityMergeReader extends DescPriorityMergeReader - implements ManagedSeriesReader, IPointReader { - - private static final int BATCH_SIZE = 4096; - - private volatile boolean managedByPool; - private volatile boolean hasRemaining; - - private BatchData batchData; - private TSDataType dataType; - - public ManagedDescPriorityMergeReader(TSDataType dataType) { - this.dataType = dataType; - } - - @Override - public boolean isManagedByQueryManager() { - return managedByPool; - } - - @Override - public void setManagedByQueryManager(boolean managedByQueryManager) { - this.managedByPool = managedByQueryManager; - } - - @Override - public boolean hasRemaining() { - return hasRemaining; - } - - @Override - public void setHasRemaining(boolean hasRemaining) { - this.hasRemaining = hasRemaining; - } - - @Override - public boolean hasNextBatch() throws IOException { - if (batchData != null) { - return true; - } - constructBatch(); - return batchData != null; - } - - private void constructBatch() throws IOException { - if (hasNextTimeValuePair()) { - batchData = new BatchData(dataType); - while (hasNextTimeValuePair() && batchData.length() < BATCH_SIZE) { - TimeValuePair next = nextTimeValuePair(); - batchData.putAnObject(next.getTimestamp(), next.getValue().getValue()); - } - } - } - - @Override - public BatchData nextBatch() throws IOException { - if (!hasNextBatch()) { - throw new NoSuchElementException(); - } - BatchData ret = batchData; - batchData = null; - return ret; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedPriorityMergeReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedPriorityMergeReader.java deleted file mode 100644 index e57f4d92166f..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/ManagedPriorityMergeReader.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.reader; - -import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader; -import org.apache.iotdb.db.query.reader.universal.PriorityMergeReader; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.common.BatchData; - -import java.io.IOException; -import java.util.NoSuchElementException; - -@SuppressWarnings("common-java:DuplicatedBlocks") -public class ManagedPriorityMergeReader extends PriorityMergeReader implements ManagedSeriesReader { - - private static final int BATCH_SIZE = 4096; - - private volatile boolean managedByPool; - private volatile boolean hasRemaining; - - private BatchData batchData; - private TSDataType dataType; - - public ManagedPriorityMergeReader(TSDataType dataType) { - this.dataType = dataType; - } - - @Override - public boolean isManagedByQueryManager() { - return managedByPool; - } - - @Override - public void setManagedByQueryManager(boolean managedByQueryManager) { - this.managedByPool = managedByQueryManager; - } - - @Override - public boolean hasRemaining() { - return hasRemaining; - } - - @Override - public void setHasRemaining(boolean hasRemaining) { - this.hasRemaining = hasRemaining; - } - - @Override - public boolean hasNextBatch() throws IOException { - if (batchData != null) { - return true; - } - constructBatch(); - return batchData != null; - } - - private void constructBatch() throws IOException { - if (hasNextTimeValuePair()) { - batchData = new BatchData(dataType); - while (hasNextTimeValuePair() && batchData.length() < BATCH_SIZE) { - TimeValuePair next = nextTimeValuePair(); - batchData.putAnObject(next.getTimestamp(), next.getValue().getValue()); - } - } - } - - @Override - public BatchData nextBatch() throws IOException { - if (!hasNextBatch()) { - throw new NoSuchElementException(); - } - BatchData ret = batchData; - batchData = null; - return ret; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/MergedReaderByTime.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/MergedReaderByTime.java deleted file mode 100644 index 802355c721a0..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/MergedReaderByTime.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.reader; - -import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp; - -import java.io.IOException; -import java.util.List; - -public class MergedReaderByTime implements IReaderByTimestamp { - - private List innerReaders; - - public MergedReaderByTime(List innerReaders) { - this.innerReaders = innerReaders; - } - - @Override - public Object[] getValuesInTimestamps(long[] timestamps, int length) throws IOException { - for (IReaderByTimestamp innerReader : innerReaders) { - if (innerReader != null) { - Object[] results = innerReader.getValuesInTimestamps(timestamps, length); - if (results != null) { - return results; - } - } - } - return null; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/RemoteSeriesReaderByTimestamp.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/RemoteSeriesReaderByTimestamp.java deleted file mode 100644 index e9103cd2f7c1..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/RemoteSeriesReaderByTimestamp.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.reader; - -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.server.handlers.caller.GenericHandler; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.atomic.AtomicReference; - -public class RemoteSeriesReaderByTimestamp implements IReaderByTimestamp { - - private static final Logger logger = LoggerFactory.getLogger(RemoteSeriesReaderByTimestamp.class); - private DataSourceInfo sourceInfo; - - private AtomicReference fetchResult = new AtomicReference<>(); - private GenericHandler handler; - - public RemoteSeriesReaderByTimestamp(DataSourceInfo sourceInfo) { - this.sourceInfo = sourceInfo; - handler = new GenericHandler<>(sourceInfo.getCurrentNode(), fetchResult); - } - - @Override - public Object[] getValuesInTimestamps(long[] timestamps, int length) throws IOException { - if (!sourceInfo.checkCurClient()) { - return null; - } - - ByteBuffer result; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - result = fetchResultAsync(timestamps, length); - } else { - result = fetchResultSync(timestamps, length); - } - - return SerializeUtils.deserializeObjects(result); - } - - @SuppressWarnings("java:S2274") // enable timeout - private ByteBuffer fetchResultAsync(long[] timestamps, int length) throws IOException { - // convert long[] to List, which is used for thrift - List timestampList = new ArrayList<>(length); - for (int i = 0; i < length; i++) { - timestampList.add(timestamps[i]); - } - synchronized (fetchResult) { - fetchResult.set(null); - try { - sourceInfo - .getCurAsyncClient(ClusterConstant.getReadOperationTimeoutMS()) - .fetchSingleSeriesByTimestamps( - sourceInfo.getHeader(), sourceInfo.getReaderId(), timestampList, handler); - fetchResult.wait(ClusterConstant.getReadOperationTimeoutMS()); - } catch (TException e) { - // try other node - if (!sourceInfo.switchNode(true, timestamps[0])) { - return null; - } - return fetchResultAsync(timestamps, length); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("Query {} interrupted", sourceInfo); - return null; - } - } - return fetchResult.get(); - } - - private ByteBuffer fetchResultSync(long[] timestamps, int length) throws IOException { - SyncDataClient curSyncClient = null; - // convert long[] to List, which is used for thrift - List timestampList = new ArrayList<>(length); - for (int i = 0; i < length; i++) { - timestampList.add(timestamps[i]); - } - try { - curSyncClient = sourceInfo.getCurSyncClient(ClusterConstant.getReadOperationTimeoutMS()); - return curSyncClient.fetchSingleSeriesByTimestamps( - sourceInfo.getHeader(), sourceInfo.getReaderId(), timestampList); - } catch (TException e) { - curSyncClient.close(); - // try other node - if (!sourceInfo.switchNode(true, timestamps[0])) { - return null; - } - return fetchResultSync(timestamps, length); - } finally { - if (curSyncClient != null) { - curSyncClient.returnSelf(); - } - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/RemoteSimpleSeriesReader.java 
b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/RemoteSimpleSeriesReader.java deleted file mode 100644 index 8a74b5c69b7e..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/RemoteSimpleSeriesReader.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.reader; - -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.server.handlers.caller.GenericHandler; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.common.BatchData; -import org.apache.iotdb.tsfile.read.reader.IPointReader; -import org.apache.iotdb.tsfile.utils.TsPrimitiveType; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.NoSuchElementException; -import java.util.concurrent.atomic.AtomicReference; - -/** - * RemoteSimpleSeriesReader is a reader without value filter that reads points from a remote side. 
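// A minimal sketch (not IoTDB code) of how this reader blocks on the asynchronous client when
// isUseAsyncServer is enabled: the callback stores the serialized batch in a shared
// AtomicReference and notifies, while the reader thread waits on the same reference with the
// read timeout. AsyncFetch is a hypothetical stand-in for the thrift async client call, so a
// null result stands for "failed or timed out" and triggers the node-switch path.
final class AsyncFetchSketch {

  interface AsyncFetch {
    /** Starts the RPC and eventually hands the serialized batch to the callback. */
    void fetch(long readerId, java.util.function.Consumer<java.nio.ByteBuffer> onComplete);
  }

  static java.nio.ByteBuffer fetchBlocking(AsyncFetch client, long readerId, long timeoutMs)
      throws InterruptedException {
    final java.util.concurrent.atomic.AtomicReference<java.nio.ByteBuffer> result =
        new java.util.concurrent.atomic.AtomicReference<>();
    synchronized (result) {
      result.set(null);
      client.fetch(
          readerId,
          buffer -> {
            synchronized (result) {
              result.set(buffer);
              result.notifyAll(); // wake the waiting reader thread
            }
          });
      if (result.get() == null) { // the callback may already have run on this thread
        result.wait(timeoutMs); // monitor is released while waiting
      }
    }
    return result.get();
  }
}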
- */ -public class RemoteSimpleSeriesReader implements IPointReader { - - private static final Logger logger = LoggerFactory.getLogger(RemoteSimpleSeriesReader.class); - private DataSourceInfo sourceInfo; - private long lastTimestamp; - - private BatchData cachedBatch; - - private AtomicReference fetchResult = new AtomicReference<>(); - private GenericHandler handler; - - public RemoteSimpleSeriesReader(DataSourceInfo sourceInfo) { - this.sourceInfo = sourceInfo; - handler = new GenericHandler<>(sourceInfo.getCurrentNode(), fetchResult); - lastTimestamp = Long.MIN_VALUE; - } - - @Override - public boolean hasNextTimeValuePair() throws IOException { - if (cachedBatch != null && cachedBatch.hasCurrent()) { - return true; - } - fetchBatch(); - return cachedBatch != null && cachedBatch.hasCurrent(); - } - - @Override - public TimeValuePair nextTimeValuePair() throws IOException { - if (!hasNextTimeValuePair()) { - throw new NoSuchElementException(); - } - this.lastTimestamp = cachedBatch.currentTime(); - TimeValuePair timeValuePair = - new TimeValuePair( - cachedBatch.currentTime(), - TsPrimitiveType.getByType(sourceInfo.getDataType(), cachedBatch.currentValue())); - cachedBatch.next(); - return timeValuePair; - } - - @Override - public TimeValuePair currentTimeValuePair() throws IOException { - if (!hasNextTimeValuePair()) { - throw new NoSuchElementException(); - } - return new TimeValuePair( - cachedBatch.currentTime(), - TsPrimitiveType.getByType(sourceInfo.getDataType(), cachedBatch.currentValue())); - } - - @Override - public void close() { - // closed by Resource manager - } - - private void fetchBatch() throws IOException { - if (!sourceInfo.checkCurClient()) { - cachedBatch = null; - return; - } - - ByteBuffer result; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - result = fetchResultAsync(); - } else { - result = fetchResultSync(); - } - - cachedBatch = SerializeUtils.deserializeBatchData(result); - if (logger.isDebugEnabled()) { - logger.debug( - "Fetched a batch from {}, size:{}", - sourceInfo.getCurrentNode(), - cachedBatch == null ? 
0 : cachedBatch.length()); - } - } - - @SuppressWarnings("java:S2274") // enable timeout - private ByteBuffer fetchResultAsync() throws IOException { - synchronized (fetchResult) { - fetchResult.set(null); - try { - sourceInfo - .getCurAsyncClient(ClusterConstant.getReadOperationTimeoutMS()) - .fetchSingleSeries(sourceInfo.getHeader(), sourceInfo.getReaderId(), handler); - fetchResult.wait(ClusterConstant.getReadOperationTimeoutMS()); - } catch (TException e) { - // try other node - if (!sourceInfo.switchNode(false, lastTimestamp)) { - return null; - } - return fetchResultAsync(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("Query {} interrupted", sourceInfo); - return null; - } - } - return fetchResult.get(); - } - - private ByteBuffer fetchResultSync() throws IOException { - SyncDataClient curSyncClient = null; - try { - curSyncClient = sourceInfo.getCurSyncClient(ClusterConstant.getReadOperationTimeoutMS()); - return curSyncClient.fetchSingleSeries(sourceInfo.getHeader(), sourceInfo.getReaderId()); - } catch (TException e) { - curSyncClient.close(); - // try other node - if (!sourceInfo.switchNode(false, lastTimestamp)) { - return null; - } - return fetchResultSync(); - } finally { - if (curSyncClient != null) { - curSyncClient.returnSelf(); - } - } - } - - void clearCurDataForTest() { - this.cachedBatch = null; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AbstractMultPointReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AbstractMultPointReader.java deleted file mode 100644 index 0cd4d8993ff8..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AbstractMultPointReader.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.reader.IPointReader; - -import java.io.IOException; -import java.util.Set; - -public abstract class AbstractMultPointReader implements IPointReader { - - public abstract boolean hasNextTimeValuePair(String fullPath) throws IOException; - - public abstract TimeValuePair nextTimeValuePair(String fullPath) throws IOException; - - public abstract Set<String> getAllPaths(); - - /** - * This method is not supported. - * - * @return only false - * @throws IOException - */ - @Override - @Deprecated - public boolean hasNextTimeValuePair() throws IOException { - return false; - } - - /** - * This method is not supported. - * - * @return only null - * @throws IOException - */ - @Override - @Deprecated - public TimeValuePair nextTimeValuePair() throws IOException { - return null; - } - - /** - * This method is not supported. - * - * @return only null - * @throws IOException - */ - @Override - @Deprecated - public TimeValuePair currentTimeValuePair() throws IOException { - return null; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathAscPriorityMergeReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathAscPriorityMergeReader.java deleted file mode 100644 index 7891ac74a4ee..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathAscPriorityMergeReader.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.db.query.reader.universal.Element; -import org.apache.iotdb.db.query.reader.universal.PriorityMergeReader; - -import java.util.PriorityQueue; - -/** - * This class extends {@link PriorityMergeReader} for data sources with different - * priorities. 
- */ -public class AssignPathAscPriorityMergeReader extends PriorityMergeReader - implements IAssignPathPriorityMergeReader { - - private String fullPath; - - public AssignPathAscPriorityMergeReader(String fullPath) { - super(); - this.fullPath = fullPath; - } - - @Override - public PriorityQueue<Element> getHeap() { - return heap; - } - - @Override - public String getFullPath() { - return fullPath; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathDescPriorityMergeReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathDescPriorityMergeReader.java deleted file mode 100644 index a38f491f966e..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathDescPriorityMergeReader.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.db.query.reader.universal.DescPriorityMergeReader; -import org.apache.iotdb.db.query.reader.universal.Element; - -import java.util.PriorityQueue; - -/** - * This class extends {@link DescPriorityMergeReader} for data sources with different - * priorities. - */ -public class AssignPathDescPriorityMergeReader extends DescPriorityMergeReader - implements IAssignPathPriorityMergeReader { - - private String fullPath; - - public AssignPathDescPriorityMergeReader(String fullPath) { - super(); - this.fullPath = fullPath; - } - - @Override - public PriorityQueue<Element> getHeap() { - return heap; - } - - @Override - public String getFullPath() { - return fullPath; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathManagedMergeReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathManagedMergeReader.java deleted file mode 100644 index 6208966a2c6d..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathManagedMergeReader.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.common.BatchData; -import org.apache.iotdb.tsfile.read.reader.IPointReader; - -import java.io.IOException; -import java.util.NoSuchElementException; - -public class AssignPathManagedMergeReader implements ManagedSeriesReader, IPointReader { - - private static final int BATCH_SIZE = 4096; - private volatile boolean managedByPool; - private volatile boolean hasRemaining; - - private BatchData batchData; - private TSDataType dataType; - - private final IAssignPathPriorityMergeReader underlyingReader; - - public AssignPathManagedMergeReader(String fullPath, TSDataType dataType, boolean isAscending) { - underlyingReader = - isAscending - ? new AssignPathAscPriorityMergeReader(fullPath) - : new AssignPathDescPriorityMergeReader(fullPath); - this.dataType = dataType; - } - - public void addReader(AbstractMultPointReader reader, long priority) throws IOException { - underlyingReader.addReader(reader, priority); - } - - @Override - public boolean isManagedByQueryManager() { - return managedByPool; - } - - @Override - public void setManagedByQueryManager(boolean managedByQueryManager) { - this.managedByPool = managedByQueryManager; - } - - @Override - public boolean hasRemaining() { - return hasRemaining; - } - - @Override - public void setHasRemaining(boolean hasRemaining) { - this.hasRemaining = hasRemaining; - } - - @Override - public boolean hasNextBatch() throws IOException { - if (batchData != null) { - return true; - } - constructBatch(); - return batchData != null; - } - - private void constructBatch() throws IOException { - if (underlyingReader.hasNextTimeValuePair()) { - batchData = new BatchData(dataType); - while (underlyingReader.hasNextTimeValuePair() && batchData.length() < BATCH_SIZE) { - TimeValuePair next = underlyingReader.nextTimeValuePair(); - batchData.putAnObject(next.getTimestamp(), next.getValue().getValue()); - } - } - } - - @Override - public BatchData nextBatch() throws IOException { - if (!hasNextBatch()) { - throw new NoSuchElementException(); - } - BatchData ret = batchData; - batchData = null; - return ret; - } - - @Override - public boolean hasNextTimeValuePair() throws IOException { - return underlyingReader.hasNextTimeValuePair(); - } - - @Override - public TimeValuePair nextTimeValuePair() throws IOException { - return underlyingReader.nextTimeValuePair(); - } - - @Override - public TimeValuePair currentTimeValuePair() throws IOException { - return underlyingReader.currentTimeValuePair(); - } - - @Override - public void close() throws IOException { - underlyingReader.close(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/IAssignPathPriorityMergeReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/IAssignPathPriorityMergeReader.java deleted file mode 100644 index a3442884bb50..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/IAssignPathPriorityMergeReader.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.db.query.reader.universal.Element; -import org.apache.iotdb.db.query.reader.universal.PriorityMergeReader; -import org.apache.iotdb.tsfile.read.reader.IPointReader; - -import java.io.IOException; -import java.util.PriorityQueue; - -/** Common codes of different priorityMergeReader */ -public interface IAssignPathPriorityMergeReader extends IPointReader { - PriorityQueue getHeap(); - - String getFullPath(); - - default void addReader(AbstractMultPointReader reader, long priority) throws IOException { - if (reader.hasNextTimeValuePair(getFullPath())) { - getHeap() - .add( - new MultElement( - getFullPath(), - reader, - reader.nextTimeValuePair(getFullPath()), - new PriorityMergeReader.MergeReaderPriority(priority, 0))); - } else { - reader.close(); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/IMultBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/IMultBatchReader.java deleted file mode 100644 index 07c63cf85a7a..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/IMultBatchReader.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.tsfile.read.common.BatchData; -import org.apache.iotdb.tsfile.read.reader.IBatchReader; - -import java.io.IOException; - -public interface IMultBatchReader extends IBatchReader { - - boolean hasNextBatch(String fullPath) throws IOException; - - BatchData nextBatch(String fullPath) throws IOException; -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultBatchReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultBatchReader.java deleted file mode 100644 index dc47debb8d03..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultBatchReader.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.tsfile.read.common.BatchData; -import org.apache.iotdb.tsfile.read.reader.IBatchReader; - -import java.io.IOException; -import java.util.Map; - -public class MultBatchReader implements IMultBatchReader { - - private Map<String, IBatchReader> pathBatchReaders; - - public MultBatchReader(Map<String, IBatchReader> pathBatchReaders) { - this.pathBatchReaders = pathBatchReaders; - } - - /** - * whether any underlying reader still has batch data - * - * @return true if at least one reader has a next batch, otherwise false - * @throws IOException - */ - @Override - public boolean hasNextBatch() throws IOException { - for (IBatchReader reader : pathBatchReaders.values()) { - if (reader != null && reader.hasNextBatch()) { - return true; - } - } - return false; - } - - @Override - public boolean hasNextBatch(String fullPath) throws IOException { - IBatchReader reader = pathBatchReaders.get(fullPath); - return reader != null && reader.hasNextBatch(); - } - - @Override - public BatchData nextBatch(String fullPath) throws IOException { - return pathBatchReaders.get(fullPath).nextBatch(); - } - - @Override - public BatchData nextBatch() throws IOException { - throw new UnsupportedOperationException(); - } - - /** - * closed by the query resource manager - * - * @throws IOException - */ - @Override - public void close() throws IOException {} -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultDataSourceInfo.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultDataSourceInfo.java deleted file mode 100644 index 64ebfc42317b..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultDataSourceInfo.java +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.rpc.thrift.MultSeriesQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.server.handlers.caller.GenericHandler; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.filter.TimeFilter; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; -import org.apache.iotdb.tsfile.read.filter.factory.FilterFactory; -import org.apache.iotdb.tsfile.read.filter.operator.AndFilter; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; -import java.util.concurrent.atomic.AtomicReference; - -/** - * provide client which could connect to all nodes of the partitionGroup, and mult reader Notice: - * methods like getter should be called only after nextDataClient() has been called - */ -public class MultDataSourceInfo { - - private static final Logger logger = LoggerFactory.getLogger(MultDataSourceInfo.class); - - private long readerId; - private Node curSource; - private PartitionGroup partitionGroup; - private List partialPaths; - private List dataTypes; - private MultSeriesQueryRequest request; - private RemoteQueryContext context; - private List nodes; - private int curPos; - private boolean isNoData = false; - private boolean isNoClient = false; - - public MultDataSourceInfo( - PartitionGroup group, - List partialPaths, - List dataTypes, - MultSeriesQueryRequest request, - RemoteQueryContext context, - List nodes) { - this.readerId = -1; - this.partitionGroup = group; - this.partialPaths = partialPaths; - this.dataTypes = dataTypes; - this.request = request; - this.context = context; - this.nodes = nodes; - // set to the last node so after nextDataClient() is called it will scan from the first node - this.curPos = nodes.size() - 1; - this.curSource = nodes.get(curPos); - } - - public boolean hasNextDataClient(long timestamp) { - if (this.nodes.isEmpty()) { - this.isNoData = false; - return false; - } - - int nextNodePos = (this.curPos + 1) % this.nodes.size(); - while (true) { - Node node = nodes.get(nextNodePos); - logger.debug("querying {} from {} of {}", request.path, node, partitionGroup.getHeader()); - try { - Long newReaderId = getReaderId(node, timestamp); - if (newReaderId != 
null) { - logger.debug("get a readerId {} for {} from {}", newReaderId, request.path, node); - if (newReaderId != -1) { - this.readerId = newReaderId; - this.curSource = node; - this.curPos = nextNodePos; - return true; - } else { - // the id being -1 means there is no satisfying data on the remote node, create an - // empty reader to reduce further communication - this.isNoClient = true; - this.isNoData = true; - return false; - } - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Cannot query {} from {}", this.request.path, node, e); - } catch (Exception e) { - logger.error("Cannot query {} from {}", this.request.path, node, e); - } finally { - // register the node so the remote resources can be released - context.registerRemoteNode(node, partitionGroup.getHeader()); - } - nextNodePos = (nextNodePos + 1) % this.nodes.size(); - if (nextNodePos == this.curPos) { - // has iterated over all nodes - isNoClient = true; - break; - } - } - // all nodes have failed - this.isNoData = false; - return false; - } - - public List<PartialPath> getPartialPaths() { - return partialPaths; - } - - private Long getReaderId(Node node, long timestamp) - throws TException, InterruptedException, IOException { - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - return applyForReaderIdAsync(node, timestamp); - } - return applyForReaderIdSync(node, timestamp); - } - - private Long applyForReaderIdAsync(Node node, long timestamp) - throws IOException, TException, InterruptedException { - AsyncDataClient client = - ClusterIoTDB.getInstance() - .getAsyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - AtomicReference<Long> result = new AtomicReference<>(); - GenericHandler<Long> handler = new GenericHandler<>(client.getNode(), result); - Filter newFilter; - // add the timestamp as a time filter to skip data that has already been read - if (request.isSetTimeFilterBytes()) { - Filter timeFilter = FilterFactory.deserialize(request.timeFilterBytes); - newFilter = new AndFilter(timeFilter, TimeFilter.gt(timestamp)); - } else { - newFilter = TimeFilter.gt(timestamp); - } - request.setTimeFilterBytes(SerializeUtils.serializeFilter(newFilter)); - client.queryMultSeries(request, handler); - synchronized (result) { - if (result.get() == null && handler.getException() == null) { - result.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - } - return result.get(); - } - - private Long applyForReaderIdSync(Node node, long timestamp) throws IOException, TException { - - long newReaderId; - SyncDataClient client = null; - try { - client = - ClusterIoTDB.getInstance() - .getSyncDataClient(node, ClusterConstant.getReadOperationTimeoutMS()); - Filter newFilter; - // add the timestamp as a time filter to skip data that has already been read - if (request.isSetTimeFilterBytes()) { - Filter timeFilter = FilterFactory.deserialize(request.timeFilterBytes); - newFilter = new AndFilter(timeFilter, TimeFilter.gt(timestamp)); - } else { - newFilter = TimeFilter.gt(timestamp); - } - request.setTimeFilterBytes(SerializeUtils.serializeFilter(newFilter)); - newReaderId = client.queryMultSeries(request); - return newReaderId; - } catch (TException e) { - // the connection may be broken, close it to avoid it being reused - client.close(); - throw e; - } finally { - if (client != null) { - client.returnSelf(); - } - } - } - - public long getReaderId() { - return this.readerId; - } - - public List<TSDataType> getDataTypes() { - return this.dataTypes; - } - - public RaftNode getHeader() { - return 
partitionGroup.getHeader(); - } - - AsyncDataClient getCurAsyncClient(int timeout) throws IOException { - return isNoClient - ? null - : ClusterIoTDB.getInstance().getAsyncDataClient(this.curSource, timeout); - } - - SyncDataClient getCurSyncClient(int timeout) throws IOException { - return isNoClient - ? null - : ClusterIoTDB.getInstance().getSyncDataClient(this.curSource, timeout); - } - - public boolean isNoData() { - return this.isNoData; - } - - private boolean isNoClient() { - return this.isNoClient; - } - - @Override - public String toString() { - return "DataSourceInfo{" - + "readerId=" - + readerId - + ", curSource=" - + curSource - + ", partitionGroup=" - + partitionGroup - + ", request=" - + request - + '}'; - } - - /** - * Check if there is still any available client and there is still any left data. - * - * @return true if there is an available client and data to read, false all data has been read. - * @throws IOException if all clients are unavailable. - */ - boolean checkCurClient() throws IOException { - if (isNoClient()) { - if (!isNoData()) { - throw new IOException("no available client."); - } else { - // no data - return false; - } - } - return true; - } - - Node getCurrentNode() { - return this.curSource; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultElement.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultElement.java deleted file mode 100644 index 991c249470a4..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultElement.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.db.query.reader.universal.Element; -import org.apache.iotdb.db.query.reader.universal.PriorityMergeReader; -import org.apache.iotdb.tsfile.read.TimeValuePair; - -import java.io.IOException; - -/** a special Element implementation when querying data from multi readers */ -public class MultElement extends Element { - private final String fullPath; - - public MultElement( - String fullPath, - AbstractMultPointReader reader, - TimeValuePair timeValuePair, - PriorityMergeReader.MergeReaderPriority priority) { - super(reader, timeValuePair, priority); - this.fullPath = fullPath; - } - - @Override - public boolean hasNext() throws IOException { - return ((AbstractMultPointReader) reader).hasNextTimeValuePair(fullPath); - } - - @Override - public void next() throws IOException { - timeValuePair = ((AbstractMultPointReader) reader).nextTimeValuePair(fullPath); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultEmptyReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultEmptyReader.java deleted file mode 100644 index 2bb463ca221e..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultEmptyReader.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.tsfile.read.TimeValuePair; - -import java.io.IOException; -import java.util.Set; - -/** empty mult reader */ -public class MultEmptyReader extends AbstractMultPointReader { - - private Set fullPaths; - - public MultEmptyReader(Set fullPaths) { - this.fullPaths = fullPaths; - } - - @Override - public boolean hasNextTimeValuePair(String fullPath) throws IOException { - return false; - } - - @Override - public TimeValuePair nextTimeValuePair(String fullPath) throws IOException { - return null; - } - - @Override - public Set getAllPaths() { - return fullPaths; - } - - @Override - public void close() {} -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultSeriesRawDataPointReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultSeriesRawDataPointReader.java deleted file mode 100644 index 0561cfe2aaba..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/MultSeriesRawDataPointReader.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.reader.IPointReader; - -import java.io.IOException; -import java.util.Map; -import java.util.Set; - -/** mult reader of local partition group */ -public class MultSeriesRawDataPointReader extends AbstractMultPointReader { - private Map partitalPathReaders; - - public MultSeriesRawDataPointReader(Map partitalPathReaders) { - this.partitalPathReaders = partitalPathReaders; - } - - @Override - public boolean hasNextTimeValuePair(String fullPath) throws IOException { - IPointReader seriesRawDataPointReader = partitalPathReaders.get(fullPath); - return seriesRawDataPointReader.hasNextTimeValuePair(); - } - - @Override - public TimeValuePair nextTimeValuePair(String fullPath) throws IOException { - IPointReader seriesRawDataPointReader = partitalPathReaders.get(fullPath); - return seriesRawDataPointReader.nextTimeValuePair(); - } - - @Override - public Set getAllPaths() { - return partitalPathReaders.keySet(); - } - - @Override - public void close() {} -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/RemoteMultSeriesReader.java b/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/RemoteMultSeriesReader.java deleted file mode 100644 index f0b480284795..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/query/reader/mult/RemoteMultSeriesReader.java +++ /dev/null @@ -1,227 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.server.handlers.caller.GenericHandler; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.common.BatchData; -import org.apache.iotdb.tsfile.utils.TsPrimitiveType; - -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.atomic.AtomicReference; - -/** multi reader without value filter that reads points from a remote side. */ -public class RemoteMultSeriesReader extends AbstractMultPointReader { - - private static final Logger logger = LoggerFactory.getLogger(RemoteMultSeriesReader.class); - private static final int FETCH_BATCH_DATA_SIZE = 10; - - private MultDataSourceInfo sourceInfo; - - private Map> cachedBatchs; - - private AtomicReference> fetchResult = new AtomicReference<>(); - private GenericHandler> handler; - - private BatchStrategy batchStrategy; - - private Map currentBatchDatas; - - private Map pathToDataType; - - public RemoteMultSeriesReader(MultDataSourceInfo sourceInfo) { - this.sourceInfo = sourceInfo; - this.handler = new GenericHandler<>(sourceInfo.getCurrentNode(), fetchResult); - this.currentBatchDatas = Maps.newHashMap(); - this.batchStrategy = new DefaultBatchStrategy(); - - this.cachedBatchs = Maps.newHashMap(); - this.pathToDataType = Maps.newHashMap(); - for (int i = 0; i < sourceInfo.getPartialPaths().size(); i++) { - String fullPath = sourceInfo.getPartialPaths().get(i).getFullPath(); - this.cachedBatchs.put(fullPath, new ConcurrentLinkedQueue<>()); - this.pathToDataType.put(fullPath, sourceInfo.getDataTypes().get(i)); - } - } - - @Override - public synchronized boolean hasNextTimeValuePair(String fullPath) throws IOException { - BatchData batchData = currentBatchDatas.get(fullPath); - if (batchData != null && batchData.hasCurrent()) { - return true; - } - fetchBatch(); - return checkPathBatchData(fullPath); - } - - private boolean checkPathBatchData(String fullPath) { - BatchData batchData = cachedBatchs.get(fullPath).peek(); - return batchData != null && !batchData.isEmpty(); - } - - @Override - public synchronized TimeValuePair nextTimeValuePair(String fullPath) throws IOException { - BatchData batchData = currentBatchDatas.get(fullPath); - if ((batchData == null || !batchData.hasCurrent()) && checkPathBatchData(fullPath)) { - batchData = cachedBatchs.get(fullPath).poll(); - currentBatchDatas.put(fullPath, batchData); - } - - if (!hasNextTimeValuePair(fullPath)) { - throw new NoSuchElementException(); - } - - TimeValuePair timeValuePair = - new TimeValuePair( - batchData.currentTime(), - TsPrimitiveType.getByType(pathToDataType.get(fullPath), batchData.currentValue())); - batchData.next(); - return timeValuePair; - } - - @Override - public Set getAllPaths() { - return cachedBatchs.keySet(); - } - - /** query resource deal close there is not 
dealing. */ - @Override - public void close() {} - - private void fetchBatch() throws IOException { - if (!sourceInfo.checkCurClient()) { - cachedBatchs = null; - return; - } - List paths = batchStrategy.selectBatchPaths(this.cachedBatchs); - if (paths.isEmpty()) { - return; - } - - Map result; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - result = fetchResultAsync(paths); - } else { - result = fetchResultSync(paths); - } - - if (result == null) { - return; - } - - for (String path : result.keySet()) { - - BatchData batchData = SerializeUtils.deserializeBatchData(result.get(path)); - if (logger.isDebugEnabled()) { - logger.debug( - "Fetched a batch from {}, size:{}", - sourceInfo.getCurrentNode(), - batchData == null ? 0 : batchData.length()); - } - // if data query end, batchData is null, - // will create empty BatchData, and add queue. - if (batchData == null) { - batchData = new BatchData(); - } - cachedBatchs - .computeIfAbsent(path, n -> new ConcurrentLinkedQueue()) - .add(batchData); - } - } - - @SuppressWarnings("java:S2274") // enable timeout - private Map fetchResultAsync(List paths) throws IOException { - synchronized (fetchResult) { - fetchResult.set(null); - try { - sourceInfo - .getCurAsyncClient(ClusterConstant.getReadOperationTimeoutMS()) - .fetchMultSeries(sourceInfo.getHeader(), sourceInfo.getReaderId(), paths, handler); - fetchResult.wait(ClusterConstant.getReadOperationTimeoutMS()); - } catch (TException | InterruptedException e) { - logger.error("Failed to fetch result async, connect to {}", sourceInfo, e); - return null; - } - } - return fetchResult.get(); - } - - private Map fetchResultSync(List paths) throws IOException { - SyncDataClient curSyncClient = null; - try { - curSyncClient = sourceInfo.getCurSyncClient(ClusterConstant.getReadOperationTimeoutMS()); - return curSyncClient.fetchMultSeries(sourceInfo.getHeader(), sourceInfo.getReaderId(), paths); - } catch (TException e) { - curSyncClient.close(); - logger.error("Failed to fetch result sync, connect to {}", sourceInfo, e); - return null; - } finally { - if (curSyncClient != null) { - curSyncClient.returnSelf(); - } - } - } - - /** select path, which could batch-fetch result */ - interface BatchStrategy { - List selectBatchPaths(Map> cacheBatchs); - } - - static class DefaultBatchStrategy implements BatchStrategy { - - @Override - public List selectBatchPaths(Map> cacheBatchs) { - List paths = Lists.newArrayList(); - - for (String path : cacheBatchs.keySet()) { - Queue batchDataQueue = cacheBatchs.get(path); - BatchData batchData = batchDataQueue.peek(); - - // data read finished, so can not batch get data - if (batchData != null && batchData.isEmpty()) { - continue; - } - - if (batchDataQueue.size() < FETCH_BATCH_DATA_SIZE) { - paths.add(path); - } - } - return paths; - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/ClusterRPCService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/ClusterRPCService.java deleted file mode 100644 index 7bdb9de7a24a..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/ClusterRPCService.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.commons.concurrent.ThreadName; -import org.apache.iotdb.commons.exception.runtime.RPCServiceException; -import org.apache.iotdb.commons.service.ServiceType; -import org.apache.iotdb.commons.service.ThriftService; -import org.apache.iotdb.commons.service.ThriftServiceThread; -import org.apache.iotdb.db.conf.IoTDBConfig; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.service.thrift.ProcessorWithMetrics; -import org.apache.iotdb.db.service.thrift.handler.RPCServiceThriftHandler; -import org.apache.iotdb.metrics.config.MetricConfigDescriptor; -import org.apache.iotdb.service.rpc.thrift.TSIService.Processor; - -public class ClusterRPCService extends ThriftService implements ClusterRPCServiceMBean { - - private ClusterTSServiceImpl impl; - - private ClusterRPCService() {} - - @Override - public ServiceType getID() { - return ServiceType.CLUSTER_RPC_SERVICE; - } - - @Override - public void initSyncedServiceImpl(Object serviceImpl) { - impl = (ClusterTSServiceImpl) serviceImpl; - super.initSyncedServiceImpl(serviceImpl); - } - - @Override - public void initTProcessor() throws InstantiationException { - if (impl == null) { - throw new InstantiationException("ClusterTSServiceImpl is null"); - } - if (MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) { - processor = new ProcessorWithMetrics(impl); - } else { - processor = new Processor<>(impl); - } - } - - @Override - public void initThriftServiceThread() throws IllegalAccessException { - IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig(); - try { - thriftServiceThread = - new ThriftServiceThread( - processor, - getID().getName(), - ThreadName.CLUSTER_RPC_CLIENT.getName(), - getBindIP(), - getBindPort(), - config.getRpcMaxConcurrentClientNum(), - config.getThriftServerAwaitTimeForStopService(), - new RPCServiceThriftHandler(impl), - IoTDBDescriptor.getInstance().getConfig().isRpcThriftCompressionEnable()); - } catch (RPCServiceException e) { - throw new IllegalAccessException(e.getMessage()); - } - thriftServiceThread.setName(ThreadName.CLUSTER_RPC_SERVICE.getName()); - } - - @Override - public String getBindIP() { - return IoTDBDescriptor.getInstance().getConfig().getRpcAddress(); - } - - @Override - public int getBindPort() { - return ClusterDescriptor.getInstance().getConfig().getClusterRpcPort(); - } - - @Override - public int getRPCPort() { - return getBindPort(); - } - - public static ClusterRPCService getInstance() { - return ClusterRPCServiceHolder.INSTANCE; - } - - private static class ClusterRPCServiceHolder { - - private static final ClusterRPCService INSTANCE = new ClusterRPCService(); - - private ClusterRPCServiceHolder() {} - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/ClusterRPCServiceMBean.java 
b/cluster/src/main/java/org/apache/iotdb/cluster/server/ClusterRPCServiceMBean.java deleted file mode 100644 index 694a822ccd16..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/ClusterRPCServiceMBean.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server; - -import org.apache.iotdb.commons.exception.StartupException; - -public interface ClusterRPCServiceMBean { - - String getRPCServiceStatus(); - - int getRPCPort(); - - void startService() throws StartupException; - - void restartService() throws StartupException; - - void stopService(); -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/ClusterTSServiceImpl.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/ClusterTSServiceImpl.java deleted file mode 100644 index 0c7528dc2212..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/ClusterTSServiceImpl.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server; - -import org.apache.iotdb.cluster.server.basic.ClusterServiceProvider; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.db.service.thrift.impl.TSServiceImpl; - -/** - * ClusterTSServiceImpl is the cluster version of TSServiceImpl, which is responsible for the - * processing of the user requests (sqls and session api). It inherits the basic procedures from - * TSServiceImpl, but redirect the queries of data and metadata to a MetaGroupMember of the local - * node. 
- */ -public class ClusterTSServiceImpl extends TSServiceImpl { - - private final ClusterServiceProvider clusterServiceProvider; - - public ClusterTSServiceImpl() { - clusterServiceProvider = (ClusterServiceProvider) IoTDB.serviceProvider; - } - - /** Redirect the plan to the local Coordinator so that it will be processed cluster-wide. */ - @Override - protected TSStatus executeNonQueryPlan(PhysicalPlan plan) { - return clusterServiceProvider.executeNonQueryPlan(plan); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/HardLinkCleaner.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/HardLinkCleaner.java deleted file mode 100644 index 5e3a83776176..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/HardLinkCleaner.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server; - -import org.apache.iotdb.db.conf.directories.DirectoryManager; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.util.List; - -public class HardLinkCleaner implements Runnable { - - private static final Logger logger = LoggerFactory.getLogger(HardLinkCleaner.class); - // hardlinks are kept for 3 days - private static final long HARDLINK_LIFE_MS = 3 * 24 * 3600 * 1000L; - - @Override - public void run() { - scanFolders(DirectoryManager.getInstance().getAllSequenceFileFolders()); - if (Thread.interrupted()) { - return; - } - scanFolders(DirectoryManager.getInstance().getAllUnSequenceFileFolders()); - } - - private void scanFolders(List folders) { - for (String folder : folders) { - scanFolder(folder); - } - } - - private void scanFolder(String folder) { - File folderFile = new File(folder); - scanFile(folderFile); - } - - private void scanFile(File file) { - if (!file.exists()) { - return; - } - if (file.isDirectory()) { - File[] files = file.listFiles(); - if (files != null) { - for (File file1 : files) { - scanFile(file1); - if (Thread.interrupted()) { - Thread.currentThread().interrupt(); - return; - } - } - } - return; - } - long hardLinkCreateTime = getHardLinkCreateTime(file); - long currentTime = System.currentTimeMillis(); - if (hardLinkCreateTime != -1 && currentTime - hardLinkCreateTime >= HARDLINK_LIFE_MS) { - try { - Files.delete(file.toPath()); - } catch (IOException e) { - logger.debug( - "Hardlink {} cannot be removed, leave it to the next try: {}", file, e.getMessage()); - } - } - } - - /** - * @param file - * @return -1 if the file is not a hardlink or its created time - */ - private long getHardLinkCreateTime(File file) { - String fileName = file.getName(); - // hardlinks have a suffix like 
".[createTime]_[randomNumber]" - int suffixIndex = fileName.lastIndexOf('.'); - if (suffixIndex > 0 && suffixIndex < fileName.length()) { - String suffix = fileName.substring(suffixIndex + 1); - String[] split = suffix.split("_"); - if (split.length != 2) { - return -1; - } - try { - return Long.parseLong(split[0]); - } catch (NumberFormatException e) { - return -1; - } - } else { - return -1; - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/NodeCharacter.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/NodeCharacter.java deleted file mode 100644 index a4df0642b96e..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/NodeCharacter.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.server; - -public enum NodeCharacter { - // the leader in this group is unknown - ELECTOR, - // this node has known the leader in the group - FOLLOWER, - // this node is the leader in the group - LEADER -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/PullSnapshotHintService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/PullSnapshotHintService.java deleted file mode 100644 index 3cec37e55302..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/PullSnapshotHintService.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server; - -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.log.snapshot.PullSnapshotTaskDescriptor; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.ConcurrentLinkedDeque; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import static org.apache.iotdb.cluster.config.ClusterConstant.THREAD_POLL_WAIT_TERMINATION_TIME_S; - -public class PullSnapshotHintService { - - private static final Logger logger = LoggerFactory.getLogger(PullSnapshotHintService.class); - - private DataGroupMember member; - private ScheduledExecutorService service; - private ConcurrentLinkedDeque hints; - - public PullSnapshotHintService(DataGroupMember member) { - this.member = member; - this.hints = new ConcurrentLinkedDeque<>(); - } - - public void start() { - this.service = IoTDBThreadPoolFactory.newScheduledThreadPool(1, "PullSnapshotHint"); - this.service.scheduleAtFixedRate(this::sendHints, 0, 10, TimeUnit.MILLISECONDS); - } - - public void stop() { - if (service == null) { - return; - } - - service.shutdownNow(); - try { - service.awaitTermination(THREAD_POLL_WAIT_TERMINATION_TIME_S, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("{}: PullSnapshotHintService exiting interrupted", member.getName()); - } - service = null; - } - - public void registerHint(PullSnapshotTaskDescriptor descriptor) { - PullSnapshotHint hint = new PullSnapshotHint(); - hint.partitionGroup = descriptor.getPreviousHolders(); - hint.receivers = new PartitionGroup(hint.partitionGroup); - hint.slots = descriptor.getSlots(); - hints.add(hint); - } - - private void sendHints() { - for (Iterator iterator = hints.iterator(); iterator.hasNext(); ) { - PullSnapshotHint hint = iterator.next(); - for (Iterator iter = hint.receivers.iterator(); iter.hasNext(); ) { - Node receiver = iter.next(); - // If the receiver is the removed node, ignore the hint - if (!member.getMetaGroupMember().getPartitionTable().getAllNodes().contains(receiver)) { - iter.remove(); - } else { - try { - if (logger.isDebugEnabled()) { - logger.debug( - "{}: start to send hint to target group {}, receiver {}, slot is {} and other {}", - member.getName(), - hint.partitionGroup, - receiver, - hint.slots.get(0), - hint.slots.size() - 1); - } - boolean result = sendHint(receiver, hint); - if (result) { - iter.remove(); - } - } catch (TException e) { - logger.warn("Cannot send pull snapshot hint to {}", receiver); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("Sending hint to {} interrupted", receiver); - } - } - } - // all nodes in remote group know the hint, the hint can be removed - if (hint.receivers.isEmpty()) { - iterator.remove(); - } - } - } - - private boolean sendHint(Node receiver, PullSnapshotHint hint) - throws TException, 
InterruptedException { - boolean result; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - result = sendHintsAsync(receiver, hint); - } else { - result = sendHintSync(receiver, hint); - } - return result; - } - - private boolean sendHintsAsync(Node receiver, PullSnapshotHint hint) - throws TException, InterruptedException { - AsyncDataClient asyncDataClient = (AsyncDataClient) member.getAsyncClient(receiver); - if (asyncDataClient == null) { - return false; - } - return SyncClientAdaptor.onSnapshotApplied(asyncDataClient, hint.getHeader(), hint.slots); - } - - private boolean sendHintSync(Node receiver, PullSnapshotHint hint) throws TException { - SyncDataClient syncDataClient = null; - try { - syncDataClient = (SyncDataClient) member.getSyncClient(receiver); - if (syncDataClient == null) { - return false; - } - return syncDataClient.onSnapshotApplied(hint.getHeader(), hint.slots); - } catch (TException e) { - syncDataClient.close(); - throw e; - } finally { - if (syncDataClient != null) { - syncDataClient.returnSelf(); - } - } - } - - private static class PullSnapshotHint { - - /** Nodes to send this hint */ - private PartitionGroup receivers; - - private PartitionGroup partitionGroup; - - private List slots; - - public RaftNode getHeader() { - return partitionGroup.getHeader(); - } - - public int getRaftId() { - return receivers.getRaftId(); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/Response.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/Response.java deleted file mode 100644 index 6b286c1635ba..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/Response.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server; - -/** - * Response defines the numeric responses that have special meanings. Enum class is not used for - * thrift compatibility and to reduce communication cost. 
- */
-public class Response {
-
-  // the request is successfully accepted
-  public static final long RESPONSE_AGREE = -1;
-  // cannot find the previous log of the log sent to the follower, a catch-up is required
-  public static final long RESPONSE_LOG_MISMATCH = -2;
-  // the request is rejected but the detailed reason depends on the type of the request
-  public static final long RESPONSE_REJECT = -3;
-  // the partition table is not established yet, so related requests are not available
-  public static final long RESPONSE_PARTITION_TABLE_UNAVAILABLE = -4;
-  // the identifier of the node which wants to join conflicts with one of the joined node's
-  public static final long RESPONSE_IDENTIFIER_CONFLICT = -5;
-  // the requested node is unreachable in the network
-  public static final long RESPONSE_NO_CONNECTION = -6;
-  // the node does not give a vote because its leader does not time out. This is to avoid a
-  // node which cannot connect to the leader changing the leader in the group frequently.
-  public static final long RESPONSE_LEADER_STILL_ONLINE = -7;
-  // the operation is rejected because the cluster will not be able to have enough replicas after
-  // this operation
-  public static final long RESPONSE_CLUSTER_TOO_SMALL = -8;
-  // the new node, which tries to join the cluster, has parameters that conflict with the
-  // cluster's, so the operation is rejected.
-  public static final long RESPONSE_NEW_NODE_PARAMETER_CONFLICT = -9;
-  // the data migration of previous add/remove node operations is not finished.
-  public static final long RESPONSE_DATA_MIGRATION_NOT_FINISH = -10;
-  // the node has been removed from the group, so the operation is rejected.
-  public static final long RESPONSE_NODE_IS_NOT_IN_GROUP = -11;
-  // the request is not executed locally and should be forwarded
-  public static final long RESPONSE_NULL = Long.MIN_VALUE;
-  // the meta engine is not ready (except that the partition table is ready)
-  public static final long RESPONSE_META_NOT_READY = -12;
-  // the cluster is too busy, so new committed logs are rejected
-  public static final long RESPONSE_TOO_BUSY = -13;
-
-  private Response() {
-    // enum-like class
-  }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/StoppedMemberManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/StoppedMemberManager.java
deleted file mode 100644
index c8efe3963cab..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/server/StoppedMemberManager.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster.server;
-
-import org.apache.iotdb.cluster.partition.PartitionGroup;
-import org.apache.iotdb.cluster.rpc.thrift.Node;
-import org.apache.iotdb.cluster.rpc.thrift.RaftNode;
-import org.apache.iotdb.cluster.server.member.DataGroupMember;
-import org.apache.iotdb.cluster.server.member.DataGroupMember.Factory;
-import org.apache.iotdb.cluster.utils.ClusterUtils;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * When a node is added or removed, several partition groups are affected and nodes may exit some
- * groups. For example, if the local node is #5 and it is in a data group of [1, 3, 5], and node #4
- * is added, the group becomes [1, 3, 4] and the local node must leave the group. However, #5 may
- * still hold data that #4 needs to pull, so the member of #5 in this group is stopped but not yet
- * removed, and when the system recovers, we need to resume such groups so that they can keep
- * providing snapshots for data transfers.
- */
-public class StoppedMemberManager {
-
-  private static final Logger logger = LoggerFactory.getLogger(StoppedMemberManager.class);
-  private String stoppedMembersFileName =
-      IoTDBDescriptor.getInstance().getConfig().getSystemDir() + File.separator + "removedMembers";
-
-  private static final String REMOVED = "0";
-  private static final String RESUMED = "1";
-
-  private Map<RaftNode, DataGroupMember> removedMemberMap = new HashMap<>();
-  private DataGroupMember.Factory memberFactory;
-
-  public StoppedMemberManager(Factory memberFactory) {
-    this.memberFactory = memberFactory;
-    recover();
-  }
-
-  /**
-   * When a DataGroupMember is removed, record the removal here so that in the next start-up it can
-   * be recovered as a data source for data transfers.
-   *
-   * @param raftNode the header and raft id of the group the removed member belongs to
-   * @param dataGroupMember the stopped member to record
-   */
-  public synchronized void put(RaftNode raftNode, DataGroupMember dataGroupMember) {
-    removedMemberMap.put(raftNode, dataGroupMember);
-    try (BufferedWriter writer = new BufferedWriter(new FileWriter(stoppedMembersFileName, true))) {
-      StringBuilder builder = new StringBuilder(REMOVED);
-      builder.append(";").append(raftNode.raftId);
-      for (Node node : dataGroupMember.getAllNodes()) {
-        builder.append(";").append(node.toString());
-      }
-      writer.write(builder.toString());
-      writer.newLine();
-    } catch (IOException e) {
-      logger.error("Cannot record removed member of header {}", raftNode, e);
-    }
-  }
-
-  /**
-   * When a DataGroupMember is resumed, record the resumption here so that in the next start-up it
-   * will not be recovered again.
- * - * @param raftNode - */ - public synchronized void remove(RaftNode raftNode) { - removedMemberMap.remove(raftNode); - try (BufferedWriter writer = new BufferedWriter(new FileWriter(stoppedMembersFileName, true))) { - writer.write(RESUMED + ";" + raftNode.getRaftId() + ";" + raftNode.getNode().toString()); - writer.newLine(); - } catch (IOException e) { - logger.error("Cannot record resumed member of header {}", raftNode, e); - } - } - - public synchronized DataGroupMember get(RaftNode raftNode) { - return removedMemberMap.get(raftNode); - } - - private void recover() { - File stoppedMembersFile = new File(stoppedMembersFileName); - if (!stoppedMembersFile.exists()) { - return; - } - try (BufferedReader reader = new BufferedReader(new FileReader(stoppedMembersFileName))) { - String line; - while ((line = reader.readLine()) != null) { - parseLine(line); - } - } catch (IOException e) { - logger.error("Cannot recover members from file", e); - } - } - - private void parseLine(String line) { - if ("".equalsIgnoreCase(line)) { - return; - } - try { - String[] split = line.split(";"); - String type = split[0]; - if (REMOVED.equalsIgnoreCase(type)) { - parseRemoved(split); - } else if (RESUMED.equalsIgnoreCase(type)) { - parseResumed(split); - } - } catch (Exception e) { - logger.warn("Fail to analyze {}, skipping", line); - } - } - - private void parseRemoved(String[] split) { - PartitionGroup partitionGroup = new PartitionGroup(); - int raftId = Integer.parseInt(split[1]); - partitionGroup.setRaftId(raftId); - for (int i = 2; i < split.length; i++) { - Node node = ClusterUtils.stringToNode(split[i]); - partitionGroup.add(node); - } - DataGroupMember member = memberFactory.create(partitionGroup); - member.setReadOnly(); - removedMemberMap.put(partitionGroup.getHeader(), member); - } - - private void parseResumed(String[] split) { - int raftId = Integer.parseInt(split[1]); - Node header = ClusterUtils.stringToNode(split[2]); - removedMemberMap.remove(new RaftNode(header, raftId)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/basic/ClusterServiceProvider.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/basic/ClusterServiceProvider.java deleted file mode 100644 index 76199ebbdc07..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/basic/ClusterServiceProvider.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.basic; - -import org.apache.iotdb.cluster.coordinator.Coordinator; -import org.apache.iotdb.cluster.query.ClusterPlanExecutor; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.query.manage.ClusterSessionManager; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.StorageEngineReadonlyException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.sys.FlushPlan; -import org.apache.iotdb.db.qp.physical.sys.SetSystemModePlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.service.basic.ServiceProvider; -import org.apache.iotdb.rpc.RpcUtils; -import org.apache.iotdb.rpc.TSStatusCode; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class ClusterServiceProvider extends ServiceProvider { - private static final Logger logger = LoggerFactory.getLogger(ClusterServiceProvider.class); - - /** - * The Coordinator of the local node. Through this node queries data and meta from the cluster and - * performs data manipulations to the cluster. - */ - private final Coordinator coordinator; - - public ClusterServiceProvider(Coordinator coordinator, MetaGroupMember metaGroupMember) - throws QueryProcessException { - super(new ClusterPlanExecutor(metaGroupMember)); - this.coordinator = coordinator; - } - - /** Redirect the plan to the local Coordinator so that it will be processed cluster-wide. */ - public TSStatus executeNonQueryPlan(PhysicalPlan plan) { - try { - plan.checkIntegrity(); - if (!(plan instanceof SetSystemModePlan) - && !(plan instanceof FlushPlan) - && IoTDBDescriptor.getInstance().getConfig().isReadOnly()) { - return RpcUtils.getStatus( - TSStatusCode.READ_ONLY_SYSTEM_ERROR, StorageEngineReadonlyException.ERROR_MESSAGE); - } - } catch (QueryProcessException e) { - logger.warn("Illegal plan detected: {}", plan); - return RpcUtils.getStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR, e.getMessage()); - } - - return coordinator.executeNonQueryPlan(plan); - } - - /** - * Generate and cache a QueryContext using "queryId". In the distributed version, the QueryContext - * is a RemoteQueryContext. - * - * @return a RemoteQueryContext using queryId - */ - @Override - public QueryContext genQueryContext( - long queryId, boolean debug, long startTime, String statement, long timeout) { - RemoteQueryContext context = - new RemoteQueryContext(queryId, debug, startTime, statement, timeout); - ClusterSessionManager.getInstance().putContext(queryId, context); - return context; - } - - @Override - public boolean executeNonQuery(PhysicalPlan plan) { - TSStatus tsStatus = executeNonQueryPlan(plan); - return tsStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServer.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServer.java deleted file mode 100644 index d525790f4f82..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServer.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.clusterinfo; - -import org.apache.iotdb.cluster.config.ClusterConfig; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.rpc.thrift.ClusterInfoService.Processor; -import org.apache.iotdb.commons.concurrent.ThreadName; -import org.apache.iotdb.commons.exception.runtime.RPCServiceException; -import org.apache.iotdb.commons.service.ServiceType; -import org.apache.iotdb.commons.service.ThriftService; -import org.apache.iotdb.commons.service.ThriftServiceThread; -import org.apache.iotdb.db.conf.IoTDBConfig; -import org.apache.iotdb.db.conf.IoTDBDescriptor; - -public class ClusterInfoServer extends ThriftService implements ClusterInfoServerMBean { - private ClusterInfoServiceImpl serviceImpl; - - public static ClusterInfoServer getInstance() { - return ClusterMonitorServiceHolder.INSTANCE; - } - - @Override - public ServiceType getID() { - return ServiceType.CLUSTER_INFO_SERVICE; - } - - @Override - public void initTProcessor() { - initSyncedServiceImpl(null); - serviceImpl = new ClusterInfoServiceImpl(); - processor = new Processor<>(serviceImpl); - } - - @Override - public void initThriftServiceThread() throws IllegalAccessException { - ClusterConfig clusterConfig = ClusterDescriptor.getInstance().getConfig(); - IoTDBConfig nodeConfig = IoTDBDescriptor.getInstance().getConfig(); - try { - thriftServiceThread = - new ThriftServiceThread( - processor, - getID().getName(), - ThreadName.CLUSTER_INFO_SERVICE.getName(), - nodeConfig.getRpcAddress(), - clusterConfig.getClusterInfoRpcPort(), - nodeConfig.getRpcMaxConcurrentClientNum(), - nodeConfig.getThriftServerAwaitTimeForStopService(), - new ClusterInfoServiceThriftHandler(serviceImpl), - IoTDBDescriptor.getInstance().getConfig().isRpcThriftCompressionEnable()); - } catch (RPCServiceException e) { - throw new IllegalAccessException(e.getMessage()); - } - thriftServiceThread.setName(ThreadName.CLUSTER_INFO_SERVICE.getName() + "Service"); - } - - @Override - public String getBindIP() { - return IoTDBDescriptor.getInstance().getConfig().getRpcAddress(); - } - - @Override - public int getBindPort() { - return ClusterDescriptor.getInstance().getConfig().getClusterInfoRpcPort(); - } - - private static class ClusterMonitorServiceHolder { - - private static final ClusterInfoServer INSTANCE = new ClusterInfoServer(); - - private ClusterMonitorServiceHolder() {} - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServerMBean.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServerMBean.java deleted file mode 100644 index 1003d464193d..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServerMBean.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.clusterinfo; - -public interface ClusterInfoServerMBean {} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServiceImpl.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServiceImpl.java deleted file mode 100644 index 6891a6ea5d64..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServiceImpl.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.clusterinfo; - -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.rpc.thrift.ClusterInfoService; -import org.apache.iotdb.cluster.rpc.thrift.DataPartitionEntry; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.nodetool.ClusterMonitor; - -import org.apache.commons.collections4.map.MultiKeyMap; -import org.apache.thrift.TException; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -public class ClusterInfoServiceImpl implements ClusterInfoService.Iface { - - @Override - public List getRing() throws TException { - return ClusterMonitor.INSTANCE.getRing(); - } - - @Override - public List getDataPartition(String path, long startTime, long endTime) { - MultiKeyMap partitions = - ClusterMonitor.INSTANCE.getDataPartition(path, startTime, endTime); - List result = new ArrayList<>(partitions.size()); - partitions.forEach( - (multikey, nodes) -> - result.add(new DataPartitionEntry(multikey.getKey(0), multikey.getKey(1), nodes))); - return result; - } - - @Override - public List getMetaPartition(String path) throws TException { - return ClusterMonitor.INSTANCE.getMetaPartition(path); - } - - @Override - public Map getAllNodeStatus() throws TException { - return ClusterMonitor.INSTANCE.getAllNodeStatus(); - } - - @Override - public String getInstrumentingInfo() throws TException { - return ClusterMonitor.INSTANCE.getInstrumentingInfo(); - } - - public void handleClientExit() { - // do something when a client connection exits. - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServiceThriftHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServiceThriftHandler.java deleted file mode 100644 index 8fc8783d7a38..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServiceThriftHandler.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.clusterinfo; - -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.server.ServerContext; -import org.apache.thrift.server.TServerEventHandler; -import org.apache.thrift.transport.TTransport; - -public class ClusterInfoServiceThriftHandler implements TServerEventHandler { - private ClusterInfoServiceImpl serviceImpl; - - ClusterInfoServiceThriftHandler(ClusterInfoServiceImpl serviceImpl) { - this.serviceImpl = serviceImpl; - } - - @Override - public ServerContext createContext(TProtocol arg0, TProtocol arg1) { - // nothing - return null; - } - - @Override - public void deleteContext(ServerContext arg0, TProtocol arg1, TProtocol arg2) { - // release query resources. - serviceImpl.handleClientExit(); - } - - @Override - public void preServe() { - // nothing - } - - @Override - public void processContext(ServerContext arg0, TTransport arg1, TTransport arg2) { - // nothing - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/AppendGroupEntryHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/AppendGroupEntryHandler.java deleted file mode 100644 index e83857bb6f71..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/AppendGroupEntryHandler.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.member.RaftMember; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; - -import static org.apache.iotdb.cluster.server.Response.RESPONSE_AGREE; - -/** - * AppendGroupEntryHandler checks if the log is successfully appended by the quorum or some node has - * rejected it for some reason when one node has finished the AppendEntryRequest. The target of the - * log is the data groups, the consistency can be reached as long as quorum data groups agree, even - * if the actually agreed nodes can be less than quorum, because the same nodes may say "yes" for - * multiple groups. 
- */ -public class AppendGroupEntryHandler implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(AppendGroupEntryHandler.class); - - private RaftMember member; - private Log log; - // the number of nodes that accept the log in each group - // to succeed, each number should reach zero - // for example: assuming there are 4 nodes and 3 replicas, then the initial array will be: - // [2, 2, 2, 2]. And if node0 accepted the log, as node0 is in group 2,3,0, the array will be - // [1, 2, 1, 1]. - private int[] groupReceivedCounter; - // the index of the node which the request sends log to, if the node accepts the log, all - // groups' counters the node is in should decrease - private int receiverNodeIndex; - private Node receiverNode; - // store the flag of leadership lost and the new leader's term - private AtomicBoolean leaderShipStale; - private AtomicLong newLeaderTerm; - private int replicationNum = ClusterDescriptor.getInstance().getConfig().getReplicationNum(); - - private AtomicInteger erroredNodeNum = new AtomicInteger(0); - - public AppendGroupEntryHandler( - int[] groupReceivedCounter, - int receiverNodeIndex, - Node receiverNode, - AtomicBoolean leaderShipStale, - Log log, - AtomicLong newLeaderTerm, - RaftMember member) { - this.groupReceivedCounter = groupReceivedCounter; - this.receiverNodeIndex = receiverNodeIndex; - this.receiverNode = receiverNode; - this.leaderShipStale = leaderShipStale; - this.log = log; - this.newLeaderTerm = newLeaderTerm; - this.member = member; - } - - @Override - public void onComplete(Long response) { - if (leaderShipStale.get()) { - // someone has rejected this log because the leadership is stale - return; - } - - long resp = response; - - if (resp == RESPONSE_AGREE) { - processAgreement(); - } else if (resp > 0) { - // a response > 0 is the term fo the follower - synchronized (groupReceivedCounter) { - // the leader ship is stale, abort and wait for the new leader's heartbeat - long previousNewTerm = newLeaderTerm.get(); - if (previousNewTerm < resp) { - newLeaderTerm.set(resp); - } - leaderShipStale.set(true); - groupReceivedCounter.notifyAll(); - } - } - // rejected because the follower's logs are stale or the follower has no cluster info, just - // wait for the heartbeat to handle - } - - /** - * Decrease all related counters of the receiver node. See the field "groupReceivedCounter" for an - * example. If all counters reach 0, wake the waiting thread to welcome the success. 
- */ - private void processAgreement() { - synchronized (groupReceivedCounter) { - logger.debug("{}: Node {} has accepted log {}", member.getName(), receiverNode, log); - // this node is contained in REPLICATION_NUM groups, decrease the counters of these groups - for (int i = 0; i < replicationNum; i++) { - int nodeIndex = receiverNodeIndex - i; - if (nodeIndex < 0) { - nodeIndex += groupReceivedCounter.length; - } - groupReceivedCounter[nodeIndex]--; - } - - // examine if all groups has agreed - boolean allAgreed = true; - for (int remaining : groupReceivedCounter) { - if (remaining > 0) { - allAgreed = false; - break; - } - } - if (allAgreed) { - // wake up the parent thread to welcome the new node - groupReceivedCounter.notifyAll(); - } - } - } - - @Override - public void onError(Exception exception) { - logger.error( - "{}: Cannot send the add node request to node {}", - member.getName(), - receiverNode, - exception); - if (erroredNodeNum.incrementAndGet() >= replicationNum / 2) { - synchronized (groupReceivedCounter) { - logger.error( - "{}: Over half of the nodes failed, the request is rejected", member.getName()); - groupReceivedCounter.notifyAll(); - } - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/AppendNodeEntryHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/AppendNodeEntryHandler.java deleted file mode 100644 index 2945726c084d..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/AppendNodeEntryHandler.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.cluster.server.monitor.Peer; -import org.apache.iotdb.cluster.server.monitor.Timer; -import org.apache.iotdb.cluster.server.monitor.Timer.Statistic; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.net.ConnectException; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; - -import static org.apache.iotdb.cluster.server.Response.RESPONSE_AGREE; - -/** - * AppendNodeEntryHandler checks if the log is successfully appended by the quorum or some node has - * rejected it for some reason when one node has finished the AppendEntryRequest. The target of the - * log is the single nodes, it requires the agreement from the quorum of the nodes to reach - * consistency. 
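The handler below is the callback half of a simple quorum wait. For orientation, here is a sketch of the sending side that blocks on the same counter; the method name and timeout handling are assumptions for illustration, not taken from the removed RaftMember code:

    // Hypothetical sketch: the sender thread parks on voteCounter until the handler below
    // either decrements it to 0 (quorum reached) or sets it to Integer.MAX_VALUE (early abort).
    static boolean waitForQuorum(java.util.concurrent.atomic.AtomicInteger voteCounter, long timeoutMs)
        throws InterruptedException {
      synchronized (voteCounter) {
        if (voteCounter.get() > 0 && voteCounter.get() != Integer.MAX_VALUE) {
          voteCounter.wait(timeoutMs); // woken by the handler's notifyAll()
        }
      }
      return voteCounter.get() == 0;   // 0 => quorum reached; MAX_VALUE => aborted
    }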
- */ -public class AppendNodeEntryHandler implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(AppendNodeEntryHandler.class); - - private RaftMember member; - private AtomicLong receiverTerm; - private Log log; - private AtomicInteger voteCounter; - private AtomicBoolean leaderShipStale; - private Node receiver; - private Peer peer; - // initialized as the quorum size, and decrease by 1 each time when we receive a rejection or - // an exception, upon decreased to zero, the request will be early-aborted - private int failedDecreasingCounter; - - // nano start time when the send begins - private long sendStart = Long.MIN_VALUE; - - public AppendNodeEntryHandler() { - if (Timer.ENABLE_INSTRUMENTING - && ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - sendStart = System.nanoTime(); - } - } - - @Override - public void onComplete(Long response) { - if (Timer.ENABLE_INSTRUMENTING) { - Statistic.RAFT_SENDER_SEND_LOG_ASYNC.calOperationCostTimeFromStart(sendStart); - } - if (voteCounter.get() == Integer.MAX_VALUE) { - // the request already failed - return; - } - logger.debug( - "{}: Append response {} from {} for log {}", member.getName(), response, receiver, log); - if (leaderShipStale.get()) { - // someone has rejected this log because the leadership is stale - return; - } - long resp = response; - synchronized (voteCounter) { - if (resp == RESPONSE_AGREE) { - int remaining = voteCounter.decrementAndGet(); - logger.debug( - "{}: Received an agreement from {} for {}, remaining votes to succeed: {}", - member.getName(), - receiver, - log, - remaining); - if (remaining == 0) { - logger.debug( - "{}: Log [{}] {} is accepted by the quorum", - member.getName(), - log.getCurrLogIndex(), - log); - voteCounter.notifyAll(); - } - peer.setMatchIndex(Math.max(log.getCurrLogIndex(), peer.getMatchIndex())); - } else if (resp > 0) { - // a response > 0 is the follower's term - // the leader ship is stale, wait for the new leader's heartbeat - long prevReceiverTerm = receiverTerm.get(); - logger.debug( - "{}: Received a rejection from {} because term is stale: {}/{} for log {}", - member.getName(), - receiver, - prevReceiverTerm, - resp, - log); - if (resp > prevReceiverTerm) { - receiverTerm.set(resp); - } - leaderShipStale.set(true); - voteCounter.notifyAll(); - } else { - // e.g., Response.RESPONSE_LOG_MISMATCH - logger.debug( - "{}: The log {} is rejected by {} because: {}", member.getName(), log, receiver, resp); - onFail(); - } - // rejected because the receiver's logs are stale or the receiver has no cluster info, just - // wait for the heartbeat to handle - } - } - - @Override - public void onError(Exception exception) { - if (exception instanceof ConnectException) { - logger.warn( - "{}: Cannot append log {}: cannot connect to {}: {}", - member.getName(), - log, - receiver, - exception.getMessage()); - } else { - logger.warn("{}: Cannot append log {} to {}", member.getName(), log, receiver, exception); - } - onFail(); - } - - private void onFail() { - synchronized (voteCounter) { - failedDecreasingCounter--; - if (failedDecreasingCounter <= 0) { - // quorum members have failed, there is no need to wait for others - voteCounter.set(Integer.MAX_VALUE); - voteCounter.notifyAll(); - } - } - } - - public void setLog(Log log) { - this.log = log; - } - - public void setMember(RaftMember member) { - this.member = member; - } - - public void setVoteCounter(AtomicInteger voteCounter) { - this.voteCounter = voteCounter; - this.failedDecreasingCounter 
= - ClusterDescriptor.getInstance().getConfig().getReplicationNum() - voteCounter.get(); - } - - public void setLeaderShipStale(AtomicBoolean leaderShipStale) { - this.leaderShipStale = leaderShipStale; - } - - public void setPeer(Peer peer) { - this.peer = peer; - } - - public void setReceiver(Node follower) { - this.receiver = follower; - } - - public void setReceiverTerm(AtomicLong receiverTerm) { - this.receiverTerm = receiverTerm; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/ElectionHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/ElectionHandler.java deleted file mode 100644 index 9f16f3ff7605..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/ElectionHandler.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.db.service.metrics.Metric; -import org.apache.iotdb.db.service.metrics.MetricsService; -import org.apache.iotdb.db.service.metrics.Tag; -import org.apache.iotdb.metrics.config.MetricConfigDescriptor; -import org.apache.iotdb.metrics.utils.MetricLevel; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.net.ConnectException; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.apache.iotdb.cluster.server.Response.RESPONSE_AGREE; -import static org.apache.iotdb.cluster.server.Response.RESPONSE_LEADER_STILL_ONLINE; -import static org.apache.iotdb.cluster.server.Response.RESPONSE_NODE_IS_NOT_IN_GROUP; - -/** - * ElectionHandler checks the result from a voter and decides whether the election goes on, succeeds - * or fails. 
- */ -public class ElectionHandler implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(ElectionHandler.class); - - private RaftMember raftMember; - private String memberName; - private Node voter; - private long currTerm; - private AtomicInteger requiredVoteNum; - private AtomicBoolean terminated; - // when set to true, the elector wins the election - private AtomicBoolean electionValid; - private AtomicInteger failingVoteCounter; - - public ElectionHandler( - RaftMember raftMember, - Node voter, - long currTerm, - AtomicInteger requiredVoteNum, - AtomicBoolean terminated, - AtomicBoolean electionValid, - AtomicInteger failingVoteCounter) { - this.raftMember = raftMember; - this.voter = voter; - this.currTerm = currTerm; - this.requiredVoteNum = requiredVoteNum; - this.terminated = terminated; - this.electionValid = electionValid; - this.memberName = raftMember.getName(); - this.failingVoteCounter = failingVoteCounter; - } - - @Override - public void onComplete(Long resp) { - long voterResp = resp; - String result = "fail"; - synchronized (raftMember.getTerm()) { - if (terminated.get()) { - // a voter has rejected this election, which means the term or the log id falls behind - // this node is not able to be the leader - logger.info( - "{}: Terminated election received a election response {} from {}", - memberName, - voterResp, - voter); - return; - } - - if (voterResp == RESPONSE_AGREE) { - long remaining = requiredVoteNum.decrementAndGet(); - logger.info( - "{}: Received a grant vote from {}, remaining votes to succeed: {}", - memberName, - voter, - remaining); - if (remaining == 0) { - // the election is valid - electionValid.set(true); - terminated.set(true); - raftMember.getTerm().notifyAll(); - raftMember.onElectionWins(); - result = "win"; - logger.info("{}: Election {} is won", memberName, currTerm); - } - // still need more votes - } else if (voterResp != RESPONSE_LEADER_STILL_ONLINE) { - if (voterResp < currTerm) { - // the rejection from a node with a smaller term means the log of this node falls behind - logger.info("{}: Election {} rejected: code {}", memberName, currTerm, voterResp); - onFail(); - } else if (voterResp == RESPONSE_NODE_IS_NOT_IN_GROUP) { - logger.info("{}: This node has removed from the group", memberName); - onFail(); - } else { - // the election is rejected by a node with a bigger term, update current term to it - logger.info( - "{}: Election {} rejected from {}: The term of this node is no bigger than {}", - memberName, - currTerm, - voter, - voterResp); - raftMember.stepDown(voterResp, false); - // the election is rejected - terminated.set(true); - raftMember.getTerm().notifyAll(); - } - } - } - if (MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) { - MetricsService.getInstance() - .getMetricManager() - .count( - 1, - Metric.CLUSTER_ELECT.toString(), - MetricLevel.IMPORTANT, - Tag.NAME.toString(), - raftMember.getThisNode().internalIp, - Tag.STATUS.toString(), - result); - } - } - - @Override - public void onError(Exception exception) { - if (exception instanceof ConnectException) { - logger.warn("{}: Cannot connect to {}: {}", memberName, voter, exception.getMessage()); - } else { - logger.warn("{}: A voter {} encountered an error:", memberName, voter, exception); - } - onFail(); - } - - private void onFail() { - int failingVoteRemaining = failingVoteCounter.decrementAndGet(); - if (failingVoteRemaining <= 0) { - synchronized (raftMember.getTerm()) { - // wake up heartbeat thread to 
start the next election - raftMember.getTerm().notifyAll(); - } - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GenericHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GenericHandler.java deleted file mode 100644 index 31aa4a8ee58a..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GenericHandler.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.net.ConnectException; -import java.util.concurrent.atomic.AtomicReference; - -/** - * GenericHandler simply put the response into an AtomicReference and wake up the caller. Notice: - * the caller should wait on "result" if it wants to get the result. Please refer to the current - * usages before using this class. 
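Before the class body, a minimal usage sketch may help: the caller shares an AtomicReference with the handler, fires an asynchronous call, and then blocks in getResult. The client variable and RPC name here are assumptions for illustration only:

    // Hypothetical usage of GenericHandler<T> (type parameter assumed from the callback type).
    AtomicReference<Long> result = new AtomicReference<>();
    GenericHandler<Long> handler = new GenericHandler<>(receiverNode, result);
    asyncClient.someAsyncRpc(request, handler); // any thrift async method taking this callback
    Long value = handler.getResult(10_000);     // waits up to 10 s; remote failures rethrown as TException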
- * - * @param - */ -public class GenericHandler implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(GenericHandler.class); - - private Node source; - private AtomicReference result; - private Exception e; - - public GenericHandler(Node source, AtomicReference result) { - this.source = source; - this.result = result; - } - - @Override - public void onComplete(T response) { - if (result != null) { - synchronized (result) { - result.set(response); - result.notifyAll(); - } - } - } - - @Override - public void onError(Exception exception) { - if (!(exception instanceof ConnectException)) { - logger.error("Cannot receive result from {}", source, exception); - } else { - logger.warn("Cannot receive result from {} : {}", source, exception.getMessage()); - } - - if (result != null) { - synchronized (result) { - result.notifyAll(); - e = exception; - } - } - } - - public Exception getException() { - return e; - } - - @SuppressWarnings("java:S2274") // enable timeout - public T getResult(long timeout) throws InterruptedException, TException { - synchronized (result) { - if (result.get() == null && getException() == null) { - result.wait(timeout); - } - } - if (getException() != null) { - throw new TException(getException()); - } - return result.get(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetChildNodeNextLevelHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetChildNodeNextLevelHandler.java deleted file mode 100644 index f3bad1439f8e..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetChildNodeNextLevelHandler.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */
-
-package org.apache.iotdb.cluster.server.handlers.caller;
-
-import org.apache.iotdb.cluster.rpc.thrift.Node;
-
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicReference;
-
-public class GetChildNodeNextLevelHandler implements AsyncMethodCallback<Set<String>> {
-
-  private static final Logger logger = LoggerFactory.getLogger(GetChildNodeNextLevelHandler.class);
-
-  private Node contact;
-  private AtomicReference<Set<String>> result;
-
-  @Override
-  public void onComplete(Set<String> resp) {
-    logger.info("Received child node next level from {}", contact);
-    synchronized (result) {
-      result.set(resp);
-      result.notifyAll();
-    }
-  }
-
-  @Override
-  public void onError(Exception exception) {
-    logger.warn("Cannot get child node next level from {}, because", contact, exception);
-  }
-
-  public void setResponse(AtomicReference<Set<String>> response) {
-    this.result = response;
-  }
-
-  public void setContact(Node contact) {
-    this.contact = contact;
-  }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetChildNodeNextLevelPathHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetChildNodeNextLevelPathHandler.java
deleted file mode 100644
index 071c8457caee..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetChildNodeNextLevelPathHandler.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Set; -import java.util.concurrent.atomic.AtomicReference; - -public class GetChildNodeNextLevelPathHandler implements AsyncMethodCallback> { - - private static final Logger logger = - LoggerFactory.getLogger(GetChildNodeNextLevelPathHandler.class); - - private Node contact; - private AtomicReference> result; - - @Override - public void onComplete(Set resp) { - logger.info("Received child node next level path from {}", contact); - synchronized (result) { - result.set(resp); - result.notifyAll(); - } - } - - @Override - public void onError(Exception exception) { - logger.warn("Cannot get child node next level path from {}, because", contact, exception); - } - - public void setResponse(AtomicReference> response) { - this.result = response; - } - - public void setContact(Node contact) { - this.contact = contact; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetDevicesHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetDevicesHandler.java deleted file mode 100644 index 7a4d717f6b20..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetDevicesHandler.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.ByteBuffer; -import java.util.concurrent.atomic.AtomicReference; - -@SuppressWarnings("common-java:DuplicatedBlocks") -public class GetDevicesHandler implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(GetDevicesHandler.class); - - private Node contact; - private AtomicReference result; - - @Override - public void onComplete(ByteBuffer resp) { - logger.debug("Received devices schema from {}", contact); - synchronized (result) { - result.set(resp); - result.notifyAll(); - } - } - - @Override - public void onError(Exception exception) { - logger.warn("Cannot get devices schema from {}, because ", contact, exception); - } - - public void setResponse(AtomicReference response) { - this.result = response; - } - - public void setContact(Node contact) { - this.contact = contact; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetNodesListHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetNodesListHandler.java deleted file mode 100644 index d909c4d3272c..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetNodesListHandler.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.concurrent.atomic.AtomicReference; - -public class GetNodesListHandler implements AsyncMethodCallback> { - - private static final Logger logger = LoggerFactory.getLogger(GetNodesListHandler.class); - - private Node contact; - private AtomicReference> result; - - @Override - public void onComplete(List resp) { - logger.info("Received node lists of size {} from {}", resp.size(), contact); - synchronized (result) { - result.set(resp); - result.notifyAll(); - } - } - - @Override - public void onError(Exception exception) { - logger.warn("Cannot get node list from {}, because", contact, exception); - } - - public void setResponse(AtomicReference> response) { - this.result = response; - } - - public void setContact(Node contact) { - this.contact = contact; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetTimeseriesSchemaHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetTimeseriesSchemaHandler.java deleted file mode 100644 index 48c484f42ec6..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/GetTimeseriesSchemaHandler.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.ByteBuffer; -import java.util.concurrent.atomic.AtomicReference; - -public class GetTimeseriesSchemaHandler implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(GetTimeseriesSchemaHandler.class); - - private Node contact; - private AtomicReference result; - - @Override - public void onComplete(ByteBuffer resp) { - logger.info("Received timeseries schema from {}", contact); - synchronized (result) { - result.set(resp); - result.notifyAll(); - } - } - - @Override - public void onError(Exception exception) { - logger.warn("Cannot get timeseries schema from {}, because ", contact, exception); - } - - public void setResponse(AtomicReference response) { - this.result = response; - } - - public void setContact(Node contact) { - this.contact = contact; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/HeartbeatHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/HeartbeatHandler.java deleted file mode 100644 index 9fa1c6647f56..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/HeartbeatHandler.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatResponse; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.cluster.server.monitor.Peer; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.net.ConnectException; - -import static org.apache.iotdb.cluster.server.Response.RESPONSE_AGREE; - -/** - * HeartbeatHandler checks the response of a heartbeat and decides whether to start a catch-up or - * give up the leadership due to the term is stale. 
- */ -public class HeartbeatHandler implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(HeartbeatHandler.class); - - private RaftMember localMember; - private String memberName; - private Node receiver; - - public HeartbeatHandler(RaftMember localMember, Node receiver) { - this.localMember = localMember; - this.receiver = receiver; - this.memberName = localMember.getName(); - } - - @Override - public void onComplete(HeartBeatResponse resp) { - long followerTerm = resp.getTerm(); - if (logger.isDebugEnabled()) { - logger.debug( - "{}: Received a heartbeat response {} for last log index {}", - memberName, - followerTerm, - resp.getLastLogIndex()); - } - if (followerTerm == RESPONSE_AGREE) { - // current leadership is still valid - handleNormalHeartbeatResponse(resp); - } else { - // current leadership is invalid because the follower has a larger term - synchronized (localMember.getTerm()) { - long currTerm = localMember.getTerm().get(); - if (currTerm < followerTerm) { - logger.info( - "{}: Losing leadership because current term {} is smaller than {}", - memberName, - currTerm, - followerTerm); - localMember.stepDown(followerTerm, false); - } - } - } - } - - private void handleNormalHeartbeatResponse(HeartBeatResponse resp) { - // additional process depending on member type - localMember.processValidHeartbeatResp(resp, receiver); - - // check the necessity of performing a catch up - Node follower = resp.getFollower(); - long lastLogIdx = resp.getLastLogIndex(); - long lastLogTerm = resp.getLastLogTerm(); - long localLastLogIdx = localMember.getLogManager().getLastLogIndex(); - long localLastLogTerm = localMember.getLogManager().getLastLogTerm(); - if (logger.isDebugEnabled()) { - logger.debug( - "{}: Node {} is still alive, log index: {}/{}, log term: {}/{}", - memberName, - follower, - lastLogIdx, - localLastLogIdx, - lastLogTerm, - localLastLogTerm); - } - - Peer peer = - localMember - .getPeerMap() - .computeIfAbsent( - follower, k -> new Peer(localMember.getLogManager().getLastLogIndex())); - if (!localMember.getLogManager().isLogUpToDate(lastLogTerm, lastLogIdx) - || !localMember.getLogManager().matchTerm(lastLogTerm, lastLogIdx)) { - // the follower is not up-to-date - if (lastLogIdx == -1) { - // maybe the follower has restarted, so we need to find its match index again, because - // some logs may be lost due to restart - peer.setMatchIndex(-1); - } - - // only start a catch up when the follower's lastLogIndex remains stall and unchanged for 5 - // heartbeats. If the follower is installing snapshot currently, we reset the counter. 
- if (lastLogIdx == peer.getLastHeartBeatIndex() && !resp.isInstallingSnapshot()) { - // the follower's lastLogIndex is unchanged, increase inconsistent counter - int inconsistentNum = peer.incInconsistentHeartbeatNum(); - if (inconsistentNum >= 5) { - logger.info( - "{}: catching up node {}, index-term: {}-{}/{}-{}, peer match index {}", - memberName, - follower, - lastLogIdx, - lastLogTerm, - localLastLogIdx, - localLastLogTerm, - peer.getMatchIndex()); - localMember.catchUp(follower, lastLogIdx); - } - } else { - // the follower's lastLogIndex is changed, which means the follower is not down yet, we - // reset the counter to see if it can eventually catch up by itself - peer.resetInconsistentHeartbeatNum(); - } - } else { - // the follower is up-to-date - peer.setMatchIndex(Math.max(peer.getMatchIndex(), lastLogIdx)); - peer.resetInconsistentHeartbeatNum(); - } - peer.setLastHeartBeatIndex(lastLogIdx); - } - - @Override - public void onError(Exception exception) { - if (exception instanceof ConnectException) { - logger.warn("{}: Cannot connect to {}: {}", memberName, receiver, exception.getMessage()); - } else { - logger.error( - "{}: Heart beat error, receiver {}, {}", memberName, receiver, exception.getMessage()); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/JoinClusterHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/JoinClusterHandler.java deleted file mode 100644 index 2cf848c65c1f..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/JoinClusterHandler.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.rpc.thrift.AddNodeResponse; -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.concurrent.atomic.AtomicReference; - -/** - * JoinClusterHandler wakes up the main thread when the response of joining a cluster has arrived. 
- */
-public class JoinClusterHandler implements AsyncMethodCallback<AddNodeResponse> {
-
-  private static final Logger logger = LoggerFactory.getLogger(JoinClusterHandler.class);
-
-  private Node contact;
-  private AtomicReference<AddNodeResponse> response;
-
-  @Override
-  public void onComplete(AddNodeResponse resp) {
-    logger.info("Received a join cluster response {} from {}", resp.getRespNum(), contact);
-    synchronized (response) {
-      response.set(resp);
-      response.notifyAll();
-    }
-  }
-
-  @Override
-  public void onError(Exception exception) {
-    logger.warn("Cannot join the cluster from {}, because", contact, exception);
-    synchronized (response) {
-      response.notifyAll();
-    }
-  }
-
-  public void setResponse(AtomicReference<AddNodeResponse> response) {
-    this.response = response;
-  }
-
-  public void setContact(Node contact) {
-    this.contact = contact;
-  }
-}
diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/LogCatchUpHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/LogCatchUpHandler.java
deleted file mode 100644
index 7eb5e72489c1..000000000000
--- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/LogCatchUpHandler.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.cluster.server.handlers.caller;
-
-import org.apache.iotdb.cluster.log.Log;
-import org.apache.iotdb.cluster.rpc.thrift.Node;
-import org.apache.iotdb.cluster.server.member.RaftMember;
-
-import org.apache.thrift.async.AsyncMethodCallback;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static org.apache.iotdb.cluster.server.Response.RESPONSE_AGREE;
-import static org.apache.iotdb.cluster.server.Response.RESPONSE_LOG_MISMATCH;
-import static org.apache.iotdb.cluster.server.Response.RESPONSE_TOO_BUSY;
-
-/**
- * LogCatchUpHandler checks the result of appending a log in a catch-up task and decides to abort
- * the catch up or not.
- */ -public class LogCatchUpHandler implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(LogCatchUpHandler.class); - - private Node follower; - private Log log; - private AtomicBoolean appendSucceed; - private String memberName; - private RaftMember raftMember; - - @Override - public void onComplete(Long response) { - logger.debug("{}: Received a catch-up result of {} from {}", memberName, log, follower); - long resp = response; - if (resp == RESPONSE_AGREE) { - synchronized (appendSucceed) { - appendSucceed.set(true); - appendSucceed.notifyAll(); - } - logger.debug("{}: Succeeded to send log {}", memberName, log); - } else if (resp == RESPONSE_LOG_MISMATCH) { - // this may occur when the follower suddenly received a lot of logs, committed them and - // discarded the old ones, so we consider in this case the appending succeeded - logger.debug("{}: Log mismatch occurred when sending log {}", memberName, log); - synchronized (appendSucceed) { - appendSucceed.set(true); - appendSucceed.notifyAll(); - } - } else if (resp == RESPONSE_TOO_BUSY) { - // this may occur when the follower has too many logs unapplied, so we abort the - // catch-up task - logger.info("{}: Catchup task rejected by receiver {}", memberName, follower); - synchronized (appendSucceed) { - appendSucceed.set(false); - appendSucceed.notifyAll(); - } - } else { - // the follower's term has updated, which means a new leader is elected - logger.debug("{}: Received a rejection because term is updated to: {}", memberName, resp); - raftMember.stepDown(resp, false); - synchronized (appendSucceed) { - appendSucceed.notifyAll(); - } - logger.warn("{}: Catch-up aborted because leadership is lost", memberName); - } - } - - @Override - public void onError(Exception exception) { - synchronized (appendSucceed) { - appendSucceed.notifyAll(); - } - logger.warn("{}: Catch-up fails when sending log {}", memberName, log, exception); - } - - public void setLog(Log log) { - this.log = log; - } - - public void setAppendSucceed(AtomicBoolean appendSucceed) { - this.appendSucceed = appendSucceed; - } - - public void setRaftMember(RaftMember raftMember) { - this.raftMember = raftMember; - this.memberName = raftMember.getName(); - } - - public void setFollower(Node follower) { - this.follower = follower; - } - - public AtomicBoolean getAppendSucceed() { - return appendSucceed; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/LogCatchUpInBatchHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/LogCatchUpInBatchHandler.java deleted file mode 100644 index 07d2aa1dafbc..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/LogCatchUpInBatchHandler.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.member.RaftMember; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.ByteBuffer; -import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.apache.iotdb.cluster.server.Response.RESPONSE_AGREE; -import static org.apache.iotdb.cluster.server.Response.RESPONSE_LOG_MISMATCH; - -public class LogCatchUpInBatchHandler implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(LogCatchUpInBatchHandler.class); - - private Node follower; - private List logs; - private AtomicBoolean appendSucceed; - private String memberName; - private RaftMember raftMember; - - @Override - public void onComplete(Long response) { - logger.debug( - "{}: Received a catch-up result size of {} from {}", memberName, logs.size(), follower); - - long resp = response; - if (resp == RESPONSE_AGREE) { - synchronized (appendSucceed) { - appendSucceed.set(true); - appendSucceed.notifyAll(); - } - logger.debug("{}: Succeeded to send logs, size is {}", memberName, logs.size()); - - } else if (resp == RESPONSE_LOG_MISMATCH) { - // this is not probably possible - logger.error( - "{}: Log mismatch occurred when sending logs, whose size is {}", memberName, logs.size()); - synchronized (appendSucceed) { - appendSucceed.notifyAll(); - } - } else { - // the follower's term has updated, which means a new leader is elected - logger.debug( - "{}: Received a rejection because term is updated to {} when sending {} logs", - memberName, - resp, - logs.size()); - raftMember.stepDown(resp, false); - - synchronized (appendSucceed) { - appendSucceed.notifyAll(); - } - logger.warn( - "{}: Catch-up with {} logs aborted because leadership is lost", logs.size(), memberName); - } - } - - @Override - public void onError(Exception exception) { - synchronized (appendSucceed) { - appendSucceed.notifyAll(); - } - logger.warn( - "{}: Catch-up fails when sending log, whose size is {}", - memberName, - logs.size(), - exception); - } - - public void setAppendSucceed(AtomicBoolean appendSucceed) { - this.appendSucceed = appendSucceed; - } - - public void setRaftMember(RaftMember raftMember) { - this.raftMember = raftMember; - this.memberName = raftMember.getName(); - } - - public void setFollower(Node follower) { - this.follower = follower; - } - - public void setLogs(List logs) { - this.logs = logs; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/NodeStatusHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/NodeStatusHandler.java deleted file mode 100644 index 45493236535f..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/NodeStatusHandler.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.nodetool.function.Status; - -import org.apache.thrift.async.AsyncMethodCallback; - -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; - -public class NodeStatusHandler implements AsyncMethodCallback { - - private Map nodeStatusMap; - - private AtomicInteger countResponse; - - public NodeStatusHandler(Map nodeStatusMap) { - this.nodeStatusMap = nodeStatusMap; - this.countResponse = new AtomicInteger(); - } - - @Override - public void onComplete(Node response) { - synchronized (nodeStatusMap) { - if (response == null) { - return; - } - nodeStatusMap.put(response, Status.LIVE); - // except for this node itself - if (countResponse.incrementAndGet() == nodeStatusMap.size() - 1) { - nodeStatusMap.notifyAll(); - } - } - } - - @Override - public void onError(Exception exception) { - // unused - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/PreviousFillHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/PreviousFillHandler.java deleted file mode 100644 index d688a8b59061..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/PreviousFillHandler.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.tsfile.read.TimeValuePair; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -public class PreviousFillHandler implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(PreviousFillHandler.class); - private static final long MAX_WAIT_MIN = 3; - private CountDownLatch latch; - private TimeValuePair result = new TimeValuePair(Long.MIN_VALUE, null); - private List exceptions; - - public PreviousFillHandler(CountDownLatch latch) { - this.latch = latch; - this.exceptions = new ArrayList<>(); - } - - @Override - public synchronized void onComplete(ByteBuffer response) { - if (response != null && (response.limit() - response.position()) != 0) { - TimeValuePair timeValuePair = SerializeUtils.deserializeTVPair(response); - if (timeValuePair != null && timeValuePair.getTimestamp() > result.getTimestamp()) { - result = timeValuePair; - } - } - latch.countDown(); - } - - public synchronized void onComplete(TimeValuePair timeValuePair) { - if (timeValuePair.getTimestamp() > result.getTimestamp()) { - result = timeValuePair; - } - latch.countDown(); - } - - @Override - public synchronized void onError(Exception exception) { - logger.error("Cannot get previous fill result", exception); - this.exceptions.add(exception); - latch.countDown(); - } - - public TimeValuePair getResult() throws QueryProcessException { - if (!exceptions.isEmpty()) { - QueryProcessException e = - new QueryProcessException( - "Exception happened when performing previous fill. " - + "See the suppressed exceptions for causes."); - for (Exception exception : exceptions) { - e.addSuppressed(exception); - } - throw e; - } - - try { - if (!latch.await(MAX_WAIT_MIN, TimeUnit.MINUTES)) { - logger.warn( - "Not all nodes returned previous fill result when timed out, remaining {}", - latch.getCount()); - throw new QueryProcessException( - "Failed to get the previous fill result since " - + latch.getCount() - + " nodes didn't respond"); - } - } catch (InterruptedException e) { - throw new QueryProcessException(e.getMessage()); - } - return result; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/PullMeasurementSchemaHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/PullMeasurementSchemaHandler.java deleted file mode 100644 index c456ef5a9729..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/PullMeasurementSchemaHandler.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaResp; -import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.MeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.VectorMeasurementSchema; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.atomic.AtomicReference; - -public class PullMeasurementSchemaHandler implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(PullMeasurementSchemaHandler.class); - - private Node owner; - private List prefixPaths; - private AtomicReference> timeseriesSchemas; - - public PullMeasurementSchemaHandler( - Node owner, - List prefixPaths, - AtomicReference> timeseriesSchemas) { - this.owner = owner; - this.prefixPaths = prefixPaths; - this.timeseriesSchemas = timeseriesSchemas; - } - - @Override - public void onComplete(PullSchemaResp response) { - ByteBuffer buffer = response.schemaBytes; - int size = buffer.getInt(); - List schemas = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - schemas.add( - buffer.get() == 0 - ? MeasurementSchema.partialDeserializeFrom(buffer) - : VectorMeasurementSchema.partialDeserializeFrom(buffer)); - } - synchronized (timeseriesSchemas) { - timeseriesSchemas.set(schemas); - timeseriesSchemas.notifyAll(); - } - } - - @Override - public void onError(Exception exception) { - logger.error("Cannot pull time series schema of {} from {}", prefixPaths, owner, exception); - synchronized (timeseriesSchemas) { - timeseriesSchemas.notifyAll(); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/PullSnapshotHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/PullSnapshotHandler.java deleted file mode 100644 index ab6937e7fe51..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/PullSnapshotHandler.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.snapshot.SnapshotFactory; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotResp; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.atomic.AtomicReference; - -/** PullSnapshotHandler receives the result of pulling a data partition from a node. */ -public class PullSnapshotHandler - implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(PullSnapshotHandler.class); - private AtomicReference> resultRef; - private Node node; - private List slot; - private SnapshotFactory factory; - - public PullSnapshotHandler( - AtomicReference> resultRef, - Node owner, - List slots, - SnapshotFactory factory) { - this.resultRef = resultRef; - this.node = owner; - this.slot = slots; - this.factory = factory; - } - - @Override - public void onComplete(PullSnapshotResp response) { - synchronized (resultRef) { - Map ret = new HashMap<>(); - Map snapshotBytes = response.snapshotBytes; - for (Entry entry : snapshotBytes.entrySet()) { - T snapshot = factory.create(); - snapshot.deserialize(entry.getValue()); - ret.put(entry.getKey(), snapshot); - } - resultRef.set(ret); - resultRef.notifyAll(); - } - } - - @Override - public void onError(Exception exception) { - logger.error("Cannot pull snapshot of {} from {}", slot.size(), node, exception); - synchronized (resultRef) { - resultRef.notifyAll(); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/PullTimeseriesSchemaHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/PullTimeseriesSchemaHandler.java deleted file mode 100644 index 4bc59a718b75..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/PullTimeseriesSchemaHandler.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaResp; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.atomic.AtomicReference; - -public class PullTimeseriesSchemaHandler implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(PullTimeseriesSchemaHandler.class); - - private Node owner; - private List prefixPaths; - private AtomicReference> timeseriesSchemas; - - public PullTimeseriesSchemaHandler( - Node owner, - List prefixPaths, - AtomicReference> timeseriesSchemas) { - this.owner = owner; - this.prefixPaths = prefixPaths; - this.timeseriesSchemas = timeseriesSchemas; - } - - @Override - public void onComplete(PullSchemaResp response) { - ByteBuffer buffer = response.schemaBytes; - int size = buffer.getInt(); - List schemas = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - schemas.add(TimeseriesSchema.deserializeFrom(buffer)); - } - synchronized (timeseriesSchemas) { - timeseriesSchemas.set(schemas); - timeseriesSchemas.notifyAll(); - } - } - - @Override - public void onError(Exception exception) { - logger.error("Cannot pull time series schema of {} from {}", prefixPaths, owner, exception); - synchronized (timeseriesSchemas) { - timeseriesSchemas.notifyAll(); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/ShowTimeSeriesHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/ShowTimeSeriesHandler.java deleted file mode 100644 index a285f22f5323..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/ShowTimeSeriesHandler.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.query.dataset.ShowTimeSeriesResult; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -/** Handler for getting the schemas from each data group concurrently. */ -public class ShowTimeSeriesHandler implements AsyncMethodCallback> { - - private static final Logger logger = LoggerFactory.getLogger(ShowTimeSeriesHandler.class); - - /** String representation of a partial path for logging */ - private final String path; - - private final CountDownLatch countDownLatch; - private final long startTimeInMs; - - private final Map timeSeriesNameToResult = new HashMap<>(); - private final List exceptions = new ArrayList<>(); - - public ShowTimeSeriesHandler(int numGroup, PartialPath path) { - this.countDownLatch = new CountDownLatch(numGroup); - this.path = path.toString(); - this.startTimeInMs = System.currentTimeMillis(); - } - - @Override - public synchronized void onComplete(List response) { - for (ShowTimeSeriesResult r : response) { - timeSeriesNameToResult.put(r.getName(), r); - } - countDownLatch.countDown(); - logger.debug( - "Got {} timeseries in path {}. Remaining count: {}", - response.size(), - path, - countDownLatch.getCount()); - } - - @Override - public synchronized void onError(Exception exception) { - exceptions.add(exception); - countDownLatch.countDown(); - logger.error("Failed to get timeseries in path {} because of {}", path, exception.getMessage()); - } - - public List getResult() throws MetadataException { - if (!exceptions.isEmpty()) { - MetadataException e = - new MetadataException( - "Exception happened when getting the result." - + " See the suppressed exceptions for causes."); - for (Exception exception : exceptions) { - e.addSuppressed(exception); - } - throw e; - } - - // Wait for the results and ignore the interruptions. 
- long timeout = IoTDBDescriptor.getInstance().getConfig().getQueryTimeoutThreshold(); - while (System.currentTimeMillis() - startTimeInMs < timeout) { - try { - if (countDownLatch.await( - System.currentTimeMillis() - startTimeInMs, TimeUnit.MILLISECONDS)) { - break; - } - } catch (InterruptedException ignored) { - } - } - - if (countDownLatch.getCount() != 0) { - String errMsg = - String.format( - "Failed to get the show timeseries result" - + " since %d nodes didn't respond after %d ms", - countDownLatch.getCount(), timeout); - logger.error(errMsg); - throw new MetadataException(errMsg); - } - - return timeSeriesNameToResult.values().stream() - .sorted( - Comparator.comparingLong(ShowTimeSeriesResult::getLastTime) - .reversed() - .thenComparing(ShowTimeSeriesResult::getName)) - .collect(Collectors.toList()); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/SnapshotCatchUpHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/SnapshotCatchUpHandler.java deleted file mode 100644 index a46a7f04f48e..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/SnapshotCatchUpHandler.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.concurrent.atomic.AtomicBoolean; - -/** SnapshotCatchUpHandler receives the result of sending a snapshot to a stale node. 
*/ -public class SnapshotCatchUpHandler implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(SnapshotCatchUpHandler.class); - - private AtomicBoolean succeed; - private Node receiver; - private Snapshot snapshot; - - public SnapshotCatchUpHandler(AtomicBoolean succeed, Node receiver, Snapshot snapshot) { - this.succeed = succeed; - this.receiver = receiver; - this.snapshot = snapshot; - } - - @Override - public void onComplete(Void resp) { - synchronized (succeed) { - succeed.set(true); - succeed.notifyAll(); - } - } - - @Override - public void onError(Exception exception) { - logger.error("Cannot send snapshot {} to {}", snapshot, receiver, exception); - synchronized (succeed) { - succeed.notifyAll(); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/package-info.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/package-info.java deleted file mode 100644 index d271619ed77e..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/caller/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** Handlers in this package are handlers of the callers. */ -package org.apache.iotdb.cluster.server.handlers.caller; diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/forwarder/ForwardPlanHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/forwarder/ForwardPlanHandler.java deleted file mode 100644 index 0ca430d12e05..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/handlers/forwarder/ForwardPlanHandler.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.handlers.forwarder; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.StatusUtils; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.concurrent.atomic.AtomicReference; - -public class ForwardPlanHandler implements AsyncMethodCallback { - - private static final Logger logger = LoggerFactory.getLogger(ForwardPlanHandler.class); - private PhysicalPlan plan; - private AtomicReference result; - private Node node; - - public ForwardPlanHandler(AtomicReference result, PhysicalPlan plan, Node node) { - this.result = result; - this.plan = plan; - this.node = node; - } - - @Override - public void onComplete(TSStatus response) { - synchronized (result) { - result.set(response); - result.notifyAll(); - } - } - - @Override - public void onError(Exception exception) { - if (exception instanceof IOException) { - logger.warn("Cannot send plan {} to node {}: {}", plan, node, exception.getMessage()); - } else { - logger.error("Cannot send plan {} to node {}", plan, node, exception); - } - synchronized (result) { - TSStatus status = StatusUtils.getStatus(StatusUtils.INTERNAL_ERROR, exception.getMessage()); - result.set(status); - result.notifyAll(); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/heartbeat/DataHeartbeatThread.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/heartbeat/DataHeartbeatThread.java deleted file mode 100644 index 1044f1bea6dd..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/heartbeat/DataHeartbeatThread.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.heartbeat; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.member.DataGroupMember; - -public class DataHeartbeatThread extends HeartbeatThread { - - private static final int MAX_ELECTIONS_TO_SKIP = 5; - - private DataGroupMember dataGroupMember; - private int skippedElectionNumber = 0; - - public DataHeartbeatThread(DataGroupMember raftMember) { - super(raftMember); - this.dataGroupMember = raftMember; - } - - @Override - void sendHeartbeatSync(Node node) { - request.setHeader(dataGroupMember.getHeader()); - super.sendHeartbeatSync(node); - } - - @Override - void sendHeartbeatAsync(Node node) { - request.setHeader(dataGroupMember.getHeader()); - super.sendHeartbeatAsync(node); - } - - /** - * Different from the election of the meta group, the leader of a data group should have the - * newest meta log to guarantee it will not receive the data of the slots that no longer belongs - * to it. So the progress of meta logs is also examined. - */ - @Override - void startElection() { - // skip first few elections to let the header have a larger chance to become the leader, so - // possibly each node will only be one leader at the same time - if (!dataGroupMember.getThisNode().equals(dataGroupMember.getHeader().getNode()) - && skippedElectionNumber < MAX_ELECTIONS_TO_SKIP - && !hasHadLeader) { - skippedElectionNumber++; - return; - } - electionRequest.setHeader(dataGroupMember.getHeader()); - - super.startElection(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/heartbeat/HeartbeatThread.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/heartbeat/HeartbeatThread.java deleted file mode 100644 index 38a314dce2d5..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/heartbeat/HeartbeatThread.java +++ /dev/null @@ -1,443 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.heartbeat; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.rpc.thrift.ElectionRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatResponse; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.handlers.caller.ElectionHandler; -import org.apache.iotdb.cluster.server.handlers.caller.HeartbeatHandler; -import org.apache.iotdb.cluster.server.member.RaftMember; - -import org.apache.thrift.TException; -import org.apache.thrift.transport.TTransportException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collection; -import java.util.Random; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * HeartbeatThread takes the responsibility to send heartbeats (when this node is a leader), check - * if the leader is still online (when this node is a follower) or start elections (when this node - * is a elector). - */ -public class HeartbeatThread implements Runnable { - - private static final Logger logger = LoggerFactory.getLogger(HeartbeatThread.class); - - private RaftMember localMember; - private String memberName; - HeartBeatRequest request = new HeartBeatRequest(); - ElectionRequest electionRequest = new ElectionRequest(); - - private Random random = new Random(); - boolean hasHadLeader = false; - - HeartbeatThread(RaftMember localMember) { - this.localMember = localMember; - memberName = localMember.getName(); - } - - @Override - public void run() { - logger.info("{}: Heartbeat thread starts...", memberName); - // sleep random time to reduce first election conflicts - long electionWait = getElectionRandomWaitMs(); - try { - logger.info("{}: Sleep {}ms before first election", memberName, electionWait); - Thread.sleep(electionWait); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - while (!Thread.interrupted()) { - try { - switch (localMember.getCharacter()) { - case LEADER: - // send heartbeats to the followers - sendHeartbeats(); - synchronized (localMember.getHeartBeatWaitObject()) { - localMember.getHeartBeatWaitObject().wait(ClusterConstant.getHeartbeatIntervalMs()); - } - hasHadLeader = true; - break; - case FOLLOWER: - // check if heartbeat times out - long heartbeatInterval = - System.currentTimeMillis() - localMember.getLastHeartbeatReceivedTime(); - - long randomElectionTimeout = - ClusterConstant.getElectionTimeoutMs() + getElectionRandomWaitMs(); - if (heartbeatInterval >= randomElectionTimeout) { - // the leader is considered dead, an election will be started in the next loop - logger.info("{}: The leader {} timed out", memberName, localMember.getLeader()); - localMember.setCharacter(NodeCharacter.ELECTOR); - localMember.setLeader(ClusterConstant.EMPTY_NODE); - } else { - logger.debug( - "{}: Heartbeat from leader {} is still valid", - memberName, - localMember.getLeader()); - synchronized (localMember.getHeartBeatWaitObject()) { - // we sleep to next possible heartbeat timeout point - long leastWaitTime = - localMember.getLastHeartbeatReceivedTime() - + randomElectionTimeout - - 
System.currentTimeMillis(); - localMember.getHeartBeatWaitObject().wait(leastWaitTime); - } - } - hasHadLeader = true; - break; - case ELECTOR: - default: - onElectionsStart(); - startElections(); - onElectionsEnd(); - break; - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - break; - } catch (Exception e) { - logger.error("{}: Unexpected heartbeat exception:", memberName, e); - } - } - - logger.info("{}: Heartbeat thread exits", memberName); - } - - protected void onElectionsStart() { - logger.info("{}: Start elections", memberName); - } - - protected void onElectionsEnd() { - logger.info("{}: End elections", memberName); - } - - /** Send each node (except the local node) in the group of the member a heartbeat. */ - protected void sendHeartbeats() { - synchronized (localMember.getTerm()) { - request.setTerm(localMember.getTerm().get()); - request.setLeader(localMember.getThisNode()); - request.setCommitLogIndex(localMember.getLogManager().getCommitLogIndex()); - request.setCommitLogTerm(localMember.getLogManager().getCommitLogTerm()); - - sendHeartbeats(localMember.getAllNodes()); - } - } - - /** Send each node (except the local node) in list a heartbeat. */ - @SuppressWarnings("java:S2445") - private void sendHeartbeats(Collection nodes) { - if (logger.isDebugEnabled()) { - logger.debug( - "{}: Send heartbeat to {} followers, commit log index = {}", - memberName, - nodes.size() - 1, - request.getCommitLogIndex()); - } - synchronized (nodes) { - // avoid concurrent modification - for (Node node : nodes) { - if (node.equals(localMember.getThisNode())) { - continue; - } - if (Thread.currentThread().isInterrupted()) { - Thread.currentThread().interrupt(); - return; - } - - if (localMember.getCharacter() != NodeCharacter.LEADER) { - // if the character changes, abort the remaining heartbeats - logger.warn("The leadership of node {} is ended.", localMember.getThisNode()); - return; - } - - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - sendHeartbeatAsync(node); - } else { - sendHeartbeatSync(node); - } - } - } - } - - /** - * Send a heartbeat to "node" through "client". 
- * - * @param node - */ - void sendHeartbeatAsync(Node node) { - AsyncClient client = localMember.getAsyncHeartbeatClient(node); - if (client != null) { - // connecting to the local node results in a null - try { - logger.debug("{}: Sending heartbeat to {}", memberName, node); - client.sendHeartbeat(request, new HeartbeatHandler(localMember, node)); - } catch (Exception e) { - logger.warn("{}: Cannot send heart beat to node {}", memberName, node, e); - } - } - } - - void sendHeartbeatSync(Node node) { - HeartbeatHandler heartbeatHandler = new HeartbeatHandler(localMember, node); - HeartBeatRequest req = new HeartBeatRequest(); - req.setCommitLogTerm(request.commitLogTerm); - req.setCommitLogIndex(request.commitLogIndex); - req.setRegenerateIdentifier(request.regenerateIdentifier); - req.setRequireIdentifier(request.requireIdentifier); - req.setTerm(request.term); - req.setLeader(localMember.getThisNode()); - if (request.isSetHeader()) { - req.setHeader(request.header); - } - if (request.isSetPartitionTableBytes()) { - req.partitionTableBytes = request.partitionTableBytes; - req.setPartitionTableBytesIsSet(true); - } - localMember - .getSerialToParallelPool() - .submit( - () -> { - Client client = localMember.getSyncHeartbeatClient(node); - if (client != null) { - try { - logger.debug("{}: Sending heartbeat to {}", memberName, node); - HeartBeatResponse heartBeatResponse = client.sendHeartbeat(req); - heartbeatHandler.onComplete(heartBeatResponse); - } catch (TTransportException e) { - if (ClusterIoTDB.getInstance().shouldPrintClientConnectionErrorStack()) { - logger.warn( - "{}: Cannot send heartbeat to node {} due to network", memberName, node, e); - } else { - logger.warn( - "{}: Cannot send heartbeat to node {} due to network", memberName, node); - } - client.getInputProtocol().getTransport().close(); - } catch (Exception e) { - logger.warn( - memberName + ": Cannot send heart beat to node " + node.toString(), e); - } finally { - localMember.returnSyncClient(client); - } - } - }); - } - - /** - * Start elections until this node becomes a leader or a follower. - * - * @throws InterruptedException - */ - private void startElections() throws InterruptedException { - if (localMember.getAllNodes().size() == 1) { - // single node group, this node is always the leader - localMember.setCharacter(NodeCharacter.LEADER); - localMember.setLeader(localMember.getThisNode()); - logger.info("{}: Winning the election because the node is the only node.", memberName); - } - - // the election goes on until this node becomes a follower or a leader - while (localMember.getCharacter() == NodeCharacter.ELECTOR) { - startElection(); - if (localMember.getCharacter() == NodeCharacter.ELECTOR) { - // sleep random time to reduce election conflicts - long electionWait = getElectionRandomWaitMs(); - logger.info("{}: Sleep {}ms until next election", memberName, electionWait); - Thread.sleep(electionWait); - } - } - // take the election request as the first heartbeat - localMember.setLastHeartbeatReceivedTime(System.currentTimeMillis()); - } - - /** - * Start one round of election. Increase the local term, ask for vote from each of the nodes in - * the group and become the leader if at least half of them agree. 
- */ - @SuppressWarnings({"java:S2274"}) - // enable timeout - void startElection() { - if (localMember.isSkipElection()) { - logger.info("{}: Skip election because this node has stopped.", memberName); - return; - } - synchronized (localMember.getTerm()) { - long nextTerm = localMember.getTerm().incrementAndGet(); - localMember.setVoteFor(localMember.getThisNode()); - localMember.updateHardState(nextTerm, this.localMember.getVoteFor()); - - // the number of votes needed to become a leader, - // quorumNum should be equal to localMember.getAllNodes().size() / 2 + 1, - // but since it doesn’t need to vote for itself here, it directly decreases 1 - int quorumNum = localMember.getAllNodes().size() / 2; - logger.info("{}: Election {} starts, quorum: {}", memberName, nextTerm, quorumNum); - // set to true when the election has a result (rejected or succeeded) - AtomicBoolean electionTerminated = new AtomicBoolean(false); - // set to true when the election is won - AtomicBoolean electionValid = new AtomicBoolean(false); - // a decreasing vote counter - AtomicInteger quorum = new AtomicInteger(quorumNum); - - // NOTICE, failingVoteCounter should be equal to quorumNum + 1 - AtomicInteger failingVoteCounter = new AtomicInteger(quorumNum + 1); - - electionRequest.setTerm(nextTerm); - electionRequest.setElector(localMember.getThisNode()); - electionRequest.setLastLogTerm(localMember.getLogManager().getLastLogTerm()); - electionRequest.setLastLogIndex(localMember.getLogManager().getLastLogIndex()); - - requestVote( - localMember.getAllNodes(), - electionRequest, - nextTerm, - quorum, - electionTerminated, - electionValid, - failingVoteCounter); - // erase the log index so it can be updated in the next heartbeat - electionRequest.unsetLastLogIndex(); - - try { - logger.info( - "{}: Wait for {}ms until election time out", - memberName, - ClusterConstant.getElectionTimeoutMs()); - localMember.getTerm().wait(ClusterConstant.getElectionTimeoutMs()); - } catch (InterruptedException e) { - logger.info( - "{}: Unexpected interruption when waiting the result of election {}", - memberName, - nextTerm); - Thread.currentThread().interrupt(); - } - - // if the election times out, the remaining votes do not matter - electionTerminated.set(true); - if (electionValid.get()) { - logger.info("{}: Election {} accepted", memberName, nextTerm); - localMember.setCharacter(NodeCharacter.LEADER); - localMember.setLeader(localMember.getThisNode()); - } - } - } - - /** - * Request a vote from each of the "nodes". Each for vote will decrease the counter "quorum" and - * when it reaches 0, the flag "electionValid" and "electionTerminated" will be set to true. Any - * against vote will set the flag "electionTerminated" to true and ends the election. 
- * - * @param nodes - * @param request - * @param nextTerm the term of the election - * @param quorum - * @param electionTerminated - * @param electionValid - */ - @SuppressWarnings("java:S2445") - private void requestVote( - Collection nodes, - ElectionRequest request, - long nextTerm, - AtomicInteger quorum, - AtomicBoolean electionTerminated, - AtomicBoolean electionValid, - AtomicInteger failingVoteCounter) { - synchronized (nodes) { - // avoid concurrent modification - for (Node node : nodes) { - if (node.equals(localMember.getThisNode())) { - continue; - } - - ElectionHandler handler = - new ElectionHandler( - localMember, - node, - nextTerm, - quorum, - electionTerminated, - electionValid, - failingVoteCounter); - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - requestVoteAsync(node, handler, request); - } else { - requestVoteSync(node, handler, request); - } - } - } - } - - private void requestVoteAsync(Node node, ElectionHandler handler, ElectionRequest request) { - AsyncClient client = localMember.getAsyncHeartbeatClient(node); - if (client != null) { - logger.info("{}: Requesting a vote from {}", memberName, node); - try { - client.startElection(request, handler); - } catch (Exception e) { - logger.error("{}: Cannot request a vote from {}", memberName, node, e); - } - } - } - - private void requestVoteSync(Node node, ElectionHandler handler, ElectionRequest request) { - localMember - .getSerialToParallelPool() - .submit( - () -> { - Client client = localMember.getSyncHeartbeatClient(node); - if (client != null) { - logger.info("{}: Requesting a vote from {}", memberName, node); - try { - long result = client.startElection(request); - handler.onComplete(result); - } catch (TException e) { - client.getInputProtocol().getTransport().close(); - logger.warn( - memberName - + ": Cannot request a vote from " - + node.toString() - + " due to network", - e); - handler.onError(e); - } catch (Exception e) { - handler.onError(e); - } finally { - localMember.returnSyncClient(client); - } - } - }); - } - - private long getElectionRandomWaitMs() { - return Math.abs(random.nextLong() % ClusterConstant.getElectionMaxWaitMs()); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/heartbeat/MetaHeartbeatThread.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/heartbeat/MetaHeartbeatThread.java deleted file mode 100644 index f8ce0b8b23ce..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/heartbeat/MetaHeartbeatThread.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.heartbeat; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class MetaHeartbeatThread extends HeartbeatThread { - - private static final Logger logger = LoggerFactory.getLogger(MetaHeartbeatThread.class); - private MetaGroupMember localMetaMember; - - public MetaHeartbeatThread(MetaGroupMember metaMember) { - super(metaMember); - this.localMetaMember = metaMember; - } - - private void presendHeartbeat(Node node) { - // if the node's identifier is not clear, require it - request.setRequireIdentifier(!node.isSetNodeIdentifier()); - synchronized (localMetaMember.getIdConflictNodes()) { - request.unsetRegenerateIdentifier(); - if (localMetaMember.getIdConflictNodes().contains(node)) { - request.setRegenerateIdentifier(true); - } - } - - // if the node requires the partition table and it is ready, send it - if (localMetaMember.isNodeBlind(node) && localMetaMember.getPartitionTable() != null) { - logger.debug("Send partition table to {}", node); - request.setPartitionTableBytes(localMetaMember.getPartitionTable().serialize()); - // if the node does not receive the partition table, it will require it in the next heartbeat, so - // we can remove it now - localMetaMember.removeBlindNode(node); - } - } - - @Override - void sendHeartbeatSync(Node node) { - presendHeartbeat(node); - super.sendHeartbeatSync(node); - // erase the sent partition table so it will not be sent in the next heartbeat - request.unsetPartitionTableBytes(); - } - - @Override - void sendHeartbeatAsync(Node node) { - presendHeartbeat(node); - super.sendHeartbeatAsync(node); - // erase the sent partition table so it will not be sent in the next heartbeat - request.unsetPartitionTableBytes(); - } - - @Override - void startElection() { - super.startElection(); - - if (localMetaMember.getCharacter() == NodeCharacter.LEADER) { - // if the node becomes the leader, make sure the meta engine service is built - localMetaMember.buildMetaEngineServiceIfNotReady(); - - // A new raft leader needs to have at least one log in its term for committing logs with older - // terms. - // In the meta group, log frequency is very low. When the leader is changed while changing - // membership, it's necessary to process an empty log to make sure that cluster expansion - // operation can be carried out in time. - localMetaMember - .getAppendLogThreadPool() - .submit(() -> localMetaMember.processEmptyContentLog()); - // there is a risk that (1) a task is put into a pool - // and (2) the task puts more sub-tasks into the same pool, especially when the task can only - // terminate after all sub-tasks finish. - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/member/DataGroupMember.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/member/DataGroupMember.java deleted file mode 100644 index 7928feca671a..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/member/DataGroupMember.java +++ /dev/null @@ -1,1164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.member; - -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.ClientManager; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.LogExecutionException; -import org.apache.iotdb.cluster.exception.SnapshotInstallationException; -import org.apache.iotdb.cluster.exception.UnknownLogTypeException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogApplier; -import org.apache.iotdb.cluster.log.LogParser; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.applier.AsyncDataLogApplier; -import org.apache.iotdb.cluster.log.applier.DataLogApplier; -import org.apache.iotdb.cluster.log.logtypes.AddNodeLog; -import org.apache.iotdb.cluster.log.logtypes.CloseFileLog; -import org.apache.iotdb.cluster.log.logtypes.RemoveNodeLog; -import org.apache.iotdb.cluster.log.manage.FilePartitionedSnapshotLogManager; -import org.apache.iotdb.cluster.log.manage.PartitionedSnapshotLogManager; -import org.apache.iotdb.cluster.log.snapshot.FileSnapshot; -import org.apache.iotdb.cluster.log.snapshot.PartitionedSnapshot; -import org.apache.iotdb.cluster.log.snapshot.PullSnapshotTask; -import org.apache.iotdb.cluster.log.snapshot.PullSnapshotTaskDescriptor; -import org.apache.iotdb.cluster.metadata.CSchemaProcessor; -import org.apache.iotdb.cluster.partition.NodeAdditionResult; -import org.apache.iotdb.cluster.partition.NodeRemovalResult; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.slot.SlotManager; -import org.apache.iotdb.cluster.partition.slot.SlotManager.SlotStatus; -import org.apache.iotdb.cluster.partition.slot.SlotNodeAdditionResult; -import org.apache.iotdb.cluster.partition.slot.SlotNodeRemovalResult; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.partition.slot.SlotTimePartitionFilter; -import org.apache.iotdb.cluster.query.LocalQueryExecutor; -import org.apache.iotdb.cluster.query.manage.ClusterQueryManager; -import org.apache.iotdb.cluster.rpc.thrift.ElectionRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotResp; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.SendSnapshotRequest; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.PullSnapshotHintService; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.heartbeat.DataHeartbeatThread; -import org.apache.iotdb.cluster.server.monitor.NodeReport.DataMemberReport; -import org.apache.iotdb.cluster.server.monitor.NodeStatusManager; -import org.apache.iotdb.cluster.server.monitor.Peer; -import org.apache.iotdb.cluster.server.monitor.Timer; -import 
org.apache.iotdb.cluster.server.monitor.Timer.Statistic; -import org.apache.iotdb.cluster.utils.IOUtils; -import org.apache.iotdb.cluster.utils.StatusUtils; -import org.apache.iotdb.common.rpc.thrift.TEndPoint; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.service.JMXService; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.engine.storagegroup.DataRegion.TimePartitionFilter; -import org.apache.iotdb.db.exception.BatchProcessException; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.metadata.PathNotExistException; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.qp.executor.PlanExecutor; -import org.apache.iotdb.db.qp.physical.BatchPlan; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertMultiTabletsPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowsOfOneDevicePlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowsPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertTabletPlan; -import org.apache.iotdb.db.qp.physical.sys.FlushPlan; -import org.apache.iotdb.db.qp.physical.sys.LogPlan; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.rpc.TSStatusCode; -import org.apache.iotdb.tsfile.utils.Pair; - -import org.apache.thrift.protocol.TProtocolFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.BufferedInputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.file.Files; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; - -import static org.apache.iotdb.cluster.config.ClusterConstant.THREAD_POLL_WAIT_TERMINATION_TIME_S; - -public class DataGroupMember extends RaftMember implements DataGroupMemberMBean { - - private final String mbeanName; - - private static final Logger logger = LoggerFactory.getLogger(DataGroupMember.class); - - /** - * The MetaGroupMember that in charge of the DataGroupMember. Mainly for providing partition table - * and MetaLogManager. - */ - private MetaGroupMember metaGroupMember; - - /** The thread pool that runs the pull snapshot tasks. Pool size is the # of CPU cores. */ - private ExecutorService pullSnapshotService; - - /** - * When the member applies a pulled snapshot, it register hints in this service which will - * periodically inform the data source that one member has pulled snapshot. 
- */ - private PullSnapshotHintService pullSnapshotHintService; - - /** - * "queryManger" records the remote nodes which have queried this node, and the readers or - * executors this member has created for those queries. When the queries end, an EndQueryRequest - * will be sent to this member and related resources will be released. - */ - private ClusterQueryManager queryManager; - - /** - * "slotManager" tracks the status of slots during data transfers so that we can know whether the - * slot has non-pulled data. - */ - protected SlotManager slotManager; - - private LocalQueryExecutor localQueryExecutor; - - LogApplier dataLogApplier; - /** - * When a new partition table is installed, all data members will be checked if unchanged. If not, - * such members will be removed. - */ - private boolean unchanged; - - private LastAppliedPatitionTableVersion lastAppliedPartitionTableVersion; - - @TestOnly - public DataGroupMember(PartitionGroup nodes) { - // constructor for test - this.name = - "Data-" - + nodes.getHeader().getNode().getInternalIp() - + "-" - + nodes.getHeader().getNode().getDataPort() - + "-raftId-" - + nodes.getRaftId() - + ""; - allNodes = nodes; - mbeanName = - String.format( - "%s:%s=%s%d", - "org.apache.iotdb.cluster.service", - IoTDBConstant.JMX_TYPE, - "DataMember", - getRaftGroupId()); - setQueryManager(new ClusterQueryManager()); - localQueryExecutor = new LocalQueryExecutor(this); - lastAppliedPartitionTableVersion = new LastAppliedPatitionTableVersion(getMemberDir()); - } - - DataGroupMember(TProtocolFactory factory, PartitionGroup nodes, MetaGroupMember metaGroupMember) { - // The name is used in JMX, so we have to avoid to use "(" "," "=" ")" - super( - "Data-" - + nodes.getHeader().getNode().getInternalIp() - + "-" - + nodes.getHeader().getNode().getDataPort() - + "-raftId-" - + nodes.getRaftId() - + "", - new ClientManager( - ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(), - ClientManager.Type.DataGroupClient)); - this.metaGroupMember = metaGroupMember; - allNodes = nodes; - mbeanName = - String.format( - "%s:%s=%s%d", - "org.apache.iotdb.cluster.service", - IoTDBConstant.JMX_TYPE, - "DataMember", - getRaftGroupId()); - setQueryManager(new ClusterQueryManager()); - slotManager = new SlotManager(ClusterConstant.SLOT_NUM, getMemberDir(), getName()); - dataLogApplier = new DataLogApplier(metaGroupMember, this); - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncApplier() - && ClusterDescriptor.getInstance().getConfig().getReplicationNum() != 1) { - dataLogApplier = new AsyncDataLogApplier(dataLogApplier, name); - } - logManager = - new FilePartitionedSnapshotLogManager( - dataLogApplier, metaGroupMember.getPartitionTable(), allNodes.get(0), thisNode, this); - initPeerMap(); - term.set(logManager.getHardState().getCurrentTerm()); - voteFor = logManager.getHardState().getVoteFor(); - localQueryExecutor = new LocalQueryExecutor(this); - lastAppliedPartitionTableVersion = new LastAppliedPatitionTableVersion(getMemberDir()); - } - - /** - * Start heartbeat, catch-up, pull snapshot services and start all unfinished pull-snapshot-tasks. - * Calling the method twice does not induce side effects. - */ - @Override - public void start() { - if (heartBeatService != null) { - return; - } - logger.info("Starting DataGroupMember {}... 
RaftGroupID: {}", name, getRaftGroupId()); - JMXService.registerMBean(this, mbeanName); - super.start(); - heartBeatService.submit(new DataHeartbeatThread(this)); - pullSnapshotService = - IoTDBThreadPoolFactory.newFixedThreadPool( - Runtime.getRuntime().availableProcessors(), "pullSnapshot"); - pullSnapshotHintService = new PullSnapshotHintService(this); - pullSnapshotHintService.start(); - resumePullSnapshotTasks(); - } - - /** - * Stop heartbeat, catch-up and pull snapshot services and release all query resources. Calling - * the method twice does not induce side effects. - */ - @Override - public void stop() { - logger.info("Stopping DataGroupMember {}... RaftGroupID: {}", name, getRaftGroupId()); - JMXService.deregisterMBean(mbeanName); - super.stop(); - if (pullSnapshotService != null) { - pullSnapshotService.shutdownNow(); - try { - pullSnapshotService.awaitTermination(THREAD_POLL_WAIT_TERMINATION_TIME_S, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Unexpected interruption when waiting for pullSnapshotService to end", e); - } - pullSnapshotService = null; - pullSnapshotHintService.stop(); - } - - try { - getQueryManager().endAllQueries(); - } catch (StorageEngineException e) { - logger.error("Cannot release queries of {}", name, e); - } - logger.info("{}: stopped", name); - } - - @Override - long checkElectorLogProgress(ElectionRequest electionRequest) { - Node elector = electionRequest.getElector(); - // check if the node is in the group - if (!allNodes.contains(elector)) { - logger.info( - "{}: the elector {} is not in the data group {}, so reject this election.", - name, - getPartitionGroup(), - elector); - return Response.RESPONSE_NODE_IS_NOT_IN_GROUP; - } - return super.checkElectorLogProgress(electionRequest); - } - - /** - * The first node (on the hash ring) in this data group is the header. It determines the duty - * (what range on the ring do the group take responsibility for) of the group and although other - * nodes in this may change, this node is unchangeable unless the data group is dismissed. It is - * also the identifier of this data group. 
- */ - @Override - public RaftNode getHeader() { - return allNodes.getHeader(); - } - - public ClusterQueryManager getQueryManager() { - return queryManager; - } - - protected void setQueryManager(ClusterQueryManager queryManager) { - this.queryManager = queryManager; - } - - public static class Factory { - - private TProtocolFactory protocolFactory; - private MetaGroupMember metaGroupMember; - - public Factory(TProtocolFactory protocolFactory, MetaGroupMember metaGroupMember) { - this.protocolFactory = protocolFactory; - this.metaGroupMember = metaGroupMember; - } - - public DataGroupMember create(PartitionGroup partitionGroup) { - return new DataGroupMember(protocolFactory, partitionGroup, metaGroupMember); - } - } - - public boolean preAddNode(Node node) { - if (logger.isDebugEnabled()) { - logger.debug("{}: start to pre adding node {}", name, node); - } - synchronized (allNodes) { - if (allNodes.contains(node)) { - return false; - } - int insertIndex = -1; - // find the position to insert the new node, the nodes are ordered by their identifiers - for (int i = 0; i < allNodes.size() - 1; i++) { - Node prev = allNodes.get(i); - Node next = allNodes.get(i + 1); - if (prev.nodeIdentifier < node.nodeIdentifier && node.nodeIdentifier < next.nodeIdentifier - || prev.nodeIdentifier < node.nodeIdentifier - && next.nodeIdentifier < prev.nodeIdentifier - || node.nodeIdentifier < next.nodeIdentifier - && next.nodeIdentifier < prev.nodeIdentifier) { - insertIndex = i + 1; - break; - } - } - if (insertIndex > 0) { - allNodes.add(insertIndex, node); - peerMap.putIfAbsent(node, new Peer(logManager.getLastLogIndex())); - // if the local node is the last node and the insertion succeeds, this node should leave - // the group - logger.debug("{}: Node {} is inserted into the data group {}", name, node, allNodes); - } - return insertIndex > 0; - } - } - - /** - * Try to add a Node into the group to which the member belongs. - * - * @return true if this node should leave the group because of the addition of the node, false - * otherwise - */ - public boolean addNode(Node node, NodeAdditionResult result) { - if (logger.isDebugEnabled()) { - logger.debug("{}: start to add node {}", name, node); - } - - // mark slots that do not belong to this group any more - Set lostSlots = - ((SlotNodeAdditionResult) result) - .getLostSlots() - .getOrDefault(getHeader(), Collections.emptySet()); - for (Integer lostSlot : lostSlots) { - slotManager.setToSending(lostSlot, false); - } - slotManager.save(); - - synchronized (allNodes) { - preAddNode(node); - if (allNodes.contains(node) && allNodes.size() > config.getReplicationNum()) { - // remove the last node because the group size is fixed to replication number - Node removedNode = allNodes.remove(allNodes.size() - 1); - peerMap.remove(removedNode); - - if (removedNode.equals(leader.get()) && !removedNode.equals(thisNode)) { - // if the leader is removed, also start an election immediately - synchronized (term) { - setCharacter(NodeCharacter.ELECTOR); - setLeader(null); - } - synchronized (getHeartBeatWaitObject()) { - getHeartBeatWaitObject().notifyAll(); - } - } - return removedNode.equals(thisNode); - } - return false; - } - } - - /** - * Deserialize and install a snapshot sent by the leader. The type of the snapshot must be - * currently PartitionedSnapshot with FileSnapshot inside. 
- */ - public void receiveSnapshot(SendSnapshotRequest request) throws SnapshotInstallationException { - logger.info( - "{}: received a snapshot from {} with size {}", - name, - request.getHeader(), - request.getSnapshotBytes().length); - PartitionedSnapshot snapshot = - new PartitionedSnapshot<>(FileSnapshot.Factory.INSTANCE); - - snapshot.deserialize(ByteBuffer.wrap(request.getSnapshotBytes())); - if (logger.isDebugEnabled()) { - logger.debug("{} received a snapshot {}", name, snapshot); - } - snapshot.getDefaultInstaller(this).install(snapshot, -1, false); - } - - /** Send the requested snapshots to the applier node. */ - public PullSnapshotResp getSnapshot(PullSnapshotRequest request) throws IOException { - // if the requester pulls the snapshots because the header of the group is removed, then the - // member should no longer receive new data - if (request.isRequireReadOnly()) { - setReadOnly(); - } - - // Make sure local data is complete. - if (character != NodeCharacter.LEADER - && lastAppliedPartitionTableVersion.getVersion() - != metaGroupMember.getPartitionTable().getLastMetaLogIndex()) { - return null; - } - - List requiredSlots = request.getRequiredSlots(); - for (Integer requiredSlot : requiredSlots) { - // wait if the data of the slot is in another node - slotManager.waitSlot(requiredSlot); - } - if (logger.isDebugEnabled()) { - logger.debug( - "{}: {} slots are requested, first:{}, last: {}", - name, - requiredSlots.size(), - requiredSlots.get(0), - requiredSlots.get(requiredSlots.size() - 1)); - } - - // If the logs between [currCommitLogIndex, currLastLogIndex] are committed after the - // snapshot is generated, they will be invisible to the new slot owner and thus lost forever - long currLastLogIndex = logManager.getLastLogIndex(); - logger.info( - "{}: Waiting for logs to commit before snapshot, {}/{}", - name, - logManager.getCommitLogIndex(), - currLastLogIndex); - while (logManager.getCommitLogIndex() < currLastLogIndex) { - try { - Thread.sleep(10); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("{}: Unexpected interruption when waiting for logs to commit", name, e); - } - } - - // this synchronized should work with the one in AppendEntry when a log is going to commit, - // which may prevent the newly arrived data from being invisible to the new header. - synchronized (logManager) { - PullSnapshotResp resp = new PullSnapshotResp(); - Map resultMap = new HashMap<>(); - ((PartitionedSnapshotLogManager) logManager) - .takeSnapshotForSpecificSlots(requiredSlots, false); - - PartitionedSnapshot allSnapshot = (PartitionedSnapshot) logManager.getSnapshot(); - for (int requiredSlot : requiredSlots) { - Snapshot snapshot = allSnapshot.getSnapshot(requiredSlot); - if (snapshot != null) { - resultMap.put(requiredSlot, snapshot.serialize()); - } - } - resp.setSnapshotBytes(resultMap); - logger.debug("{}: Sending {} snapshots to the requester", name, resultMap.size()); - return resp; - } - } - - /** Pull snapshots from the previous holders after newNode joins the cluster. 
*/ - public void pullNodeAdditionSnapshots(List slots, Node newNode) { - // group the slots by their owners - Map> holderSlotsMap = new HashMap<>(); - synchronized (logManager) { - logger.info("{} pulling {} slots from remote", name, slots.size()); - PartitionedSnapshot snapshot = (PartitionedSnapshot) logManager.getSnapshot(); - Map prevHolders = - ((SlotPartitionTable) metaGroupMember.getPartitionTable()) - .getPreviousNodeMap(new RaftNode(newNode, getRaftGroupId())); - - for (int slot : slots) { - // skip the slot if the corresponding data is already replicated locally - if (snapshot.getSnapshot(slot) == null) { - PartitionGroup group = prevHolders.get(slot); - if (group != null) { - holderSlotsMap.computeIfAbsent(group, n -> new ArrayList<>()).add(slot); - } - } - } - } - - // pull snapshots from each owner's data group - for (Entry> entry : holderSlotsMap.entrySet()) { - List nodeSlots = entry.getValue(); - PullSnapshotTaskDescriptor taskDescriptor = - new PullSnapshotTaskDescriptor(entry.getKey(), nodeSlots, false); - pullFileSnapshot(taskDescriptor, null); - } - } - - /** - * Pull FileSnapshots (timeseries schemas and lists of TsFiles) of "nodeSlots" from one of the - * "prevHolders". The actual pulling will be performed in a separate thread. - * - * @param descriptor the descriptor of the pull-snapshot task - * @param snapshotSave set to the corresponding disk file if the task is resumed from disk, or set - * to null otherwise - */ - private void pullFileSnapshot(PullSnapshotTaskDescriptor descriptor, File snapshotSave) { - // If this node is a member of the previous holders, it's unnecessary to pull data again - if (descriptor.getPreviousHolders().contains(thisNode)) { - logger.info( - "{}: {} and other {} slots need not be pulled because such data already exists locally", - name, - descriptor.getSlots().get(0), - descriptor.getSlots().size() - 1); - // inform the previous holders that one member has successfully pulled the snapshot directly - registerPullSnapshotHint(descriptor); - return; - } - Iterator iterator = descriptor.getSlots().iterator(); - while (iterator.hasNext()) { - Integer nodeSlot = iterator.next(); - SlotStatus status = slotManager.getStatus(nodeSlot); - if (status != SlotStatus.NULL) { - // the pulling may already be issued during restart, skip it in that case - iterator.remove(); - } else { - // mark the slot as pulling to control reads and writes of the pulling slot - slotManager.setToPulling( - nodeSlot, descriptor.getPreviousHolders().getHeader().getNode(), false); - } - } - slotManager.save(); - - if (descriptor.getSlots().isEmpty()) { - return; - } - if (logger.isInfoEnabled()) { - logger.info( - "{}: {} and other {} slots are set to pulling", - name, - descriptor.getSlots().get(0), - descriptor.getSlots().size() - 1); - } - - pullSnapshotService.submit( - new PullSnapshotTask<>(descriptor, this, FileSnapshot.Factory.INSTANCE, snapshotSave)); - } - - /** Restart all unfinished pull-snapshot-tasks of the member. 
*/ - private void resumePullSnapshotTasks() { - File snapshotTaskDir = new File(getPullSnapshotTaskDir()); - if (!snapshotTaskDir.exists()) { - return; - } - - File[] files = snapshotTaskDir.listFiles(); - if (files != null) { - for (File file : files) { - if (!file.getName().endsWith(PullSnapshotTask.TASK_SUFFIX)) { - continue; - } - try (DataInputStream dataInputStream = - new DataInputStream(new BufferedInputStream(new FileInputStream(file)))) { - PullSnapshotTaskDescriptor descriptor = new PullSnapshotTaskDescriptor(); - descriptor.deserialize(dataInputStream); - pullFileSnapshot(descriptor, file); - } catch (IOException e) { - logger.error("Cannot resume pull-snapshot-task in file {}", file, e); - try { - Files.delete(file.toPath()); - } catch (IOException ex) { - logger.debug("Cannot remove pull snapshot task file {}", file, ex); - } - } - } - } - } - - /** @return a directory that stores the information of ongoing pulling snapshot tasks. */ - public String getPullSnapshotTaskDir() { - return getMemberDir() + "snapshot_task" + File.separator; - } - - /** @return the path of the directory that is provided exclusively for the member. */ - private String getMemberDir() { - return IoTDBDescriptor.getInstance().getConfig().getSystemDir() - + File.separator - + "raft" - + File.separator - + getHeader().getNode().nodeIdentifier - + File.separator - + getRaftGroupId() - + File.separator; - } - - public MetaGroupMember getMetaGroupMember() { - return metaGroupMember; - } - - /** - * If the member is the leader, let all members in the group close the specified partition of a - * storage group, else just return false. - */ - boolean closePartition(String storageGroupName, long partitionId, boolean isSeq) { - if (character != NodeCharacter.LEADER) { - return false; - } - CloseFileLog log = new CloseFileLog(storageGroupName, partitionId, isSeq); - synchronized (logManager) { - log.setCurrLogTerm(getTerm().get()); - log.setCurrLogIndex(logManager.getLastLogIndex() + 1); - - logManager.append(log); - - logger.info("Send the close file request of {} to other nodes", log); - } - try { - return appendLogInGroup(log); - } catch (LogExecutionException e) { - logger.error("Cannot close partition {}#{} seq:{}", storageGroupName, partitionId, isSeq, e); - } - return false; - } - - public boolean flushFileWhenDoSnapshot( - Map>> storageGroupPartitions, - List requiredSlots, - boolean needLeader) { - if (needLeader && character != NodeCharacter.LEADER) { - return false; - } - - Map>> localDataMemberStorageGroupPartitions = - new HashMap<>(); - for (Entry>> entry : storageGroupPartitions.entrySet()) { - List> localListPair = new ArrayList<>(); - - String storageGroupName = entry.getKey(); - List> tmpPairList = entry.getValue(); - for (Pair pair : tmpPairList) { - long timestamp = pair.left * StorageEngine.getTimePartitionInterval(); - int slotId = - SlotPartitionTable.getSlotStrategy() - .calculateSlotByTime(storageGroupName, timestamp, ClusterConstant.SLOT_NUM); - if (requiredSlots.contains(slotId)) { - localListPair.add(pair); - } - } - try { - localDataMemberStorageGroupPartitions.put(new PartialPath(storageGroupName), localListPair); - } catch (IllegalPathException e) { - // ignore - } - } - - if (localDataMemberStorageGroupPartitions.size() <= 0) { - logger.info("{}: has no data to flush", name); - return true; - } - FlushPlan flushPlan = new FlushPlan(null, true, localDataMemberStorageGroupPartitions); - try { - PlanExecutor.flushSpecifiedStorageGroups(flushPlan); - return true; - } catch 
(StorageGroupNotSetException e) { - logger.error("Some SGs are missing while flushing", e); - } - return false; - } - - /** - * Execute a non-query plan. If the member is a leader, a log for the plan will be created and - * process through the raft procedure, otherwise the plan will be forwarded to the leader. - * - * @param plan a non-query plan. - */ - @Override - public TSStatus executeNonQueryPlan(PhysicalPlan plan) { - if (ClusterDescriptor.getInstance().getConfig().getReplicationNum() == 1) { - try { - if (plan instanceof LogPlan) { - Log log; - try { - log = LogParser.getINSTANCE().parse(((LogPlan) plan).getLog()); - } catch (UnknownLogTypeException e) { - logger.error("Can not parse LogPlan {}", plan, e); - return StatusUtils.PARSE_LOG_ERROR; - } - handleChangeMembershipLogWithoutRaft(log); - } else { - ((DataLogApplier) dataLogApplier).applyPhysicalPlan(plan); - } - return StatusUtils.OK; - } catch (Exception e) { - Throwable cause = IOUtils.getRootCause(e); - boolean hasCreated = false; - try { - if (plan instanceof InsertPlan - && ClusterDescriptor.getInstance().getConfig().isEnableAutoCreateSchema()) { - if (plan instanceof InsertRowsPlan || plan instanceof InsertMultiTabletsPlan) { - if (e instanceof BatchProcessException) { - for (TSStatus status : ((BatchProcessException) e).getFailingStatus()) { - if (status.getCode() == TSStatusCode.TIMESERIES_NOT_EXIST.getStatusCode()) { - hasCreated = createTimeseriesForFailedInsertion(((InsertPlan) plan)); - ((BatchPlan) plan).getResults().clear(); - break; - } - } - } - } else if (cause instanceof PathNotExistException) { - hasCreated = createTimeseriesForFailedInsertion(((InsertPlan) plan)); - } - } - } catch (MetadataException | CheckConsistencyException ex) { - logger.error("{}: Cannot auto-create timeseries for {}", name, plan, e); - return StatusUtils.getStatus(StatusUtils.EXECUTE_STATEMENT_ERROR, ex.getMessage()); - } - if (hasCreated) { - return executeNonQueryPlan(plan); - } - return handleLogExecutionException(plan, cause); - } - } else { - TSStatus status = executeNonQueryPlanWithKnownLeader(plan); - if (!StatusUtils.NO_LEADER.equals(status)) { - return status; - } - - long startTime = Timer.Statistic.DATA_GROUP_MEMBER_WAIT_LEADER.getOperationStartTime(); - waitLeader(); - Timer.Statistic.DATA_GROUP_MEMBER_WAIT_LEADER.calOperationCostTimeFromStart(startTime); - - return executeNonQueryPlanWithKnownLeader(plan); - } - } - - @Override - ClientCategory getClientCategory() { - return ClientCategory.DATA; - } - - @Override - public String getMBeanName() { - return mbeanName; - } - - private void handleChangeMembershipLogWithoutRaft(Log log) { - if (log instanceof AddNodeLog) { - if (!metaGroupMember - .getPartitionTable() - .deserialize(((AddNodeLog) log).getPartitionTable())) { - return; - } - preAddNode(((AddNodeLog) log).getNewNode()); - setAndSaveLastAppliedPartitionTableVersion(((AddNodeLog) log).getMetaLogIndex()); - } else if (log instanceof RemoveNodeLog) { - if (!metaGroupMember - .getPartitionTable() - .deserialize(((RemoveNodeLog) log).getPartitionTable())) { - return; - } - preRemoveNode(((RemoveNodeLog) log).getRemovedNode()); - setAndSaveLastAppliedPartitionTableVersion(((RemoveNodeLog) log).getMetaLogIndex()); - } else { - logger.error("Unsupported log: {}", log); - } - } - - private TSStatus executeNonQueryPlanWithKnownLeader(PhysicalPlan plan) { - if (character == NodeCharacter.LEADER) { - long startTime = Statistic.DATA_GROUP_MEMBER_LOCAL_EXECUTION.getOperationStartTime(); - TSStatus status = 
processPlanLocally(plan); - boolean hasCreated = false; - try { - if (plan instanceof InsertPlan - && ClusterDescriptor.getInstance().getConfig().isEnableAutoCreateSchema()) { - if (plan instanceof InsertRowsPlan || plan instanceof InsertMultiTabletsPlan) { - if (status.getCode() == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) { - for (TSStatus tmpStatus : status.getSubStatus()) { - if (tmpStatus.getCode() == TSStatusCode.TIMESERIES_NOT_EXIST.getStatusCode()) { - hasCreated = createTimeseriesForFailedInsertion(((InsertPlan) plan)); - ((BatchPlan) plan).getResults().clear(); - break; - } - } - } - } else { - if (status.getCode() == TSStatusCode.TIMESERIES_NOT_EXIST.getStatusCode()) { - hasCreated = createTimeseriesForFailedInsertion(((InsertPlan) plan)); - } - } - } - } catch (MetadataException | CheckConsistencyException e) { - logger.error("{}: Cannot auto-create timeseries for {}", name, plan, e); - return StatusUtils.getStatus(StatusUtils.EXECUTE_STATEMENT_ERROR, e.getMessage()); - } - - if (hasCreated) { - status = processPlanLocally(plan); - } - Statistic.DATA_GROUP_MEMBER_LOCAL_EXECUTION.calOperationCostTimeFromStart(startTime); - if (status != null) { - return status; - } - } else if (leader.get() != null && !ClusterConstant.EMPTY_NODE.equals(leader.get())) { - long startTime = Timer.Statistic.DATA_GROUP_MEMBER_FORWARD_PLAN.getOperationStartTime(); - TSStatus result = forwardPlan(plan, leader.get(), getHeader()); - Timer.Statistic.DATA_GROUP_MEMBER_FORWARD_PLAN.calOperationCostTimeFromStart(startTime); - if (!StatusUtils.NO_LEADER.equals(result)) { - result.setRedirectNode( - new TEndPoint(leader.get().getClientIp(), leader.get().getClientPort())); - return result; - } - } - return StatusUtils.NO_LEADER; - } - - private boolean createTimeseriesForFailedInsertion(InsertPlan plan) - throws CheckConsistencyException, IllegalPathException { - logger.debug("create time series for failed insertion {}", plan); - // apply measurements according to failed measurements - if (plan instanceof InsertMultiTabletsPlan) { - for (InsertTabletPlan insertPlan : - ((InsertMultiTabletsPlan) plan).getInsertTabletPlanList()) { - if (insertPlan.getFailedMeasurements() != null) { - insertPlan.getPlanFromFailed(); - } - } - } - - if (plan instanceof InsertRowsPlan) { - for (InsertRowPlan insertPlan : ((InsertRowsPlan) plan).getInsertRowPlanList()) { - if (insertPlan.getFailedMeasurements() != null) { - insertPlan.getPlanFromFailed(); - } - } - } - - if (plan instanceof InsertRowsOfOneDevicePlan) { - for (InsertRowPlan insertPlan : ((InsertRowsOfOneDevicePlan) plan).getRowPlans()) { - if (insertPlan.getFailedMeasurements() != null) { - insertPlan.getPlanFromFailed(); - } - } - } - - if (plan.getFailedMeasurements() != null) { - plan.getPlanFromFailed(); - } - - return ((CSchemaProcessor) IoTDB.schemaProcessor).createTimeseries(plan); - } - - /** - * When the node does not play a member in a group any more, the corresponding local data should - * be removed. - */ - public void removeLocalData(List slots) { - if (slots.isEmpty()) { - return; - } - - Set slotSet = new HashSet<>(slots); - List allStorageGroupNames = IoTDB.schemaProcessor.getAllStorageGroupPaths(); - TimePartitionFilter filter = - (storageGroupName, timePartitionId) -> { - int slot = - SlotPartitionTable.getSlotStrategy() - .calculateSlotByPartitionNum( - storageGroupName, timePartitionId, ClusterConstant.SLOT_NUM); - /** - * If this slot is just held by different raft groups in the same node, it should keep the - * data of slot. 
- */ - if (((SlotPartitionTable) metaGroupMember.getPartitionTable()) - .judgeHoldSlot(thisNode, slot)) { - return false; - } - return slotSet.contains(slot); - }; - for (PartialPath sg : allStorageGroupNames) { - StorageEngine.getInstance().removePartitions(sg, filter); - } - for (Integer slot : slots) { - slotManager.setToNull(slot, false); - } - slotManager.save(); - - if (logger.isInfoEnabled()) { - logger.info( - "{}: data of {} and other {} slots are removed", name, slots.get(0), slots.size() - 1); - } - } - - public void preRemoveNode(Node removedNode) { - if (logger.isDebugEnabled()) { - logger.debug("{}: start to pre remove node {}", name, removedNode); - } - synchronized (allNodes) { - if (allNodes.contains(removedNode) && allNodes.size() == config.getReplicationNum()) { - // update the group if the deleted node was in it - PartitionGroup newGroup = - metaGroupMember.getPartitionTable().getPartitionGroup(getHeader()); - if (newGroup == null) { - return; - } - Node newNodeToGroup = newGroup.get(newGroup.size() - 1); - allNodes.add(newNodeToGroup); - peerMap.putIfAbsent(newNodeToGroup, new Peer(logManager.getLastLogIndex())); - } - } - } - - /** - * Return a TimePartitionFilter that tells whether a time partition is managed by this member. - * - * @return a TimePartitionFilter that tells whether a time partition is managed by this member. - */ - public TimePartitionFilter getTimePartitionFilter() { - Set slotSet = - new HashSet<>( - ((SlotPartitionTable) metaGroupMember.getPartitionTable()).getNodeSlots(getHeader())); - return new SlotTimePartitionFilter(slotSet); - } - - /** - * When a node is removed and IT IS NOT THE HEADER of the group, the member should take over some - * slots from the removed group, and add a new node to the group that the removed node was in. - */ - @SuppressWarnings("java:S2445") // the reference of allNodes is unchanged - public void removeNode(Node removedNode) { - if (logger.isDebugEnabled()) { - logger.debug("{}: start to remove node {}", name, removedNode); - } - - synchronized (allNodes) { - preRemoveNode(removedNode); - if (allNodes.contains(removedNode)) { - // update the group if the deleted node was in it - allNodes.remove(removedNode); - peerMap.remove(removedNode); - if (removedNode.equals(leader.get())) { - // if the leader is removed, also start an election immediately - synchronized (term) { - setCharacter(NodeCharacter.ELECTOR); - setLeader(null); - } - synchronized (getHeartBeatWaitObject()) { - getHeartBeatWaitObject().notifyAll(); - } - } - } - } - } - - public void pullSlots(NodeRemovalResult removalResult) { - List slotsToPull = - ((SlotNodeRemovalResult) removalResult).getNewSlotOwners().get(getHeader()); - if (slotsToPull != null) { - // pull the slots that should be taken over - PullSnapshotTaskDescriptor taskDescriptor = - new PullSnapshotTaskDescriptor( - removalResult.getRemovedGroup(getRaftGroupId()), new ArrayList<>(slotsToPull), true); - pullFileSnapshot(taskDescriptor, null); - } - } - - /** - * Generate a report containing the character, leader, term, last log term, last log index, header, - * and whether this member is read-only. 
- */ - public DataMemberReport genReport() { - long prevLastLogIndex = lastReportedLogIndex; - lastReportedLogIndex = logManager.getLastLogIndex(); - return new DataMemberReport( - character, - leader.get(), - term.get(), - logManager.getLastLogTerm(), - lastReportedLogIndex, - logManager.getCommitLogIndex(), - logManager.getCommitLogTerm(), - getHeader(), - readOnly, - NodeStatusManager.getINSTANCE().getLastResponseLatency(getHeader().getNode()), - lastHeartbeatReceivedTime, - prevLastLogIndex, - logManager.getMaxHaveAppliedCommitIndex()); - } - - @TestOnly - public void setMetaGroupMember(MetaGroupMember metaGroupMember) { - this.metaGroupMember = metaGroupMember; - this.localQueryExecutor = new LocalQueryExecutor(this); - } - - @TestOnly - void setLogManager(PartitionedSnapshotLogManager logManager) { - if (this.logManager != null) { - this.logManager.close(); - } - this.logManager = logManager; - super.setLogManager(logManager); - initPeerMap(); - } - - public SlotManager getSlotManager() { - return slotManager; - } - - public boolean onSnapshotInstalled(List slots) { - if (getMetaGroupMember().getPartitionTable().getAllNodes().contains(thisNode)) { - getMetaGroupMember() - .syncLocalApply(getMetaGroupMember().getPartitionTable().getLastMetaLogIndex(), false); - } - if (logger.isDebugEnabled()) { - logger.debug( - "{} received one replication snapshot installed of slot {} and other {} slots", - name, - slots.get(0), - slots.size() - 1); - } - List removableSlots = new ArrayList<>(); - for (Integer slot : slots) { - int sentReplicaNum = slotManager.sentOneReplication(slot, false); - if (sentReplicaNum >= config.getReplicationNum()) { - removableSlots.add(slot); - } - } - slotManager.save(); - removeLocalData(removableSlots); - return true; - } - - public void registerPullSnapshotHint(PullSnapshotTaskDescriptor descriptor) { - pullSnapshotHintService.registerHint(descriptor); - } - - /** - * Find the groups that should be queried due to data migration. When a slot is in the status of - * PULLING or PULLING_WRITABLE, the read of it should merge result to guarantee integrity. 
- */ - public Map> getPreviousHolderSlotMap() { - Map> holderSlotMap = new HashMap<>(); - RaftNode header = getHeader(); - Map> previousHolderMap = - ((SlotPartitionTable) getMetaGroupMember().getPartitionTable()).getPreviousNodeMap(); - if (previousHolderMap.containsKey(header)) { - for (Entry entry : previousHolderMap.get(header).entrySet()) { - int slot = entry.getKey(); - PartitionGroup holder = entry.getValue(); - if (slotManager.checkSlotInDataMigrationStatus(slot)) { - holderSlotMap.computeIfAbsent(holder, n -> new HashSet<>()).add(slot); - } - } - } - return holderSlotMap; - } - - public LocalQueryExecutor getLocalQueryExecutor() { - return localQueryExecutor; - } - - @TestOnly - public void setLocalQueryExecutor(LocalQueryExecutor localQueryExecutor) { - this.localQueryExecutor = localQueryExecutor; - } - - public boolean isUnchanged() { - return unchanged; - } - - public void setUnchanged(boolean unchanged) { - this.unchanged = unchanged; - } - - public void setAndSaveLastAppliedPartitionTableVersion(long version) { - lastAppliedPartitionTableVersion.setVersion(version); - lastAppliedPartitionTableVersion.save(); - } - - private class LastAppliedPatitionTableVersion { - - private static final String VERSION_FILE_NAME = "LAST_PARTITION_TABLE_VERSION"; - - private long version = -1; - - private String filePath; - - public LastAppliedPatitionTableVersion(String memberDir) { - this.filePath = memberDir + File.separator + VERSION_FILE_NAME; - load(); - } - - private void load() { - File versionFile = new File(filePath); - if (!versionFile.exists()) { - return; - } - try (FileInputStream fileInputStream = new FileInputStream(filePath); - DataInputStream dataInputStream = new DataInputStream(fileInputStream)) { - version = dataInputStream.readLong(); - } catch (Exception e) { - logger.warn("Cannot deserialize last partition table version from {}", filePath, e); - } - } - - public synchronized void save() { - File versionFile = new File(filePath); - if (!versionFile.getParentFile().exists() && !versionFile.getParentFile().mkdirs()) { - logger.warn("Cannot mkdirs for {}", versionFile); - } - try (FileOutputStream outputStream = new FileOutputStream(versionFile); - DataOutputStream dataOutputStream = new DataOutputStream(outputStream)) { - dataOutputStream.writeLong(version); - } catch (IOException e) { - logger.warn("Last partition table version in {} cannot be saved", filePath, e); - } - } - - public long getVersion() { - return version; - } - - public void setVersion(long version) { - this.version = version; - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/member/DataGroupMemberMBean.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/member/DataGroupMemberMBean.java deleted file mode 100644 index 964b5c49e9b3..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/member/DataGroupMemberMBean.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.member; - -public interface DataGroupMemberMBean extends RaftMemberMBean { - - String getCharacterAsString(); -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/member/MetaGroupMember.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/member/MetaGroupMember.java deleted file mode 100644 index bc2d75f876c7..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/member/MetaGroupMember.java +++ /dev/null @@ -1,1954 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.member; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.ClientManager; -import org.apache.iotdb.cluster.client.async.AsyncMetaClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncMetaClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.coordinator.Coordinator; -import org.apache.iotdb.cluster.exception.AddSelfException; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.ConfigInconsistentException; -import org.apache.iotdb.cluster.exception.EmptyIntervalException; -import org.apache.iotdb.cluster.exception.LogExecutionException; -import org.apache.iotdb.cluster.exception.PartitionTableUnavailableException; -import org.apache.iotdb.cluster.exception.SnapshotInstallationException; -import org.apache.iotdb.cluster.exception.StartUpCheckFailureException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogApplier; -import org.apache.iotdb.cluster.log.applier.MetaLogApplier; -import org.apache.iotdb.cluster.log.logtypes.AddNodeLog; -import org.apache.iotdb.cluster.log.logtypes.EmptyContentLog; -import org.apache.iotdb.cluster.log.logtypes.RemoveNodeLog; -import org.apache.iotdb.cluster.log.manage.MetaSingleSnapshotLogManager; -import org.apache.iotdb.cluster.log.snapshot.MetaSimpleSnapshot; -import org.apache.iotdb.cluster.partition.NodeAdditionResult; -import org.apache.iotdb.cluster.partition.NodeRemovalResult; -import 
org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.query.ClusterPlanRouter; -import org.apache.iotdb.cluster.rpc.thrift.AddNodeResponse; -import org.apache.iotdb.cluster.rpc.thrift.CheckStatusResponse; -import org.apache.iotdb.cluster.rpc.thrift.ElectionRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatResponse; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.SendSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.StartUpStatus; -import org.apache.iotdb.cluster.rpc.thrift.TSMetaService; -import org.apache.iotdb.cluster.rpc.thrift.TSMetaService.AsyncClient; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.handlers.caller.GenericHandler; -import org.apache.iotdb.cluster.server.handlers.caller.NodeStatusHandler; -import org.apache.iotdb.cluster.server.heartbeat.MetaHeartbeatThread; -import org.apache.iotdb.cluster.server.monitor.NodeReport.MetaMemberReport; -import org.apache.iotdb.cluster.server.monitor.NodeStatusManager; -import org.apache.iotdb.cluster.server.monitor.Timer; -import org.apache.iotdb.cluster.server.service.DataGroupEngine; -import org.apache.iotdb.cluster.utils.ClusterUtils; -import org.apache.iotdb.cluster.utils.PartitionUtils; -import org.apache.iotdb.cluster.utils.StatusUtils; -import org.apache.iotdb.cluster.utils.nodetool.function.Status; -import org.apache.iotdb.common.rpc.thrift.TEndPoint; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.service.IService; -import org.apache.iotdb.commons.service.ServiceType; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.db.utils.TimeValuePairUtils; -import org.apache.iotdb.db.utils.TimeValuePairUtils.Intervals; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; - -import org.apache.thrift.TException; -import org.apache.thrift.protocol.TProtocolFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; -import java.io.BufferedReader; -import java.io.BufferedWriter; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.FileReader; -import java.io.FileWriter; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.ExecutorService; -import 
java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import static org.apache.iotdb.cluster.utils.ClusterUtils.WAIT_START_UP_CHECK_TIME_SEC; -import static org.apache.iotdb.cluster.utils.ClusterUtils.analyseStartUpCheckResult; - -@SuppressWarnings("java:S1135") -public class MetaGroupMember extends RaftMember implements IService, MetaGroupMemberMBean { - - private static final String mbeanName = - String.format( - "%s:%s=%s", - "org.apache.iotdb.cluster.service", IoTDBConstant.JMX_TYPE, "MetaGroupEngine"); - - /** the file that contains the identifier of this node */ - static final String NODE_IDENTIFIER_FILE_NAME = - IoTDBDescriptor.getInstance().getConfig().getSystemDir() + File.separator + "node_identifier"; - /** the file that contains the serialized partition table */ - static final String PARTITION_FILE_NAME = - IoTDBDescriptor.getInstance().getConfig().getSystemDir() + File.separator + "partitions"; - /** in case of data loss, some file changes would be made to a temporary file first */ - private static final String TEMP_SUFFIX = ".tmp"; - - private static final Logger logger = LoggerFactory.getLogger(MetaGroupMember.class); - /** - * when joining a cluster this node will retry at most "DEFAULT_JOIN_RETRY" times before returning - * a failure to the client - */ - private static final int DEFAULT_JOIN_RETRY = 10; - - /** - * blind nodes are nodes that do not have the partition table, and if this node is the leader, the - * partition table should be sent to them at the next heartbeat - */ - private final Set blindNodes = new HashSet<>(); - /** - * as a leader, when a follower sent this node its identifier, the identifier may conflict with - * other nodes', such conflicting nodes will be recorded and at the next heartbeat, and they will - * be required to regenerate an identifier. - */ - private final Set idConflictNodes = new HashSet<>(); - /** - * the identifier and its belonging node, for conflict detection, may be used in more places in - * the future - */ - private Map idNodeMap = null; - - /** nodes in the cluster and data partitioning */ - private PartitionTable partitionTable; - /** router calculates the partition groups that a partitioned plan should be sent to */ - private ClusterPlanRouter router; - - /** - * containing configurations that should be kept the same cluster-wide, and must be checked before - * establishing a cluster or joining a cluster. - */ - private StartUpStatus startUpStatus; - - private Coordinator coordinator; - - public void setCoordinator(Coordinator coordinator) { - this.coordinator = coordinator; - } - - public Coordinator getCoordinator() { - return this.coordinator; - } - - public ClusterPlanRouter getRouter() { - return router; - } - - public boolean isReady() { - return ready; - } - - public void setReady(boolean ready) { - this.ready = ready; - } - - // whether the MetaEngine has been ready. 
- boolean ready = false; - - @TestOnly - public MetaGroupMember() {} - - public MetaGroupMember(TProtocolFactory factory, Node thisNode, Coordinator coordinator) { - super( - "Meta", - new ClientManager( - ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(), - ClientManager.Type.MetaGroupClient)); - allNodes = new PartitionGroup(); - initPeerMap(); - - // committed logs are applied to the state machine (the IoTDB instance) through the applier - LogApplier metaLogApplier = new MetaLogApplier(this); - logManager = new MetaSingleSnapshotLogManager(metaLogApplier, this); - term.set(logManager.getHardState().getCurrentTerm()); - voteFor = logManager.getHardState().getVoteFor(); - - setThisNode(thisNode); - // load the identifier from the disk or generate a new one - loadIdentifier(); - allNodes.add(thisNode); - - startUpStatus = getNewStartUpStatus(); - - // try loading the partition table if there was a previous cluster - this.coordinator = coordinator; - coordinator.linkMetaGroupMember(this); - loadPartitionTable(); - } - - /** - * Find the DataGroupMember that manages the partition of "storageGroupName"@"partitionId", and - * close the partition through that member. Notice: only partitions owned by this node can be - * closed by the method. - */ - public boolean closePartition(String storageGroupName, long partitionId, boolean isSeq) { - RaftNode raftNode = - partitionTable.routeToHeaderByTime( - storageGroupName, partitionId * StorageEngine.getTimePartitionInterval()); - DataGroupMember localDataMember = getLocalDataMember(raftNode); - if (localDataMember == null || localDataMember.getCharacter() != NodeCharacter.LEADER) { - return false; - } - return localDataMember.closePartition(storageGroupName, partitionId, isSeq); - } - - DataGroupEngine getDataGroupEngine() { - return ClusterIoTDB.getInstance().getDataGroupEngine(); - } - - /** - * Add seed nodes from the config, start the heartbeat and catch-up thread pool, initialize - * QueryCoordinator and FileFlushPolicy, then start the reportThread. Calling the method twice - * does not induce side effect. - */ - @Override - public void start() { - if (heartBeatService != null) { - return; - } - addSeedNodes(); - NodeStatusManager.getINSTANCE().setMetaGroupMember(this); - super.start(); - } - - /** - * Stop the heartbeat and catch-up thread pool, DataClusterServer, ClusterTSServiceImpl and - * reportThread. Calling the method twice does not induce side effects. - */ - @Override - public void stop() { - super.stop(); - logger.info("{}: stopped", name); - } - - @Override - public ServiceType getID() { - return ServiceType.CLUSTER_META_ENGINE; - } - - /** - * Parse the seed nodes from the cluster configuration and add them into the node list. Each - * seedUrl should be like "{hostName}:{metaPort}" Ignore bad-formatted seedUrls. - */ - protected void addSeedNodes() { - if (allNodes.size() > 1) { - // a local partition table was loaded and allNodes were updated, there is no need to add - // nodes from seedUrls - return; - } - List seedUrls = config.getSeedNodeUrls(); - // initialize allNodes - for (String seedUrl : seedUrls) { - Node node = ClusterUtils.parseNode(seedUrl); - if (node != null - && (!node.getInternalIp().equals(thisNode.internalIp) - || node.getMetaPort() != thisNode.getMetaPort()) - && !allNodes.contains(node)) { - // do not add the local node since it is added in the constructor - allNodes.add(node); - } - } - } - - /** - * Apply the addition of a new node. 
Register its identifier, add it to the node list and - * partition table, serialize the partition table and update the DataGroupMembers. - */ - public void applyAddNode(AddNodeLog addNodeLog) { - - long startTime = System.currentTimeMillis(); - Node newNode = addNodeLog.getNewNode(); - synchronized (allNodes) { - if (logger.isDebugEnabled()) { - logger.debug("{}: adding a new node {} into {}", name, newNode, allNodes); - } - - if (!allNodes.contains(newNode)) { - registerNodeIdentifier(newNode, newNode.getNodeIdentifier()); - allNodes.add(newNode); - } - - // update the partition table - savePartitionTable(); - - // update local data members - NodeAdditionResult result = partitionTable.getNodeAdditionResult(newNode); - getDataGroupEngine().addNode(newNode, result); - if (logger.isDebugEnabled()) { - logger.debug("{}: success to add a new node {} into {}", name, newNode, allNodes); - } - } - logger.info( - "{}: execute adding node {} cost {} ms", - name, - newNode, - (System.currentTimeMillis()) - startTime); - } - - /** - * This node itself is a seed node, and it is going to build the initial cluster with other seed - * nodes. This method is to skip one-by-one additions to establish a large cluster quickly. - */ - public void buildCluster() throws ConfigInconsistentException, StartUpCheckFailureException { - // see if the seed nodes have consistent configurations - checkSeedNodesStatus(); - // just establish the heartbeat thread and it will do the remaining - threadTaskInit(); - if (allNodes.size() == 1) { - // if there is only one node in the cluster, no heartbeat will be received, and - // consequently data group will not be built, so we directly build data members here - if (partitionTable == null) { - partitionTable = new SlotPartitionTable(allNodes, thisNode); - logger.info("Partition table is set up"); - } - initIdNodeMap(); - router = new ClusterPlanRouter(partitionTable); - this.coordinator.setRouter(router); - rebuildDataGroups(); - ready = true; - } - // else, we have to wait the meta group elects the Leader, and let the leader confirm the - // correct PartitionTable. - // then we can set the meta group Engine ready. - } - - private void threadTaskInit() { - heartBeatService.submit(new MetaHeartbeatThread(this)); - } - - /** - * This node is not a seed node and wants to join an established cluster. Pick up a node randomly - * from the seed nodes and send a join request to it. 
- */ - public void joinCluster() throws ConfigInconsistentException, StartUpCheckFailureException { - if (allNodes.size() == 1) { - logger.error("Seed nodes not provided, cannot join cluster"); - throw new ConfigInconsistentException(); - } - - int retry = DEFAULT_JOIN_RETRY; - while (retry > 0) { - // randomly pick up a node to try - Node node = allNodes.get(random.nextInt(allNodes.size())); - if (node.equals(thisNode)) { - continue; - } - logger.info("start joining the cluster with the help of {}", node); - try { - if (joinCluster(node, startUpStatus)) { - logger.info("Joined a cluster, starting the heartbeat thread"); - setCharacter(NodeCharacter.FOLLOWER); - setLastHeartbeatReceivedTime(System.currentTimeMillis()); - threadTaskInit(); - return; - } - // wait 5s to start the next try - Thread.sleep(ClusterDescriptor.getInstance().getConfig().getJoinClusterTimeOutMs()); - } catch (TException e) { - logger.warn("Cannot join the cluster from {}, because:", node, e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("Unexpected interruption when waiting to join a cluster", e); - } - // start the next try - retry--; - } - // all tries failed - logger.error("Cannot join the cluster after {} retries", DEFAULT_JOIN_RETRY); - throw new StartUpCheckFailureException(); - } - - public StartUpStatus getNewStartUpStatus() { - StartUpStatus newStartUpStatus = new StartUpStatus(); - newStartUpStatus.setPartitionInterval( - IoTDBDescriptor.getInstance().getConfig().getPartitionInterval()); - newStartUpStatus.setHashSalt(ClusterConstant.HASH_SALT); - newStartUpStatus.setReplicationNumber( - ClusterDescriptor.getInstance().getConfig().getReplicationNum()); - newStartUpStatus.setClusterName(ClusterDescriptor.getInstance().getConfig().getClusterName()); - newStartUpStatus.setMultiRaftFactor( - ClusterDescriptor.getInstance().getConfig().getMultiRaftFactor()); - List seedUrls = ClusterDescriptor.getInstance().getConfig().getSeedNodeUrls(); - List seedNodeList = new ArrayList<>(); - for (String seedUrl : seedUrls) { - seedNodeList.add(ClusterUtils.parseNode(seedUrl)); - } - newStartUpStatus.setSeedNodeList(seedNodeList); - return newStartUpStatus; - } - - /** - * Send a join cluster request to "node". If the joining is accepted, set the partition table, - * start DataClusterServer and ClusterTSServiceImpl and initialize DataGroupMembers. - * - * @return true if the node has successfully joined the cluster, false otherwise. 
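// Illustrative sketch, not part of the original file: the bounded, randomized retry loop used by
// joinCluster() above. "tryJoinVia" is a hypothetical stand-in for the RPC call to one seed node.
import java.util.List;
import java.util.Random;
import java.util.function.Predicate;

class JoinRetrySketch {
  static boolean joinWithRetries(List<String> seeds, String self, Predicate<String> tryJoinVia,
                                 int maxRetries, long waitMs) throws InterruptedException {
    Random random = new Random();
    for (int retry = 0; retry < maxRetries; retry++) {
      String seed = seeds.get(random.nextInt(seeds.size()));
      if (seed.equals(self)) {
        continue; // never ask ourselves; unlike the original loop, this simplified form spends a retry
      }
      if (tryJoinVia.test(seed)) {
        return true; // joined; the caller becomes a FOLLOWER and starts the heartbeat thread
      }
      Thread.sleep(waitMs); // wait before the next attempt, like joinClusterTimeOutMs above
    }
    return false; // all retries failed; the caller throws StartUpCheckFailureException
  }
}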
- */ - private boolean joinCluster(Node node, StartUpStatus startUpStatus) - throws TException, InterruptedException, ConfigInconsistentException { - - AddNodeResponse resp; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncMetaClient client = (AsyncMetaClient) getAsyncClient(node); - if (client == null) { - return false; - } - resp = SyncClientAdaptor.addNode(client, thisNode, startUpStatus); - } else { - SyncMetaClient client = (SyncMetaClient) getSyncClient(node); - if (client == null) { - return false; - } - try { - resp = client.addNode(thisNode, startUpStatus); - } catch (TException e) { - client.close(); - throw e; - } finally { - client.returnSelf(); - } - } - - if (resp == null) { - logger.warn("Join cluster request timed out"); - } else if (resp.getRespNum() == Response.RESPONSE_AGREE) { - logger.info("Node {} admitted this node into the cluster", node); - ByteBuffer partitionTableBuffer = resp.partitionTableBytes; - acceptVerifiedPartitionTable(partitionTableBuffer, true); - return true; - } else if (resp.getRespNum() == Response.RESPONSE_IDENTIFIER_CONFLICT) { - logger.info( - "The identifier {} conflicts the existing ones, regenerate a new one", - thisNode.getNodeIdentifier()); - setNodeIdentifier(genNodeIdentifier()); - } else if (resp.getRespNum() == Response.RESPONSE_NEW_NODE_PARAMETER_CONFLICT) { - handleConfigInconsistency(resp); - } else if (resp.getRespNum() == Response.RESPONSE_DATA_MIGRATION_NOT_FINISH) { - logger.warn( - "The data migration of the previous membership change operation is not finished. Please try again later"); - } else { - logger.warn("Joining the cluster is rejected by {} for response {}", node, resp.getRespNum()); - } - return false; - } - - private void handleConfigInconsistency(AddNodeResponse resp) throws ConfigInconsistentException { - CheckStatusResponse checkStatusResponse = resp.getCheckStatusResponse(); - String parameters = - (checkStatusResponse.isPartitionalIntervalEquals() ? "" : ", partition interval") - + (checkStatusResponse.isHashSaltEquals() ? "" : ", hash salt") - + (checkStatusResponse.isReplicationNumEquals() ? "" : ", replication number") - + (checkStatusResponse.isSeedNodeEquals() ? "" : ", seedNodes") - + (checkStatusResponse.isClusterNameEquals() ? "" : ", clusterName") - + (checkStatusResponse.isMultiRaftFactorEquals() ? "" : ", multiRaftFactor"); - logger.error( - "The start up configuration{} conflicts the cluster. Please reset the configurations. ", - parameters.substring(1)); - throw new ConfigInconsistentException(); - } - - @Override - long checkElectorLogProgress(ElectionRequest electionRequest) { - Node elector = electionRequest.getElector(); - // check if the node is in the group - if (partitionTable != null && !allNodes.contains(elector)) { - logger.info( - "{}: the elector {} is not in the data group {}, so reject this election.", - name, - getPartitionGroup(), - elector); - return Response.RESPONSE_NODE_IS_NOT_IN_GROUP; - } - return super.checkElectorLogProgress(electionRequest); - } - - /** - * This is the behavior of a follower: - * - *
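// Illustrative sketch, not part of the original file: how handleConfigInconsistency() above turns
// the individual equality flags of a CheckStatusResponse into a readable list of conflicting
// parameters for the error log.
class ConfigConflictMessageSketch {
  static String conflictingParameters(boolean partitionIntervalEquals, boolean hashSaltEquals,
                                      boolean replicationNumEquals, boolean seedNodeEquals,
                                      boolean clusterNameEquals, boolean multiRaftFactorEquals) {
    String parameters =
        (partitionIntervalEquals ? "" : ", partition interval")
            + (hashSaltEquals ? "" : ", hash salt")
            + (replicationNumEquals ? "" : ", replication number")
            + (seedNodeEquals ? "" : ", seedNodes")
            + (clusterNameEquals ? "" : ", clusterName")
            + (multiRaftFactorEquals ? "" : ", multiRaftFactor");
    // substring(1) strips the leading comma; it assumes at least one flag is false,
    // which holds because this method is only reached on a parameter-conflict response.
    return parameters.isEmpty() ? "" : parameters.substring(1);
  }
}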

Process the heartbeat request from a valid leader. Generate and tell the leader the - * identifier of the node if necessary. If the partition table is missing, use the one from the - * request or require it in the response. TODO should go to RPC Service - */ - @Override - void processValidHeartbeatReq(HeartBeatRequest request, HeartBeatResponse response) { - if (request.isRequireIdentifier()) { - // the leader wants to know who the node is - if (request.isRegenerateIdentifier()) { - // the previously sent id conflicted, generate a new one - setNodeIdentifier(genNodeIdentifier()); - } - logger.debug("Send identifier {} to the leader", thisNode.getNodeIdentifier()); - response.setFollowerIdentifier(thisNode.getNodeIdentifier()); - } - - if (partitionTable == null) { - // this node does not have a partition table yet - if (request.isSetPartitionTableBytes()) { - synchronized (this) { - // if the leader has sent the partition table then accept it - if (partitionTable == null) { - ByteBuffer byteBuffer = request.partitionTableBytes; - acceptVerifiedPartitionTable(byteBuffer, true); - } - } - } else { - // require the partition table - logger.debug("Request cluster nodes from the leader"); - response.setRequirePartitionTable(true); - } - } - - // if isReady, then it means the node has receives partitionTable from the leader, skip the - // following logic. - if (!ready) { - // if the node does not provide necessary info, wait for the next heartbeat. - if (response.isSetFollowerIdentifier()) { - return; - } - if (response.isSetRequirePartitionTable()) { - return; - } - // if the commitIndex is the same, ok we can start our datagroup service. - if (request.getTerm() == term.get() - && request.getCommitLogIndex() == getLogManager().getCommitLogIndex()) { - logger.info("Meta Group is ready"); - rebuildDataGroups(); - ready = true; - } - } - } - - /** - * Deserialize a partition table from the buffer, save it locally, add nodes from the partition - * table and start DataClusterServer and ClusterTSServiceImpl. 
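// Illustrative sketch, not part of the original file: the readiness condition checked at the end
// of processValidHeartbeatReq() above, expressed as a pure function over the relevant fields.
class FollowerReadinessSketch {
  static boolean canBecomeReady(boolean alreadyReady,
                                boolean identifierStillRequested,
                                boolean partitionTableStillRequested,
                                long leaderTerm, long localTerm,
                                long leaderCommitIndex, long localCommitIndex) {
    if (alreadyReady) {
      return false; // nothing to do, the data groups were already rebuilt
    }
    if (identifierStillRequested || partitionTableStillRequested) {
      return false; // the leader still needs information from us; wait for the next heartbeat
    }
    // ready once this follower has caught up with the leader's term and commit index
    return leaderTerm == localTerm && leaderCommitIndex == localCommitIndex;
  }
}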
- */ - protected synchronized void acceptPartitionTable( - ByteBuffer partitionTableBuffer, boolean needSerialization) { - SlotPartitionTable newTable = new SlotPartitionTable(thisNode); - newTable.deserialize(partitionTableBuffer); - - // avoid overwriting current partition table with a previous one - if (partitionTable != null) { - long currIndex = partitionTable.getLastMetaLogIndex(); - long incomingIndex = newTable.getLastMetaLogIndex(); - logger.info( - "Current partition table index {}, new partition table index {}", - currIndex, - incomingIndex); - if (currIndex >= incomingIndex) { - return; - } - } - partitionTable = newTable; - - if (needSerialization) { - // if the partition table is read locally, there is no need to serialize it again - savePartitionTable(); - } - - router = new ClusterPlanRouter(newTable); - this.coordinator.setRouter(router); - - updateNodeList(newTable.getAllNodes()); - } - - // this is the behavior of the follower - public synchronized void acceptVerifiedPartitionTable( - ByteBuffer partitionTableBuffer, boolean needSerialization) { - logger.info("new Partition Table is received."); - acceptPartitionTable(partitionTableBuffer, needSerialization); - rebuildDataGroups(); - logger.info("The Meta Engine is ready"); - ready = true; - } - - private void updateNodeList(Collection nodes) { - allNodes = new PartitionGroup(nodes); - initPeerMap(); - logger.info("All nodes in the partition table: {}", allNodes); - initIdNodeMap(); - for (Node n : allNodes) { - idNodeMap.put(n.getNodeIdentifier(), n); - } - } - - /** - * This is the behavior of the Leader: - * - *
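// Illustrative sketch, not part of the original file: the staleness guard in acceptPartitionTable()
// above, which refuses to overwrite the local partition table with an older one by comparing the
// last meta log index recorded in each table.
class PartitionTableGuardSketch {
  static boolean shouldReplace(Long currentLastMetaLogIndex, long incomingLastMetaLogIndex) {
    if (currentLastMetaLogIndex == null) {
      return true; // no table yet, accept whatever the leader (or the local file) provides
    }
    // only a strictly newer table may replace the current one
    return incomingLastMetaLogIndex > currentLastMetaLogIndex;
  }
}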

Process a HeartBeatResponse from a follower. If the follower has provided its identifier, - * try registering for it and if all nodes have registered and there is no available partition - * table, initialize a new one and start the ClusterTSServiceImpl and DataClusterServer. If the - * follower requires a partition table, add it to the blind node list so that at the next - * heartbeat this node will send it a partition table - */ - @Override - public void processValidHeartbeatResp(HeartBeatResponse response, Node receiver) { - // register the id of the node - if (response.isSetFollowerIdentifier()) { - // register the follower, the response.getFollower() contains the node information of the - // receiver. - registerNodeIdentifier(response.getFollower(), response.getFollowerIdentifier()); - buildMetaEngineServiceIfNotReady(); - } - // record the requirement of partition table of the follower - if (response.isRequirePartitionTable()) { - addBlindNode(receiver); - } - } - - public void buildMetaEngineServiceIfNotReady() { - // if all nodes' ids are known, we can build the partition table - if (!ready && allNodesIdKnown()) { - // Notice that this should only be called once. - - // When the meta raft group is established, the follower reports its node information to the - // leader through the first heartbeat. After the leader knows the node information of all - // nodes, it can replace the incomplete node information previously saved locally, and build - // partitionTable to send it to other followers. - allNodes = new PartitionGroup(idNodeMap.values()); - if (partitionTable == null) { - partitionTable = new SlotPartitionTable(allNodes, thisNode); - logger.info("Partition table is set up"); - } - - router = new ClusterPlanRouter(partitionTable); - this.coordinator.setRouter(router); - rebuildDataGroups(); - logger.info("The Meta Engine is ready"); - this.ready = true; - } - } - - /** - * When a node requires a partition table in its heartbeat response, add it into blindNodes so in - * the next heartbeat the partition table will be sent to the node. - */ - private void addBlindNode(Node node) { - logger.debug("Node {} requires the node list (partition table)", node); - blindNodes.add(node); - } - - /** @return whether a node wants the partition table. */ - public boolean isNodeBlind(Node node) { - return blindNodes.contains(node); - } - - /** - * Remove the node from the blindNodes when the partition table is sent, so partition table will - * not be sent in each heartbeat. - */ - public void removeBlindNode(Node node) { - blindNodes.remove(node); - } - - /** Register the identifier for the node if it does not conflict with other nodes. */ - private void registerNodeIdentifier(Node node, int identifier) { - synchronized (idNodeMap) { - Node conflictNode = idNodeMap.get(identifier); - if (conflictNode != null && !conflictNode.equals(node)) { - idConflictNodes.add(node); - return; - } - node.setNodeIdentifier(identifier); - logger.info("Node {} registered with id {}", node, identifier); - idNodeMap.put(identifier, node); - idConflictNodes.remove(node); - } - } - - /** - * idNodeMap is initialized when the first leader wins or the follower receives the partition - * table from the leader or a node recovers - */ - private void initIdNodeMap() { - idNodeMap = new HashMap<>(); - idNodeMap.put(thisNode.getNodeIdentifier(), thisNode); - } - - /** @return Whether all nodes' identifier is known. 
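// Illustrative sketch, not part of the original file: the identifier-conflict check performed by
// registerNodeIdentifier() above, using a plain map from identifier to node name instead of the
// Thrift Node type.
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class IdentifierRegistrySketch {
  private final Map<Integer, String> idNodeMap = new HashMap<>();
  private final Set<String> idConflictNodes = new HashSet<>();

  // Returns true if the identifier was registered, false if it conflicts with another node.
  synchronized boolean register(String node, int identifier) {
    String existing = idNodeMap.get(identifier);
    if (existing != null && !existing.equals(node)) {
      idConflictNodes.add(node); // the node will be asked to regenerate its identifier
      return false;
    }
    idNodeMap.put(identifier, node);
    idConflictNodes.remove(node);
    return true;
  }
}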
*/ - private boolean allNodesIdKnown() { - return idNodeMap != null && idNodeMap.size() == allNodes.size(); - } - - /** - * Start the DataClusterServer and ClusterTSServiceImpl` so this node can serve other nodes and - * clients. Also build DataGroupMembers using the partition table. - */ - protected synchronized void rebuildDataGroups() { - logger.info("Starting sub-servers..."); - synchronized (partitionTable) { - try { - getDataGroupEngine().buildDataGroupMembers(partitionTable); - sendHandshake(); - } catch (Exception e) { - logger.error("Build partition table failed: ", e); - stop(); - return; - } - } - logger.info("Sub-servers started."); - } - - /** When the node restarts, it sends handshakes to all other nodes so they may know it is back. */ - public void sendHandshake() { - for (Node node : allNodes) { - if (ClusterUtils.nodeEqual(node, thisNode)) { - // no need to shake hands with yourself - continue; - } - sendHandshakeForOneNode(node); - } - } - - private void sendHandshakeForOneNode(Node node) { - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - AsyncMetaClient asyncClient = (AsyncMetaClient) getAsyncClient(node); - if (asyncClient != null) { - try { - asyncClient.handshake(thisNode, new GenericHandler<>(node, null)); - } catch (TException e) { - logger.error("failed send handshake to node: {}", node, e); - } - } else { - logger.error("send handshake fail as get empty async client"); - } - } else { - SyncMetaClient syncClient = (SyncMetaClient) getSyncClient(node); - if (syncClient != null) { - try { - syncClient.handshake(thisNode); - } catch (TException e) { - syncClient.close(); - logger.error("failed send handshake to node: {}", node, e); - } finally { - syncClient.returnSelf(); - } - } else { - logger.error("send handshake fail as get empty sync client"); - } - } - } - - /** - * Process the join cluster request of "node". Only proceed when the partition table is ready. - * - * @param node cannot be the local node - */ - public AddNodeResponse addNode(Node node, StartUpStatus startUpStatus) - throws AddSelfException, LogExecutionException, InterruptedException, - CheckConsistencyException { - AddNodeResponse response = new AddNodeResponse(); - if (partitionTable == null) { - logger.info("Cannot add node now because the partition table is not set"); - response.setRespNum((int) Response.RESPONSE_PARTITION_TABLE_UNAVAILABLE); - return response; - } - - logger.info("A node {} wants to join this cluster", node); - if (node.equals(thisNode)) { - throw new AddSelfException(); - } - - waitLeader(); - // try to process the request locally - if (processAddNodeLocally(node, startUpStatus, response)) { - return response; - } - // if it cannot be processed locally, forward it - return null; - } - - /** - * Process the join cluster request of "node" as a MetaLeader. A node already joined is accepted - * immediately. If the identifier of "node" conflicts with an existing node, the request will be - * turned down. 
- * - * @param newNode cannot be the local node - * @param startUpStatus the start up status of the new node - * @param response the response that will be sent to "node" - * @return true if the process is over, false if the request should be forwarded - */ - private boolean processAddNodeLocally( - Node newNode, StartUpStatus startUpStatus, AddNodeResponse response) - throws LogExecutionException, InterruptedException, CheckConsistencyException { - if (character != NodeCharacter.LEADER) { - return false; - } - - if (!waitDataMigrationEnd()) { - response.setRespNum((int) Response.RESPONSE_DATA_MIGRATION_NOT_FINISH); - return true; - } - - for (Node node : partitionTable.getAllNodes()) { - if (node.internalIp.equals(newNode.internalIp) - && newNode.dataPort == node.dataPort - && newNode.metaPort == node.metaPort - && newNode.clientPort == node.clientPort) { - newNode.nodeIdentifier = node.nodeIdentifier; - break; - } - } - if (allNodes.contains(newNode)) { - logger.debug("Node {} is already in the cluster", newNode); - response.setRespNum((int) Response.RESPONSE_AGREE); - synchronized (partitionTable) { - response.setPartitionTableBytes(partitionTable.serialize()); - } - return true; - } - - Node idConflictNode = idNodeMap.get(newNode.getNodeIdentifier()); - if (idConflictNode != null) { - logger.debug("{}'s id conflicts with {}", newNode, idConflictNode); - response.setRespNum((int) Response.RESPONSE_IDENTIFIER_CONFLICT); - return true; - } - - // check status of the new node - if (!checkNodeConfig(startUpStatus, response)) { - return true; - } - - AddNodeLog addNodeLog = new AddNodeLog(); - // node adding is serialized to reduce potential concurrency problem - synchronized (logManager) { - // update partition table - PartitionTable table = new SlotPartitionTable(thisNode); - table.deserialize(partitionTable.serialize()); - table.addNode(newNode); - table.setLastMetaLogIndex(logManager.getLastLogIndex() + 1); - - addNodeLog.setPartitionTable(table.serialize()); - addNodeLog.setCurrLogTerm(getTerm().get()); - addNodeLog.setCurrLogIndex(logManager.getLastLogIndex() + 1); - addNodeLog.setMetaLogIndex(logManager.getLastLogIndex() + 1); - - addNodeLog.setNewNode(newNode); - - logManager.append(addNodeLog); - } - - int retryTime = 0; - while (true) { - logger.info( - "{}: Send the join request of {} to other nodes, retry time: {}", - name, - newNode, - retryTime); - AppendLogResult result = sendLogToFollowers(addNodeLog); - switch (result) { - case OK: - commitLog(addNodeLog); - logger.info("{}: Join request of {} is accepted", name, newNode); - - synchronized (partitionTable) { - response.setPartitionTableBytes(partitionTable.serialize()); - } - response.setRespNum((int) Response.RESPONSE_AGREE); - logger.info("{}: Sending join response of {}", name, newNode); - return true; - case TIME_OUT: - logger.debug("{}: log {} timed out, retrying...", name, addNodeLog); - try { - Thread.sleep(ClusterConstant.RETRY_WAIT_TIME_MS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - logger.info("{}: Join request of {} timed out", name, newNode); - retryTime++; - break; - case LEADERSHIP_STALE: - default: - return false; - } - } - } - - /** Check if there has data migration due to previous change membership operation. 
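// Illustrative sketch, not part of the original file: the replication retry loop used by
// processAddNodeLocally() above (and by processEmptyContentLog() below). "Outcome" and
// "replicate" are hypothetical stand-ins for AppendLogResult and sendLogToFollowers.
import java.util.function.Supplier;

class ReplicationRetrySketch {
  enum Outcome { OK, TIME_OUT, LEADERSHIP_STALE }

  // Keep resending the log until the followers accept it or the leadership turns stale;
  // TIME_OUT simply waits and retries, as in the loops above.
  static boolean replicateUntilAccepted(Supplier<Outcome> replicate, long retryWaitMs)
      throws InterruptedException {
    while (true) {
      switch (replicate.get()) {
        case OK:
          return true; // commit the log and answer the client
        case TIME_OUT:
          Thread.sleep(retryWaitMs);
          break; // retry
        case LEADERSHIP_STALE:
        default:
          return false; // a newer leader exists; let the caller forward the request
      }
    }
  }
}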
*/ - private boolean waitDataMigrationEnd() throws InterruptedException, CheckConsistencyException { - // try 5 time - int retryTime = 0; - while (true) { - Map res = collectAllPartitionMigrationStatus(); - if (res != null && res.isEmpty()) { - return true; - } - if (++retryTime == 5) { - break; - } - Thread.sleep(ClusterConstant.RETRY_WAIT_TIME_MS); - } - return false; - } - - /** Process empty log for leader to commit all previous log. */ - public void processEmptyContentLog() { - Log log = new EmptyContentLog(); - - synchronized (logManager) { - log.setCurrLogTerm(getTerm().get()); - log.setCurrLogIndex(logManager.getLastLogIndex() + 1); - logManager.append(log); - } - - int retryTime = 0; - while (true) { - logger.debug("{} Send empty content log to other nodes, retry time: {}", name, retryTime); - AppendLogResult result = sendLogToFollowers(log); - switch (result) { - case OK: - try { - commitLog(log); - } catch (LogExecutionException e) { - logger.error("{}: Fail to execute empty content log", name, e); - } - return; - case TIME_OUT: - logger.debug("{}: add empty content log timed out, retry.", name); - try { - Thread.sleep(ClusterConstant.RETRY_WAIT_TIME_MS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - retryTime++; - break; - case LEADERSHIP_STALE: - default: - return; - } - } - } - - private boolean checkNodeConfig(StartUpStatus remoteStartUpStatus, AddNodeResponse response) { - long remotePartitionInterval = remoteStartUpStatus.getPartitionInterval(); - int remoteHashSalt = remoteStartUpStatus.getHashSalt(); - int remoteReplicationNum = remoteStartUpStatus.getReplicationNumber(); - int remoteMultiRaftFactor = remoteStartUpStatus.getMultiRaftFactor(); - String remoteClusterName = remoteStartUpStatus.getClusterName(); - List remoteSeedNodeList = remoteStartUpStatus.getSeedNodeList(); - long localPartitionInterval = IoTDBDescriptor.getInstance().getConfig().getPartitionInterval(); - int localHashSalt = ClusterConstant.HASH_SALT; - int localReplicationNum = config.getReplicationNum(); - String localClusterName = config.getClusterName(); - int localMultiRaftFactor = config.getMultiRaftFactor(); - boolean partitionIntervalEquals = true; - boolean multiRaftFactorEquals = true; - boolean hashSaltEquals = true; - boolean replicationNumEquals = true; - boolean seedNodeEquals = true; - boolean clusterNameEquals = true; - - if (localPartitionInterval != remotePartitionInterval) { - partitionIntervalEquals = false; - logger.info( - "Remote partition interval conflicts with the leader's. Leader: {}, remote: {}", - localPartitionInterval, - remotePartitionInterval); - } - if (localMultiRaftFactor != remoteMultiRaftFactor) { - multiRaftFactorEquals = false; - logger.info( - "Remote multi-raft factor conflicts with the leader's. Leader: {}, remote: {}", - localMultiRaftFactor, - remoteMultiRaftFactor); - } - if (localHashSalt != remoteHashSalt) { - hashSaltEquals = false; - logger.info( - "Remote hash salt conflicts with the leader's. Leader: {}, remote: {}", - localHashSalt, - remoteHashSalt); - } - if (localReplicationNum != remoteReplicationNum) { - replicationNumEquals = false; - logger.info( - "Remote replication number conflicts with the leader's. Leader: {}, remote: {}", - localReplicationNum, - remoteReplicationNum); - } - if (!Objects.equals(localClusterName, remoteClusterName)) { - clusterNameEquals = false; - logger.info( - "Remote cluster name conflicts with the leader's. 
Leader: {}, remote: {}", - localClusterName, - remoteClusterName); - } - if (!ClusterUtils.checkSeedNodes(true, allNodes, remoteSeedNodeList)) { - seedNodeEquals = false; - if (logger.isInfoEnabled()) { - logger.info( - "Remote seed node list conflicts with the leader's. Leader: {}, remote: {}", - Arrays.toString(allNodes.toArray(new Node[0])), - remoteSeedNodeList); - } - } - if (!(partitionIntervalEquals - && hashSaltEquals - && replicationNumEquals - && seedNodeEquals - && clusterNameEquals - && multiRaftFactorEquals)) { - response.setRespNum((int) Response.RESPONSE_NEW_NODE_PARAMETER_CONFLICT); - response.setCheckStatusResponse( - new CheckStatusResponse( - partitionIntervalEquals, - hashSaltEquals, - replicationNumEquals, - seedNodeEquals, - clusterNameEquals, - multiRaftFactorEquals)); - return false; - } - return true; - } - - /** - * Check if the seed nodes are consistent with other nodes. Only used when establishing the - * initial cluster. - */ - private void checkSeedNodesStatus() - throws ConfigInconsistentException, StartUpCheckFailureException { - if (getAllNodes().size() == 1) { - // one-node cluster, skip the check - return; - } - - boolean canEstablishCluster = false; - long startTime = System.currentTimeMillis(); - // the initial 1 represents this node - AtomicInteger consistentNum = new AtomicInteger(1); - AtomicInteger inconsistentNum = new AtomicInteger(0); - while (!canEstablishCluster) { - consistentNum.set(1); - inconsistentNum.set(0); - checkSeedNodesStatusOnce(consistentNum, inconsistentNum); - logger.debug( - "Status check result: consistent nodes: {}, inconsistent nodes: {}, total nodes: {}", - consistentNum.get(), - inconsistentNum.get(), - getAllNodes().size()); - canEstablishCluster = - analyseStartUpCheckResult( - consistentNum.get(), inconsistentNum.get(), getAllNodes().size()); - // If reach the start up time threshold, shut down. - // Otherwise, wait for a while, start the loop again. - if (System.currentTimeMillis() - startTime > ClusterUtils.START_UP_TIME_THRESHOLD_MS) { - throw new StartUpCheckFailureException(); - } else if (!canEstablishCluster) { - try { - Thread.sleep(ClusterUtils.START_UP_CHECK_TIME_INTERVAL_MS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Unexpected interruption when waiting for next start up check", e); - } - } - } - // after checking, we enable print the error stack in 'SyncClientPool.getClient' - } - - // TODO rewrite this method. - - /** check whether the nodes having consistent setting. */ - private void checkSeedNodesStatusOnce( - AtomicInteger consistentNum, AtomicInteger inconsistentNum) { - // use a thread pool to avoid being blocked by an unavailable node - ExecutorService pool = new ScheduledThreadPoolExecutor(getAllNodes().size() - 1); - for (Node seedNode : getAllNodes()) { - Node thisNode = getThisNode(); - if (seedNode.equals(thisNode)) { - continue; - } - pool.submit( - () -> { - logger.debug("Checking status with {}", seedNode); - CheckStatusResponse response = null; - try { - response = checkStatus(seedNode); - } catch (Exception e) { - logger.warn("Exception during status check", e); - } - logger.debug("CheckStatusResponse from {}: {}", seedNode, response); - if (response != null) { - // check the response - ClusterUtils.examineCheckStatusResponse( - response, consistentNum, inconsistentNum, seedNode); - } else { - logger.warn( - "Start up exception. Cannot connect to node {}. 
Try again in next turn.", - seedNode); - } - }); - } - pool.shutdown(); - try { - if (!pool.awaitTermination(WAIT_START_UP_CHECK_TIME_SEC, TimeUnit.SECONDS)) { - pool.shutdownNow(); - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Unexpected interruption when waiting for start up checks", e); - } - } - - private CheckStatusResponse checkStatus(Node seedNode) { - if (config.isUseAsyncServer()) { - AsyncMetaClient client = (AsyncMetaClient) getAsyncClient(seedNode); - if (client == null) { - return null; - } - try { - return SyncClientAdaptor.checkStatus(client, getStartUpStatus()); - } catch (TException e) { - logger.warn("Error occurs when check status on node : {}", seedNode); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("Current thread is interrupted."); - } - } else { - SyncMetaClient client = (SyncMetaClient) getSyncClient(seedNode, false); - if (client == null) { - return null; - } - try { - return client.checkStatus(getStartUpStatus()); - } catch (TException e) { - client.close(); - logger.warn("Error occurs when check status on node : {}", seedNode); - } finally { - client.returnSelf(); - } - } - return null; - } - - public Set getIdConflictNodes() { - return idConflictNodes; - } - - /** - * When this node becomes the MetaLeader (for the first time), it should init the idNodeMap, so - * that if can require identifiers from all nodes and check if there are conflicts. - */ - @Override - public void onElectionWins() { - if (idNodeMap == null) { - initIdNodeMap(); - } - } - - /** Load the partition table from a local file if it can be found. */ - private void loadPartitionTable() { - File partitionFile = new File(PARTITION_FILE_NAME); - if (!partitionFile.exists() && !recoverPartitionTableFile()) { - logger.info("No partition table file found"); - return; - } - initIdNodeMap(); - try (DataInputStream inputStream = - new DataInputStream(new BufferedInputStream(new FileInputStream(partitionFile)))) { - int size = inputStream.readInt(); - byte[] tableBuffer = new byte[size]; - int readCnt = inputStream.read(tableBuffer); - if (readCnt < size) { - throw new IOException( - String.format("Expected partition table size: %s, actual read: %s", size, readCnt)); - } - - ByteBuffer wrap = ByteBuffer.wrap(tableBuffer); - logger.info("Load Partition Table locally."); - acceptPartitionTable(wrap, false); - - logger.info("Load {} nodes: {}", allNodes.size(), allNodes); - } catch (IOException e) { - logger.error("Cannot load the partition table", e); - } - } - - private boolean recoverPartitionTableFile() { - File tempFile = new File(PARTITION_FILE_NAME + TEMP_SUFFIX); - if (!tempFile.exists()) { - return false; - } - File partitionFile = new File(PARTITION_FILE_NAME); - return tempFile.renameTo(partitionFile); - } - - /** - * Serialize the partition table to a fixed position on the disk. Will first serialize to a - * temporary file and than replace the old file. 
- */ - private synchronized void savePartitionTable() { - File tempFile = new File(PARTITION_FILE_NAME + TEMP_SUFFIX); - tempFile.getParentFile().mkdirs(); - File oldFile = new File(PARTITION_FILE_NAME); - try (DataOutputStream outputStream = - new DataOutputStream(new BufferedOutputStream(new FileOutputStream(tempFile)))) { - synchronized (partitionTable) { - byte[] tableBuffer = partitionTable.serialize().array(); - outputStream.writeInt(tableBuffer.length); - outputStream.write(tableBuffer); - outputStream.flush(); - } - } catch (IOException e) { - logger.error("Cannot save the partition table", e); - } - if (oldFile.exists()) { - try { - Files.delete(Paths.get(oldFile.getAbsolutePath())); - } catch (IOException e) { - logger.warn("Old partition table file is not successfully deleted", e); - } - } - - if (!tempFile.renameTo(oldFile)) { - logger.warn("New partition table file is not successfully renamed"); - } - logger.info("Partition table is saved"); - } - - /** - * Load the identifier from the disk, if the identifier file does not exist, a new identifier will - * be generated. Do nothing if the identifier is already set. - */ - private void loadIdentifier() { - if (thisNode.isSetNodeIdentifier()) { - return; - } - File file = new File(NODE_IDENTIFIER_FILE_NAME); - Integer nodeId = null; - if (file.exists()) { - try (BufferedReader reader = new BufferedReader(new FileReader(file))) { - nodeId = Integer.parseInt(reader.readLine()); - logger.info("Recovered node identifier {}", nodeId); - } catch (Exception e) { - logger.warn("Cannot read the identifier from file, generating a new one", e); - } - } - if (nodeId != null) { - setNodeIdentifier(nodeId); - return; - } - - setNodeIdentifier(genNodeIdentifier()); - } - - /** - * Generate a new identifier using the hash of IP, metaPort and sysTime. - * - * @return a new identifier - */ - private int genNodeIdentifier() { - return Objects.hash( - thisNode.getInternalIp(), thisNode.getMetaPort(), System.currentTimeMillis()); - } - - /** Set the node's identifier to "identifier", also save it to a local file in text format. */ - private void setNodeIdentifier(int identifier) { - logger.info("The identifier of this node has been set to {}", identifier); - thisNode.setNodeIdentifier(identifier); - File idFile = new File(NODE_IDENTIFIER_FILE_NAME); - idFile.getParentFile().mkdirs(); - try (BufferedWriter writer = new BufferedWriter(new FileWriter(idFile))) { - writer.write(String.valueOf(identifier)); - } catch (IOException e) { - logger.error("Cannot save the node identifier", e); - } - } - - public PartitionTable getPartitionTable() { - return partitionTable; - } - - /** - * Process a snapshot sent by the MetaLeader. Deserialize the snapshot and apply it. The type of - * the snapshot should be MetaSimpleSnapshot. - */ - public void receiveSnapshot(SendSnapshotRequest request) throws SnapshotInstallationException { - MetaSimpleSnapshot snapshot = new MetaSimpleSnapshot(); - snapshot.deserialize(request.snapshotBytes); - snapshot.getDefaultInstaller(this).install(snapshot, -1, false); - } - - /** - * Execute a non-query plan. According to the type of the plan, the plan will be executed on all - * nodes (like timeseries deletion) or the nodes that belong to certain groups (like data - * ingestion). - * - * @param plan a non-query plan. 
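// Illustrative sketch, not part of the original file: the write-to-temp-then-rename pattern used
// by savePartitionTable() above, applied to an arbitrary length-prefixed byte payload.
import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.Files;

class AtomicSaveSketch {
  static void save(File target, byte[] payload) throws IOException {
    File tempFile = new File(target.getPath() + ".tmp");
    File parent = tempFile.getParentFile();
    if (parent != null) {
      parent.mkdirs();
    }
    try (DataOutputStream out =
        new DataOutputStream(new BufferedOutputStream(new FileOutputStream(tempFile)))) {
      out.writeInt(payload.length); // length prefix, matching the loader's readInt()
      out.write(payload);
    }
    if (target.exists()) {
      Files.delete(target.toPath()); // renameTo() can fail on some platforms if the target exists
    }
    if (!tempFile.renameTo(target)) {
      throw new IOException("Cannot rename " + tempFile + " to " + target);
    }
  }
}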
- */ - @Override - public TSStatus executeNonQueryPlan(PhysicalPlan plan) { - TSStatus result; - long startTime = Timer.Statistic.META_GROUP_MEMBER_EXECUTE_NON_QUERY.getOperationStartTime(); - if (PartitionUtils.isGlobalMetaPlan(plan)) { - // do it in local, only the follower forward the plan to local - logger.debug("receive a global meta plan {}", plan); - result = processNonPartitionedMetaPlan(plan); - } else { - // do nothing - logger.warn("receive a plan {} could not be processed in local", plan); - result = StatusUtils.UNSUPPORTED_OPERATION; - } - Timer.Statistic.META_GROUP_MEMBER_EXECUTE_NON_QUERY.calOperationCostTimeFromStart(startTime); - return result; - } - - @Override - ClientCategory getClientCategory() { - return ClientCategory.META; - } - - @Override - public String getMBeanName() { - return mbeanName; - } - - /** - * A non-partitioned plan (like storage group creation) should be executed on all metagroup nodes, - * so the MetaLeader should take the responsible to make sure that every node receives the plan. - * Thus the plan will be processed locally only by the MetaLeader and forwarded by non-leader - * nodes. - */ - public TSStatus processNonPartitionedMetaPlan(PhysicalPlan plan) { - if (character == NodeCharacter.LEADER) { - TSStatus status = processPlanLocally(plan); - if (status != null) { - return status; - } - } else if (!ClusterConstant.EMPTY_NODE.equals(leader.get())) { - TSStatus result = forwardPlan(plan, leader.get(), null); - if (!StatusUtils.NO_LEADER.equals(result)) { - result = - StatusUtils.getStatus( - result, new TEndPoint(leader.get().getInternalIp(), leader.get().getClientPort())); - return result; - } - } - - waitLeader(); - // the leader can be itself after waiting - if (character == NodeCharacter.LEADER) { - TSStatus status = processPlanLocally(plan); - if (status != null) { - return status; - } - } - TSStatus result = forwardPlan(plan, leader.get(), null); - if (!StatusUtils.NO_LEADER.equals(result)) { - result.setRedirectNode( - new TEndPoint(leader.get().getClientIp(), leader.get().getClientPort())); - } - return result; - } - - /** - * Get the data groups that should be queried when querying "path" with "filter". First, the time - * interval qualified by the filter will be extracted. If any side of the interval is open, query - * all groups. Otherwise compute all involved groups w.r.t. the time partitioning. 
- */ - public List routeFilter(Filter filter, PartialPath path) - throws StorageEngineException, EmptyIntervalException { - Intervals intervals = TimeValuePairUtils.extractTimeInterval(filter); - if (intervals.isEmpty()) { - throw new EmptyIntervalException(filter); - } - return routeIntervals(intervals, path); - } - - /** - * obtaining partition group based on path and intervals - * - * @param intervals time intervals, include minimum and maximum value - * @param path partial path - * @return data partition on which the current interval of the current path is stored - * @throws StorageEngineException if Failed to get storage group path - */ - public List routeIntervals(Intervals intervals, PartialPath path) - throws StorageEngineException { - List partitionGroups = new ArrayList<>(); - PartialPath storageGroupName; - try { - storageGroupName = IoTDB.schemaProcessor.getBelongedStorageGroup(path); - } catch (MetadataException e) { - throw new StorageEngineException(e); - } - - // if cluster is not enable-partition, a partial data storage in one PartitionGroup - if (!StorageEngine.isEnablePartition()) { - PartitionGroup partitionGroup = partitionTable.route(storageGroupName.getFullPath(), 0L); - partitionGroups.add(partitionGroup); - return partitionGroups; - } - - long firstLB = intervals.getLowerBound(0); - long lastUB = intervals.getUpperBound(intervals.getIntervalSize() - 1); - - if (firstLB == Long.MIN_VALUE || lastUB == Long.MAX_VALUE) { - // as there is no TimeLowerBound or TimeUpperBound, the query should be broadcast to every - // group - partitionGroups.addAll(partitionTable.getGlobalGroups()); - } else { - // compute the related data groups of all intervals - // TODO-Cluster#690: change to a broadcast when the computation is too expensive - Set groupHeaders = new HashSet<>(); - for (int i = 0; i < intervals.getIntervalSize(); i++) { - // compute the headers of groups involved in every interval - PartitionUtils.getIntervalHeaders( - storageGroupName.getFullPath(), - intervals.getLowerBound(i), - intervals.getUpperBound(i), - partitionTable, - groupHeaders); - } - // translate the headers to groups - for (RaftNode groupHeader : groupHeaders) { - partitionGroups.add(partitionTable.getPartitionGroup(groupHeader)); - } - } - return partitionGroups; - } - - @SuppressWarnings("java:S2274") - public Map getAllNodeStatus() { - if (getPartitionTable() == null) { - // the cluster is being built. - return null; - } - Map nodeStatus = new HashMap<>(); - for (Node node : allNodes) { - nodeStatus.put(node, thisNode.equals(node) ? 
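// Illustrative sketch, not part of the original file: the broadcast-vs-targeted decision made by
// routeIntervals() above, plus a hypothetical inverse of the time-to-partition mapping implied by
// closePartition(), which multiplies a partition id by StorageEngine.getTimePartitionInterval().
class IntervalRoutingSketch {
  // Long.MIN_VALUE / Long.MAX_VALUE bounds mean the filter is open on that side,
  // so the query has to be broadcast to every data group.
  static boolean mustBroadcast(long firstLowerBound, long lastUpperBound) {
    return firstLowerBound == Long.MIN_VALUE || lastUpperBound == Long.MAX_VALUE;
  }

  // Which time partition a timestamp falls into, assuming fixed-size partitions.
  static long timePartitionOf(long timestamp, long timePartitionInterval) {
    return timestamp / timePartitionInterval;
  }
}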
Status.LIVE : Status.OFFLINE); - } - - try { - if (config.isUseAsyncServer()) { - getNodeStatusAsync(nodeStatus); - } else { - getNodeStatusSync(nodeStatus); - } - } catch (TException e) { - logger.warn("Cannot get the status of all nodes", e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("Cannot get the status of all nodes", e); - } - - for (Node node : partitionTable.getAllNodes()) { - nodeStatus.putIfAbsent(node, Status.JOINING); - } - for (Node node : allNodes) { - if (!partitionTable.getAllNodes().contains(node)) { - nodeStatus.put(node, Status.LEAVING); - } - } - return nodeStatus; - } - - @SuppressWarnings({"java:S2445", "java:S2274"}) - private void getNodeStatusAsync(Map nodeStatus) - throws TException, InterruptedException { - NodeStatusHandler nodeStatusHandler = new NodeStatusHandler(nodeStatus); - synchronized (nodeStatus) { - for (Node node : allNodes) { - TSMetaService.AsyncClient client = (AsyncClient) getAsyncClient(node); - if (!node.equals(thisNode) && client != null) { - client.checkAlive(nodeStatusHandler); - } - } - nodeStatus.wait(ClusterConstant.CHECK_ALIVE_TIME_OUT_MS); - } - } - - private void getNodeStatusSync(Map nodeStatus) { - NodeStatusHandler nodeStatusHandler = new NodeStatusHandler(nodeStatus); - for (Node node : allNodes) { - SyncMetaClient client = (SyncMetaClient) getSyncClient(node); - if (!node.equals(thisNode) && client != null) { - Node response = null; - try { - response = client.checkAlive(); - } catch (TException e) { - client.close(); - } finally { - client.returnSelf(); - } - nodeStatusHandler.onComplete(response); - } - } - } - - public Map collectMigrationStatus(Node node) { - try { - if (config.isUseAsyncServer()) { - return collectMigrationStatusAsync(node); - } else { - return collectMigrationStatusSync(node); - } - } catch (TException e) { - logger.error("{}: Cannot get the status of node {}", name, node, e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("{}: Cannot get the status of node {}", name, node, e); - } - return null; - } - - private Map collectMigrationStatusAsync(Node node) - throws TException, InterruptedException { - AtomicReference resultRef = new AtomicReference<>(); - GenericHandler migrationStatusHandler = new GenericHandler<>(node, resultRef); - AsyncMetaClient client = (AsyncMetaClient) getAsyncClient(node); - if (client == null) { - return null; - } - client.collectMigrationStatus(migrationStatusHandler); - synchronized (resultRef) { - if (resultRef.get() == null) { - resultRef.wait(ClusterConstant.getConnectionTimeoutInMS()); - } - } - return ClusterUtils.deserializeMigrationStatus(resultRef.get()); - } - - private Map collectMigrationStatusSync(Node node) throws TException { - SyncMetaClient client = (SyncMetaClient) getSyncClient(node); - if (client == null) { - return null; - } - try { - return ClusterUtils.deserializeMigrationStatus(client.collectMigrationStatus()); - } catch (TException e) { - client.close(); - throw e; - } finally { - client.returnSelf(); - } - } - - @TestOnly - public void setPartitionTable(PartitionTable partitionTable) { - this.partitionTable = partitionTable; - router = new ClusterPlanRouter(partitionTable); - this.coordinator.setRouter(router); - DataGroupEngine dClusterServer = getDataGroupEngine(); - if (dClusterServer != null) { - dClusterServer.setPartitionTable(partitionTable); - } - } - - /** - * Process the request of removing a node from the cluster. 
Reject the request if partition table - * is unavailable or the node is not the MetaLeader and it does not know who the leader is. - * Otherwise (being the MetaLeader), the request will be processed locally and broadcast to every - * node. - * - * @param node the node to be removed. - */ - public long removeNode(Node node) - throws PartitionTableUnavailableException, LogExecutionException, InterruptedException, - CheckConsistencyException { - if (partitionTable == null) { - logger.info("Cannot add node now because the partition table is not set"); - throw new PartitionTableUnavailableException(thisNode); - } - - waitLeader(); - // try to process the request locally, if it cannot be processed locally, forward it - return processRemoveNodeLocally(node); - } - - /** - * Process a node removal request locally and broadcast it to the whole cluster. The removal will - * be rejected if number of nodes will fall below half of the replication number after this - * operation. - * - * @param node the node to be removed. - * @return Long.MIN_VALUE if further forwarding is required, or the execution result - */ - private long processRemoveNodeLocally(Node node) - throws LogExecutionException, InterruptedException, CheckConsistencyException { - if (character != NodeCharacter.LEADER) { - return Response.RESPONSE_NULL; - } - - // if we cannot have enough replica after the removal, reject it - if (allNodes.size() <= config.getReplicationNum()) { - return Response.RESPONSE_CLUSTER_TOO_SMALL; - } - - if (!waitDataMigrationEnd()) { - return Response.RESPONSE_DATA_MIGRATION_NOT_FINISH; - } - - // find the node to be removed in the node list - Node target = null; - synchronized (allNodes) { - for (Node n : allNodes) { - if (n.internalIp.equals(node.internalIp) && n.metaPort == node.metaPort) { - target = n; - break; - } - } - } - - if (target == null) { - logger.debug("Node {} is not in the cluster", node); - return Response.RESPONSE_REJECT; - } - - RemoveNodeLog removeNodeLog = new RemoveNodeLog(); - // node removal must be serialized to reduce potential concurrency problem - synchronized (logManager) { - // update partition table - PartitionTable table = new SlotPartitionTable((SlotPartitionTable) partitionTable); - table.removeNode(target); - table.setLastMetaLogIndex(logManager.getLastLogIndex() + 1); - - removeNodeLog.setPartitionTable(table.serialize()); - removeNodeLog.setCurrLogTerm(getTerm().get()); - removeNodeLog.setCurrLogIndex(logManager.getLastLogIndex() + 1); - removeNodeLog.setMetaLogIndex(logManager.getLastLogIndex() + 1); - - removeNodeLog.setRemovedNode(target); - - logManager.append(removeNodeLog); - } - - int retryTime = 0; - while (true) { - logger.info( - "{}: Send the node removal request of {} to other nodes, retry time: {}", - name, - target, - retryTime); - AppendLogResult result = sendLogToFollowers(removeNodeLog); - switch (result) { - case OK: - commitLog(removeNodeLog); - logger.info("{}: Removal request of {} is accepted", name, target); - return Response.RESPONSE_AGREE; - case TIME_OUT: - logger.info("{}: Removal request of {} timed out", name, target); - try { - Thread.sleep(ClusterConstant.RETRY_WAIT_TIME_MS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - retryTime++; - break; - // retry - case LEADERSHIP_STALE: - default: - return Response.RESPONSE_NULL; - } - } - } - - /** - * Remove a node from the node list, partition table and update DataGroupMembers. 
If the removed - * node is the local node, also stop heartbeat and catch-up service of metadata, but the heartbeat - * and catch-up service of data are kept alive for other nodes to pull data. If the removed node - * is a leader, send an exile to the removed node so that it can know it is removed. - */ - public void applyRemoveNode(RemoveNodeLog removeNodeLog) { - - long startTime = System.currentTimeMillis(); - Node oldNode = removeNodeLog.getRemovedNode(); - synchronized (allNodes) { - if (logger.isDebugEnabled()) { - logger.debug("{}: Removing a node {} from {}", name, oldNode, allNodes); - } - - if (allNodes.contains(oldNode)) { - allNodes.remove(oldNode); - idNodeMap.remove(oldNode.nodeIdentifier); - } - - // save the updated partition table - savePartitionTable(); - - // update DataGroupMembers, as the node is removed, the members of some groups are - // changed and there will also be one less group - NodeRemovalResult result = partitionTable.getNodeRemovalResult(); - getDataGroupEngine().removeNode(oldNode, result); - - // the leader is removed, start the next election ASAP - if (oldNode.equals(leader.get()) && !oldNode.equals(thisNode)) { - synchronized (term) { - setCharacter(NodeCharacter.ELECTOR); - setLeader(null); - } - synchronized (getHeartBeatWaitObject()) { - getHeartBeatWaitObject().notifyAll(); - } - } - - if (oldNode.equals(thisNode)) { - // use super.stop() so that the data server will not be closed because other nodes may - // want to pull data from this node - new Thread( - () -> { - try { - Thread.sleep(ClusterConstant.getHeartbeatIntervalMs()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - // ignore - } - super.stop(); - getDataGroupEngine().stop(); - logger.info("{} has been removed from the cluster", name); - }) - .start(); - } else if (thisNode.equals(leader.get())) { - // as the old node is removed, it cannot know this by heartbeat or log, so it should be - // directly kicked out of the cluster - getAppendLogThreadPool().submit(() -> exileNode(removeNodeLog)); - } - - if (logger.isDebugEnabled()) { - logger.debug("{}: Success to remove a node {} from {}", name, oldNode, allNodes); - } - - logger.info( - "{}: execute removing node {} cost {} ms", - name, - oldNode, - (System.currentTimeMillis()) - startTime); - } - } - - protected void exileNode(RemoveNodeLog removeNodeLog) { - logger.debug("Exile node {}: start.", removeNodeLog.getRemovedNode()); - Node node = removeNodeLog.getRemovedNode(); - if (config.isUseAsyncServer()) { - AsyncMetaClient asyncMetaClient = (AsyncMetaClient) getAsyncClient(node); - if (asyncMetaClient != null) { - try { - asyncMetaClient.exile(removeNodeLog.serialize(), new GenericHandler<>(node, null)); - } catch (TException e) { - logger.warn("Cannot inform {} its removal", node, e); - } - } else { - logger.error("exile node fail for node: {} as empty client", node); - } - } else { - SyncMetaClient client = (SyncMetaClient) getSyncClient(node); - if (client == null) { - return; - } - try { - client.exile(removeNodeLog.serialize()); - } catch (TException e) { - client.close(); - logger.warn("Cannot inform {} its removal", node, e); - } finally { - client.returnSelf(); - } - } - } - - /** - * Generate a report containing the character, leader, term, last log and read-only-status. This - * will help to see if the node is in a consistent and right state during debugging. 
- */ - public MetaMemberReport genMemberReport() { - long prevLastLogIndex = lastReportedLogIndex; - lastReportedLogIndex = logManager.getLastLogIndex(); - return new MetaMemberReport( - character, - leader.get(), - term.get(), - logManager.getLastLogTerm(), - lastReportedLogIndex, - logManager.getCommitLogIndex(), - logManager.getCommitLogTerm(), - readOnly, - lastHeartbeatReceivedTime, - prevLastLogIndex, - logManager.getMaxHaveAppliedCommitIndex()); - } - - /** - * Collect data migration status of data group in all cluster nodes. - * - * @return key: data group; value: slot num in data migration - */ - public Map collectAllPartitionMigrationStatus() - throws CheckConsistencyException { - syncLeader(null); - Map res = new HashMap<>(); - for (Node node : allNodes) { - if (logger.isDebugEnabled()) { - logger.debug("{}: start to get migration status of {}", name, node); - } - Map oneNodeRes; - if (node.equals(thisNode)) { - oneNodeRes = collectMigrationStatus(); - } else { - oneNodeRes = collectMigrationStatus(node); - } - if (oneNodeRes == null) { - return null; - } - for (Entry entry : oneNodeRes.entrySet()) { - res.put(entry.getKey(), Math.max(res.getOrDefault(entry.getKey(), 0), entry.getValue())); - } - } - return res; - } - - /** - * Collect data migration status of data group in all cluster nodes. - * - * @return key: data group; value: slot num in data migration - */ - public Map collectMigrationStatus() { - logger.info("{}: start to collect migration status locally.", name); - Map groupSlotMap = new HashMap<>(); - if (getPartitionTable() == null) { - return groupSlotMap; - } - Map headerMap = getDataGroupEngine().getHeaderGroupMap(); - syncLocalApply(getPartitionTable().getLastMetaLogIndex(), false); - synchronized (headerMap) { - for (DataGroupMember dataMember : headerMap.values()) { - int num = dataMember.getSlotManager().getSlotNumInDataMigration(); - if (num > 0) { - groupSlotMap.put(dataMember.getPartitionGroup(), num); - } - } - } - return groupSlotMap; - } - - @Override - public void setAllNodes(PartitionGroup allNodes) { - super.setAllNodes(new PartitionGroup(allNodes)); - initPeerMap(); - idNodeMap = new HashMap<>(); - for (Node node : allNodes) { - idNodeMap.put(node.getNodeIdentifier(), node); - } - } - - /** - * Get a local DataGroupMember that is in the group of "header" and should process "request". - * - * @param header the header of the group which the local node is in - * @param request the toString() of this parameter should explain what the request is and it is - * only used in logs for tracing - */ - public DataGroupMember getLocalDataMember(RaftNode header, Object request) { - return getDataGroupEngine().getDataMember(header, null, request); - } - - /** - * Get a local DataGroupMember that is in the group of "header" for an internal request. 
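// Illustrative sketch, not part of the original file: the per-group merge performed by
// collectAllPartitionMigrationStatus() above, which keeps the largest reported slot count for
// every data group across all nodes.
import java.util.HashMap;
import java.util.Map;

class MigrationStatusMergeSketch {
  static <G> Map<G, Integer> merge(Iterable<Map<G, Integer>> perNodeStatus) {
    Map<G, Integer> merged = new HashMap<>();
    for (Map<G, Integer> oneNode : perNodeStatus) {
      for (Map.Entry<G, Integer> e : oneNode.entrySet()) {
        merged.merge(e.getKey(), e.getValue(), Math::max); // keep the maximum per data group
      }
    }
    return merged;
  }
}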
- * - * @param raftNode the header of the group which the local node is in - */ - public DataGroupMember getLocalDataMember(RaftNode raftNode) { - return getDataGroupEngine().getDataMember(raftNode, null, "Internal call"); - } - - @Override - public void closeLogManager() { - super.closeLogManager(); - if (getDataGroupEngine() != null) { - getDataGroupEngine().closeLogManagers(); - } - } - - public StartUpStatus getStartUpStatus() { - return startUpStatus; - } - - public void setRouter(ClusterPlanRouter router) { - this.router = router; - } - - public void handleHandshake(Node sender) { - NodeStatusManager.getINSTANCE().activate(sender); - } - - @Override - public String getAllNodesAsString() { - return getAllNodes().toString(); - } - - @Override - public String getPartitionTableAsString() { - return partitionTable.toString(); - } - - @Override - public String getBlindNodesAsString() { - return blindNodes.toString(); - } - - @Override - public String getIdNodeMapAsString() { - return idNodeMap.toString(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/member/MetaGroupMemberMBean.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/member/MetaGroupMemberMBean.java deleted file mode 100644 index f204fea6de48..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/member/MetaGroupMemberMBean.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.member; - -public interface MetaGroupMemberMBean extends RaftMemberMBean { - - String getPartitionTableAsString(); - - boolean isReady(); - - String getAllNodesAsString(); - - String getCharacterAsString(); - - String getBlindNodesAsString(); - - String getIdNodeMapAsString(); -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/member/RaftMember.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/member/RaftMember.java deleted file mode 100644 index 770c407907cd..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/member/RaftMember.java +++ /dev/null @@ -1,2219 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.member; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.ClientManager; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncMetaClient; -import org.apache.iotdb.cluster.config.ClusterConfig; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.LogExecutionException; -import org.apache.iotdb.cluster.exception.UnknownLogTypeException; -import org.apache.iotdb.cluster.log.CommitLogCallback; -import org.apache.iotdb.cluster.log.CommitLogTask; -import org.apache.iotdb.cluster.log.HardState; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogDispatcher; -import org.apache.iotdb.cluster.log.LogDispatcher.SendLogRequest; -import org.apache.iotdb.cluster.log.LogParser; -import org.apache.iotdb.cluster.log.catchup.CatchUpTask; -import org.apache.iotdb.cluster.log.logtypes.PhysicalPlanLog; -import org.apache.iotdb.cluster.log.manage.RaftLogManager; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntriesRequest; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.ElectionRequest; -import org.apache.iotdb.cluster.rpc.thrift.ExecutNonQueryReq; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatResponse; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client; -import org.apache.iotdb.cluster.rpc.thrift.RequestCommitIndexResponse; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.handlers.caller.AppendNodeEntryHandler; -import org.apache.iotdb.cluster.server.handlers.caller.GenericHandler; -import org.apache.iotdb.cluster.server.monitor.NodeStatusManager; -import org.apache.iotdb.cluster.server.monitor.Peer; -import org.apache.iotdb.cluster.server.monitor.Timer; -import org.apache.iotdb.cluster.server.monitor.Timer.Statistic; -import org.apache.iotdb.cluster.utils.ClientUtils; -import org.apache.iotdb.cluster.utils.IOUtils; -import org.apache.iotdb.cluster.utils.PlanSerializer; -import org.apache.iotdb.cluster.utils.StatusUtils; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; -import org.apache.iotdb.commons.concurrent.IoTThreadFactory; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.exception.IllegalPathException; -import 
org.apache.iotdb.commons.exception.IoTDBException; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.BatchProcessException; -import org.apache.iotdb.db.exception.metadata.PathAlreadyExistException; -import org.apache.iotdb.db.exception.metadata.PathNotExistException; -import org.apache.iotdb.db.exception.metadata.StorageGroupAlreadySetException; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.exception.metadata.template.DuplicatedTemplateException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.executor.PlanExecutor; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.sys.LogPlan; -import org.apache.iotdb.rpc.RpcUtils; -import org.apache.iotdb.rpc.TSStatusCode; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.SocketTimeoutException; -import java.nio.BufferUnderflowException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.ConcurrentModificationException; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Random; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -import static org.apache.iotdb.cluster.config.ClusterConstant.THREAD_POLL_WAIT_TERMINATION_TIME_S; - -/** - * RaftMember process the common raft logic like leader election, log appending, catch-up and so on. - */ -@SuppressWarnings("java:S3077") // reference volatile is enough -public abstract class RaftMember implements RaftMemberMBean { - private static final Logger logger = LoggerFactory.getLogger(RaftMember.class); - public static final boolean USE_LOG_DISPATCHER = false; - - private static final String MSG_FORWARD_TIMEOUT = "{}: Forward {} to {} time out"; - private static final String MSG_FORWARD_ERROR = - "{}: encountered an error when forwarding {} to" + " {}"; - private static final String MSG_NO_LEADER_COMMIT_INDEX = - "{}: Cannot request commit index from {}"; - private static final String MSG_NO_LEADER_IN_SYNC = "{}: No leader is found when synchronizing"; - public static final String MSG_LOG_IS_ACCEPTED = "{}: log {} is accepted"; - /** - * when there is no leader, wait for waitLeaderTimeMs before return a NoLeader response to the - * client. - */ - private static long waitLeaderTimeMs = 60 * 1000L; - - /** - * when the leader of this node changes, the condition will be notified so other threads that wait - * on this may be woken. 
- */ - private final Object waitLeaderCondition = new Object(); - /** the lock is to make sure that only one thread can apply snapshot at the same time */ - private final Lock snapshotApplyLock = new ReentrantLock(); - - private final Object heartBeatWaitObject = new Object(); - - protected Node thisNode = ClusterIoTDB.getInstance().getThisNode(); - - /** the nodes that belong to the same raft group as thisNode. */ - protected PartitionGroup allNodes; - - ClusterConfig config = ClusterDescriptor.getInstance().getConfig(); - /** the name of the member, to distinguish several members in the logs. */ - String name; - /** to choose nodes to send request of joining cluster randomly. */ - Random random = new Random(); - /** when the node is a leader, this map is used to track log progress of each follower. */ - Map peerMap; - /** - * the current term of the node, this object also works as lock of some transactions of the member - * like elections. - */ - AtomicLong term = new AtomicLong(0); - - volatile NodeCharacter character = NodeCharacter.ELECTOR; - AtomicReference leader = new AtomicReference<>(ClusterConstant.EMPTY_NODE); - /** - * the node that thisNode has voted for in this round of election, which prevents a node voting - * twice in a single election. - */ - volatile Node voteFor; - /** - * when this node is a follower, this records the unix-epoch timestamp when the last heartbeat - * arrived, and is reported in the timed member report to show how long the leader has been - * offline. - */ - volatile long lastHeartbeatReceivedTime; - /** the raft logs are all stored and maintained in the log manager */ - RaftLogManager logManager; - /** - * the single thread pool that runs the heartbeat thread, which send heartbeats to the follower - * when this node is a leader, or start elections when this node is an elector. - */ - ExecutorService heartBeatService; - /** - * if set to true, the node will reject all log appends when the header of a group is removed from - * the cluster, the members of the group should no longer accept writes, but they still can be - * candidates for weak consistency reads and provide snapshots for the new data holders - */ - volatile boolean readOnly = false; - /** - * lastLogIndex when generating the previous member report, to show the log ingestion rate of the - * member by comparing it with the current last log index. - */ - long lastReportedLogIndex; - /** the thread pool that runs catch-up tasks */ - private ExecutorService catchUpService; - /** - * lastCatchUpResponseTime records when is the latest response of each node's catch-up. There - * should be only one catch-up task for each node to avoid duplication, but the task may time out - * or the task may corrupt unexpectedly, and in that case, the next catch up should be enabled. So - * if we find a catch-up task that does not respond for long, we will start a new one instead of - * waiting for the previous one to finish. - */ - private Map lastCatchUpResponseTime = new ConcurrentHashMap<>(); - /** - * client manager that provides reusable Thrift clients to connect to other RaftMembers and - * execute RPC requests. 
It will be initialized according to the implementation of the subclasses - */ - private ClientManager clientManager; - /** - * when the commit progress is updated by a heartbeat, this object is notified so that we may know - * if this node is up-to-date with the leader, and whether the given consistency is reached - */ - private Object syncLock = new Object(); - /** - * when this node sends logs to the followers, the send is performed in parallel in this pool, so - * that a slow or unavailable node will not block other nodes. - */ - private ExecutorService appendLogThreadPool; - /** - * when using sync server, this thread pool is used to convert serial operations (like sending - * heartbeats and asking for votes) into paralleled ones, so the process will not be blocked by - * one slow node. - */ - private ExecutorService serialToParallelPool; - /** a thread pool that is used to do commit log tasks asynchronous in heartbeat thread */ - private ExecutorService commitLogPool; - - /** - * logDispatcher buff the logs orderly according to their log indexes and send them sequentially, - * which avoids the followers receiving out-of-order logs, forcing them to wait for previous logs. - */ - private LogDispatcher logDispatcher; - - /** - * If this node can not be the leader, this parameter will be set true. This field must be true - * only after all necessary threads are ready - */ - private volatile boolean skipElection = true; - - /** - * localExecutor is used to directly execute plans like load configuration in the underlying IoTDB - */ - protected PlanExecutor localExecutor; - - protected RaftMember() {} - - protected RaftMember(String name, ClientManager clientManager) { - this.name = name; - this.clientManager = clientManager; - } - - /** - * Start the heartbeat thread and the catch-up thread pool. Calling the method twice does not - * induce side effects. - */ - public void start() { - if (heartBeatService != null) { - return; - } - - startBackGroundThreads(); - setSkipElection(false); - logger.info("{} started", name); - } - - void startBackGroundThreads() { - heartBeatService = IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor(name + "-Heartbeat"); - - catchUpService = IoTDBThreadPoolFactory.newCachedThreadPool(name + "-CatchUp"); - appendLogThreadPool = - IoTDBThreadPoolFactory.newFixedThreadPool( - Runtime.getRuntime().availableProcessors() * 10, name + "-AppendLog"); - serialToParallelPool = - IoTDBThreadPoolFactory.newThreadPool( - allNodes.size(), - Math.max(allNodes.size(), Runtime.getRuntime().availableProcessors()), - 1000L, - TimeUnit.MILLISECONDS, - new LinkedBlockingQueue<>(), - new IoTThreadFactory(getName() + "-SerialToParallel"), - getName() + "-SerialToParallel"); - commitLogPool = IoTDBThreadPoolFactory.newSingleThreadExecutor("RaftCommitLog"); - } - - public String getName() { - return name; - } - - public RaftLogManager getLogManager() { - return logManager; - } - - @TestOnly - public void setLogManager(RaftLogManager logManager) { - if (this.logManager != null) { - this.logManager.close(); - } - this.logManager = logManager; - } - - /** - * Stop the heartbeat thread and the catch-up thread pool. Calling the method twice does not - * induce side effects. 
- */ - public void stop() { - setSkipElection(true); - closeLogManager(); - if (heartBeatService == null) { - return; - } - - heartBeatService.shutdownNow(); - catchUpService.shutdownNow(); - appendLogThreadPool.shutdownNow(); - try { - heartBeatService.awaitTermination(THREAD_POLL_WAIT_TERMINATION_TIME_S, TimeUnit.SECONDS); - catchUpService.awaitTermination(THREAD_POLL_WAIT_TERMINATION_TIME_S, TimeUnit.SECONDS); - appendLogThreadPool.awaitTermination(THREAD_POLL_WAIT_TERMINATION_TIME_S, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error( - "Unexpected interruption when waiting for heartBeatService and catchUpService " - + "to end", - e); - } - if (serialToParallelPool != null) { - serialToParallelPool.shutdownNow(); - try { - serialToParallelPool.awaitTermination( - THREAD_POLL_WAIT_TERMINATION_TIME_S, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Unexpected interruption when waiting for asyncThreadPool to end", e); - } - } - - if (commitLogPool != null) { - commitLogPool.shutdownNow(); - try { - commitLogPool.awaitTermination(THREAD_POLL_WAIT_TERMINATION_TIME_S, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Unexpected interruption when waiting for commitLogPool to end", e); - } - } - leader.set(ClusterConstant.EMPTY_NODE); - catchUpService = null; - heartBeatService = null; - appendLogThreadPool = null; - logger.info("Member {} stopped", name); - } - - public void closeLogManager() { - if (logManager != null) { - logManager.close(); - } - } - - /** - * Process the HeartBeatRequest from the leader. If the term of the leader is smaller than the - * local term, reject the request by telling it the newest term. Else if the local logs are - * consistent with the leader's, commit them. Else help the leader find the last matched log. Also - * update the leadership, heartbeat timer and term of the local node. - */ - public HeartBeatResponse processHeartbeatRequest(HeartBeatRequest request) { - logger.trace("{} received a heartbeat", name); - synchronized (term) { - long thisTerm = term.get(); - long leaderTerm = request.getTerm(); - HeartBeatResponse response = new HeartBeatResponse(); - - if (leaderTerm < thisTerm) { - // a leader with a term lower than this node is invalid, send it the local term to inform - // it to resign - response.setTerm(thisTerm); - if (logger.isDebugEnabled()) { - logger.debug("{} received a heartbeat from a stale leader {}", name, request.getLeader()); - } - } else { - // try updating local term - stepDown(leaderTerm, true); - setLeader(request.getLeader()); - if (character != NodeCharacter.FOLLOWER) { - // interrupt current election - term.notifyAll(); - } - - // the heartbeat comes from a valid leader, process it with the sub-class logic - processValidHeartbeatReq(request, response); - - response.setTerm(Response.RESPONSE_AGREE); - // tell the leader who I am in case of catch-up - response.setFollower(thisNode); - // tell the leader the local log progress so it may decide whether to perform a catch up - response.setLastLogIndex(logManager.getLastLogIndex()); - response.setLastLogTerm(logManager.getLastLogTerm()); - // if the snapshot apply lock is held, it means that a snapshot is installing now. 
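// Editorial aside (not part of the removed file): the lines below use the common tryLock-probe
// idiom -- the lock is acquired only to test whether another thread currently holds it, and is
// released immediately, so the probe itself has no lasting effect. A minimal sketch of the same
// idiom with a plain ReentrantLock (the variable names here are illustrative):
//
//   java.util.concurrent.locks.Lock snapshotApplyLock = new java.util.concurrent.locks.ReentrantLock();
//   boolean isFree = snapshotApplyLock.tryLock();
//   if (isFree) {
//     snapshotApplyLock.unlock();              // acquired only as a probe, release right away
//   }
//   response.setInstallingSnapshot(!isFree);   // a held lock means a snapshot is being installed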
- boolean isFree = snapshotApplyLock.tryLock(); - if (isFree) { - snapshotApplyLock.unlock(); - } - response.setInstallingSnapshot(!isFree); - if (logger.isDebugEnabled()) { - logger.debug( - "{}: log commit log index = {}, max have applied commit index = {}", - name, - logManager.getCommitLogIndex(), - logManager.getMaxHaveAppliedCommitIndex()); - } - - tryUpdateCommitIndex(leaderTerm, request.getCommitLogIndex(), request.getCommitLogTerm()); - - if (logger.isTraceEnabled()) { - logger.trace("{} received heartbeat from a valid leader {}", name, request.getLeader()); - } - } - return response; - } - } - - private void tryUpdateCommitIndex(long leaderTerm, long commitIndex, long commitTerm) { - if (leaderTerm >= term.get() && logManager.getCommitLogIndex() < commitIndex) { - // there are more local logs that can be committed, commit them in a ThreadPool so the - // heartbeat response will not be blocked - CommitLogTask commitLogTask = new CommitLogTask(logManager, commitIndex, commitTerm); - commitLogTask.registerCallback(new CommitLogCallback(this)); - // if the log is not consistent, the commitment will be blocked until the leader makes the - // node catch up - if (commitLogPool != null && !commitLogPool.isShutdown()) { - commitLogPool.submit(commitLogTask); - } - - logger.debug( - "{}: Inconsistent log found, leaderCommit: {}-{}, localCommit: {}-{}, " - + "localLast: {}-{}", - name, - commitIndex, - commitTerm, - logManager.getCommitLogIndex(), - logManager.getCommitLogTerm(), - logManager.getLastLogIndex(), - logManager.getLastLogTerm()); - } - } - - /** - * Process an ElectionRequest. If the request comes from the last leader, accept it. Else decide - * whether to accept by examining the log status of the elector. - */ - public long processElectionRequest(ElectionRequest electionRequest) { - if (logger.isDebugEnabled()) { - logger.debug( - "{}: start to handle request from elector {}", name, electionRequest.getElector()); - } - synchronized (term) { - long currentTerm = term.get(); - long response = - checkElectorTerm(currentTerm, electionRequest.getTerm(), electionRequest.getElector()); - if (response != Response.RESPONSE_AGREE) { - return response; - } - - // compare the log progress of the elector with this node - response = checkElectorLogProgress(electionRequest); - logger.info( - "{} sending response {} to the elector {}", name, response, electionRequest.getElector()); - return response; - } - } - - private long checkElectorTerm(long currentTerm, long electorTerm, Node elector) { - if (electorTerm < currentTerm) { - // the elector has a smaller term thus the request is invalid - logger.info( - "{} sending localTerm {} to the elector {} because it's term {} is smaller.", - name, - currentTerm, - elector, - electorTerm); - return currentTerm; - } - if (currentTerm == electorTerm && voteFor != null && !Objects.equals(voteFor, elector)) { - // this node has voted in this round, but not for the elector, as one node cannot vote - // twice, reject the request - logger.info( - "{} sending rejection to the elector {} because member already has voted {} in this term {}.", - name, - elector, - voteFor, - currentTerm); - return Response.RESPONSE_REJECT; - } - if (electorTerm > currentTerm) { - // the elector has a larger term, this node should update its term first - logger.info( - "{} received an election from elector {} which has bigger term {} than localTerm {}, raftMember should step down first and then continue to decide whether to grant it's vote by log status.", - name, - elector, - 
electorTerm, - currentTerm); - stepDown(electorTerm, false); - } - return Response.RESPONSE_AGREE; - } - - /** - * Process an AppendEntryRequest. First check the term of the leader, then parse the log and - * finally see if we can find a position to append the log. - */ - public long appendEntry(AppendEntryRequest request) throws UnknownLogTypeException { - logger.debug("{} received an AppendEntryRequest: {}", name, request); - // the term checked here is that of the leader, not that of the log - long checkResult = checkRequestTerm(request.term, request.leader); - if (checkResult != Response.RESPONSE_AGREE) { - return checkResult; - } - - long startTime = Timer.Statistic.RAFT_RECEIVER_LOG_PARSE.getOperationStartTime(); - int logByteSize = request.getEntry().length; - Log log = LogParser.getINSTANCE().parse(request.entry); - log.setByteSize(logByteSize); - Timer.Statistic.RAFT_RECEIVER_LOG_PARSE.calOperationCostTimeFromStart(startTime); - - long result = appendEntry(request.prevLogIndex, request.prevLogTerm, request.leaderCommit, log); - logger.debug("{} AppendEntryRequest of {} completed with result {}", name, log, result); - - return result; - } - - /** Similar to appendEntry, while the incoming load is batch of logs instead of a single log. */ - public long appendEntries(AppendEntriesRequest request) throws UnknownLogTypeException { - logger.debug("{} received an AppendEntriesRequest", name); - - // the term checked here is that of the leader, not that of the log - long checkResult = checkRequestTerm(request.term, request.leader); - if (checkResult != Response.RESPONSE_AGREE) { - return checkResult; - } - - long response; - List logs = new ArrayList<>(); - int logByteSize = 0; - long startTime = Timer.Statistic.RAFT_RECEIVER_LOG_PARSE.getOperationStartTime(); - for (ByteBuffer buffer : request.getEntries()) { - buffer.mark(); - Log log; - logByteSize = buffer.limit() - buffer.position(); - try { - log = LogParser.getINSTANCE().parse(buffer); - log.setByteSize(logByteSize); - } catch (BufferUnderflowException e) { - buffer.reset(); - throw e; - } - logs.add(log); - } - - Timer.Statistic.RAFT_RECEIVER_LOG_PARSE.calOperationCostTimeFromStart(startTime); - - response = appendEntries(request.prevLogIndex, request.prevLogTerm, request.leaderCommit, logs); - if (logger.isDebugEnabled()) { - logger.debug( - "{} AppendEntriesRequest of log size {} completed with result {}", - name, - request.getEntries().size(), - response); - } - return response; - } - - public PlanExecutor getLocalExecutor() throws QueryProcessException { - if (localExecutor == null) { - localExecutor = new PlanExecutor(); - } - return localExecutor; - } - - public void sendLogAsync( - Log log, - AtomicInteger voteCounter, - Node node, - AtomicBoolean leaderShipStale, - AtomicLong newLeaderTerm, - AppendEntryRequest request, - Peer peer) { - AsyncClient client = getSendLogAsyncClient(node); - if (client != null) { - AppendNodeEntryHandler handler = - getAppendNodeEntryHandler(log, voteCounter, node, leaderShipStale, newLeaderTerm, peer); - try { - client.appendEntry(request, handler); - logger.debug("{} sending a log to {}: {}", name, node, log); - } catch (Exception e) { - logger.warn("{} cannot append log to node {}", name, node, e); - } - } - } - - public NodeCharacter getCharacter() { - return character; - } - - public String getCharacterAsString() { - return character.toString(); - } - - public void setCharacter(NodeCharacter character) { - if (!Objects.equals(character, this.character)) { - logger.info("{} has become a 
{}", name, character); - this.character = character; - } - } - - public long getLastHeartbeatReceivedTime() { - return lastHeartbeatReceivedTime; - } - - public void setLastHeartbeatReceivedTime(long lastHeartbeatReceivedTime) { - this.lastHeartbeatReceivedTime = lastHeartbeatReceivedTime; - } - - public Node getLeader() { - return leader.get(); - } - - public void setLeader(Node leader) { - if (!Objects.equals(leader, this.leader.get())) { - if (ClusterConstant.EMPTY_NODE.equals(leader) || leader == null) { - logger.info("{} has been set to null in term {}", getName(), term.get()); - } else if (!Objects.equals(leader, this.thisNode)) { - logger.info("{} has become a follower of {} in term {}", getName(), leader, term.get()); - } - synchronized (waitLeaderCondition) { - if (leader == null) { - this.leader.set(ClusterConstant.EMPTY_NODE); - } else { - this.leader.set(leader); - } - if (!ClusterConstant.EMPTY_NODE.equals(this.leader.get())) { - waitLeaderCondition.notifyAll(); - } - } - } - } - - public Collection getAllNodes() { - return allNodes; - } - - public PartitionGroup getPartitionGroup() { - return allNodes; - } - - public void setAllNodes(PartitionGroup allNodes) { - this.allNodes = allNodes; - } - - public Map getLastCatchUpResponseTime() { - return lastCatchUpResponseTime; - } - - /** Sub-classes will add their own process of HeartBeatResponse in this method. */ - public void processValidHeartbeatResp(HeartBeatResponse response, Node receiver) {} - - /** The actions performed when the node wins in an election (becoming a leader). */ - public void onElectionWins() {} - - /** - * Update the followers' log by sending logs whose index >= followerLastMatchedLogIndex to the - * follower. If some of the required logs are removed, also send the snapshot.
- * notice that if a part of data is in the snapshot, then it is not in the logs. - */ - public void catchUp(Node follower, long lastLogIdx) { - // for one follower, there is at most one ongoing catch-up, so the same data will not be sent - // twice to the node - synchronized (catchUpService) { - // check if the last catch-up is still ongoing and does not time out yet - Long lastCatchupResp = lastCatchUpResponseTime.get(follower); - if (lastCatchupResp != null - && System.currentTimeMillis() - lastCatchupResp < config.getCatchUpTimeoutMS()) { - logger.debug("{}: last catch up of {} is ongoing", name, follower); - return; - } else { - // record the start of the catch-up - lastCatchUpResponseTime.put(follower, System.currentTimeMillis()); - } - } - logger.info("{}: Start to make {} catch up", name, follower); - if (!catchUpService.isShutdown()) { - Future future = - catchUpService.submit( - new CatchUpTask(follower, getRaftGroupId(), peerMap.get(follower), this, lastLogIdx)); - catchUpService.submit( - () -> { - try { - future.get(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (ExecutionException e) { - logger.error("{}: Catch up task exits with unexpected exception", name, e); - } - }); - } - } - - /** - * If the node is not a leader, the request will be sent to the leader or reports an error if - * there is no leader. Otherwise execute the plan locally (whether to send it to followers depends - * on the type of the plan). - */ - public TSStatus executeNonQueryPlan(ExecutNonQueryReq request) - throws IOException, IllegalPathException { - // process the plan locally - PhysicalPlan plan = PhysicalPlan.Factory.create(request.planBytes); - - TSStatus answer = executeNonQueryPlan(plan); - logger.debug("{}: Received a plan {}, executed answer: {}", name, plan, answer); - return answer; - } - - /** - * Execute a non-query plan. Subclass may have their individual implements. - * - * @param plan a non-query plan. - * @return A TSStatus indicating the execution result. - */ - abstract TSStatus executeNonQueryPlan(PhysicalPlan plan); - - abstract ClientCategory getClientCategory(); - - /** - * according to the consistency configuration, decide whether to execute syncLeader or not and - * throws exception when failed. Note that the write request will always try to sync leader - */ - public void syncLeaderWithConsistencyCheck(boolean isWriteRequest) - throws CheckConsistencyException { - if (isWriteRequest) { - syncLeader(new StrongCheckConsistency()); - } else { - switch (config.getConsistencyLevel()) { - case STRONG_CONSISTENCY: - syncLeader(new StrongCheckConsistency()); - return; - case MID_CONSISTENCY: - // if leaderCommitId bigger than localAppliedId a value, - // will throw CHECK_MID_CONSISTENCY_EXCEPTION - syncLeader(new MidCheckConsistency()); - return; - case WEAK_CONSISTENCY: - // do nothing - return; - default: - // this should not happen in theory - throw new CheckConsistencyException( - "unknown consistency=" + config.getConsistencyLevel().name()); - } - } - } - - public String getMBeanName() { - return String.format( - "%s:%s=%s", "org.apache.iotdb.cluster.service", IoTDBConstant.JMX_TYPE, "Engine"); - } - - /** call back after syncLeader */ - public interface CheckConsistency { - - /** - * deal leaderCommitId and localAppliedId after syncLeader - * - * @param leaderCommitId leader commit id - * @param localAppliedId local applied id - * @throws CheckConsistencyException maybe throw CheckConsistencyException, which is defined in - * implements. 
- */ - void postCheckConsistency(long leaderCommitId, long localAppliedId) - throws CheckConsistencyException; - } - - public static class MidCheckConsistency implements CheckConsistency { - - /** - * if leaderCommitId - localAppliedId > MaxReadLogLag, will throw - * CHECK_MID_CONSISTENCY_EXCEPTION - * - * @param leaderCommitId leader commit id - * @param localAppliedId local applied id - * @throws CheckConsistencyException - */ - @Override - public void postCheckConsistency(long leaderCommitId, long localAppliedId) - throws CheckConsistencyException { - if (leaderCommitId == Long.MAX_VALUE - || leaderCommitId == Long.MIN_VALUE - || leaderCommitId - localAppliedId - > ClusterDescriptor.getInstance().getConfig().getMaxReadLogLag()) { - throw CheckConsistencyException.CHECK_MID_CONSISTENCY_EXCEPTION; - } - } - } - - public static class StrongCheckConsistency implements CheckConsistency { - - /** - * if leaderCommitId > localAppliedId, will throw CHECK_STRONG_CONSISTENCY_EXCEPTION - * - * @param leaderCommitId leader commit id - * @param localAppliedId local applied id - * @throws CheckConsistencyException - */ - @Override - public void postCheckConsistency(long leaderCommitId, long localAppliedId) - throws CheckConsistencyException { - if (leaderCommitId > localAppliedId - || leaderCommitId == Long.MAX_VALUE - || leaderCommitId == Long.MIN_VALUE) { - throw CheckConsistencyException.CHECK_STRONG_CONSISTENCY_EXCEPTION; - } - } - } - - /** - * Request and check the leader's commitId to see whether this node has caught up. If not, wait - * until this node catches up. - * - * @param checkConsistency check after syncleader - * @return true if the node has caught up, false otherwise - * @throws CheckConsistencyException if leaderCommitId bigger than localAppliedId a threshold - * value after timeout - */ - public boolean syncLeader(CheckConsistency checkConsistency) throws CheckConsistencyException { - if (character == NodeCharacter.LEADER) { - return true; - } - waitLeader(); - if (leader.get() == null || ClusterConstant.EMPTY_NODE.equals(leader.get())) { - // the leader has not been elected, we must assume the node falls behind - logger.warn(MSG_NO_LEADER_IN_SYNC, name); - return false; - } - if (character == NodeCharacter.LEADER) { - return true; - } - logger.debug("{}: try synchronizing with the leader {}", name, leader.get()); - return waitUntilCatchUp(checkConsistency); - } - - /** Wait until the leader of this node becomes known or time out. */ - public void waitLeader() { - long startTime = System.currentTimeMillis(); - while (leader.get() == null || ClusterConstant.EMPTY_NODE.equals(leader.get())) { - synchronized (waitLeaderCondition) { - try { - waitLeaderCondition.wait(10); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Unexpected interruption when waiting for a leader", e); - } - } - long consumedTime = System.currentTimeMillis() - startTime; - if (consumedTime >= getWaitLeaderTimeMs()) { - logger.warn("{}: leader is still offline after {}ms", name, consumedTime); - break; - } - } - logger.debug("{}: current leader is {}", name, leader.get()); - } - - /** - * Request the leader's commit index and wait until the local commit index becomes not less than - * it. 
- * - * @return true if this node has caught up before timeout, false otherwise - * @throws CheckConsistencyException if leaderCommitId bigger than localAppliedId a threshold - * value after timeout - */ - protected boolean waitUntilCatchUp(CheckConsistency checkConsistency) - throws CheckConsistencyException { - long leaderCommitId = Long.MIN_VALUE; - RequestCommitIndexResponse response; - try { - response = config.isUseAsyncServer() ? requestCommitIdAsync() : requestCommitIdSync(); - leaderCommitId = response.getCommitLogIndex(); - - tryUpdateCommitIndex( - response.getTerm(), response.getCommitLogIndex(), response.getCommitLogTerm()); - - return syncLocalApply(leaderCommitId, true); - } catch (TException e) { - logger.error(MSG_NO_LEADER_COMMIT_INDEX, name, leader.get(), e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error(MSG_NO_LEADER_COMMIT_INDEX, name, leader.get(), e); - } finally { - if (checkConsistency != null) { - checkConsistency.postCheckConsistency( - leaderCommitId, logManager.getMaxHaveAppliedCommitIndex()); - } - } - logger.debug("Start to sync with leader, leader commit id is {}", leaderCommitId); - return false; - } - - /** - * sync local applyId to leader commitId - * - * @param leaderCommitId leader commit id - * @param fastFail if enable, when log differ too much, return false directly. - * @return true if leaderCommitId <= localAppliedId - */ - public boolean syncLocalApply(long leaderCommitId, boolean fastFail) { - long startTime = System.currentTimeMillis(); - long waitedTime = 0; - long localAppliedId; - - if (fastFail) { - if (leaderCommitId - logManager.getMaxHaveAppliedCommitIndex() > config.getMaxSyncLogLag()) { - logger.info( - "{}: The raft log of this member is too backward to provide service directly.", name); - return false; - } - } - - while (waitedTime < ClusterConstant.getSyncLeaderMaxWaitMs()) { - try { - localAppliedId = logManager.getMaxHaveAppliedCommitIndex(); - logger.debug("{}: synchronizing commitIndex {}/{}", name, localAppliedId, leaderCommitId); - if (leaderCommitId <= localAppliedId) { - // this node has caught up - if (logger.isDebugEnabled()) { - waitedTime = System.currentTimeMillis() - startTime; - logger.debug( - "{}: synchronized to target index {} after {}ms", name, leaderCommitId, waitedTime); - } - return true; - } - // wait for next heartbeat to catch up - // the local node will not perform a commit here according to the leaderCommitId because - // the node may have some inconsistent logs with the leader - waitedTime = System.currentTimeMillis() - startTime; - synchronized (syncLock) { - syncLock.wait(ClusterConstant.getHeartbeatIntervalMs()); - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error(MSG_NO_LEADER_COMMIT_INDEX, name, leader.get(), e); - } - } - logger.warn( - "{}: Failed to synchronize to target index {} after {}ms", - name, - leaderCommitId, - waitedTime); - return false; - } - - /** - * Create a log for "plan" and append it locally and to all followers. Only the group leader can - * call this method. 
Will commit the log locally and send it to followers - * - * @return OK if over half of the followers accept the log or null if the leadership is lost - * during the appending - */ - TSStatus processPlanLocally(PhysicalPlan plan) { - if (USE_LOG_DISPATCHER) { - return processPlanLocallyV2(plan); - } - - logger.debug("{}: Processing plan {}", name, plan); - if (readOnly && !(plan instanceof LogPlan)) { - return StatusUtils.NODE_READ_ONLY; - } - long startTime = Timer.Statistic.RAFT_SENDER_APPEND_LOG.getOperationStartTime(); - - Log log; - - if (plan instanceof LogPlan) { - try { - log = LogParser.getINSTANCE().parse(((LogPlan) plan).getLog()); - } catch (UnknownLogTypeException e) { - logger.error("Can not parse LogPlan {}", plan, e); - return StatusUtils.PARSE_LOG_ERROR; - } - } else { - log = new PhysicalPlanLog(); - ((PhysicalPlanLog) log).setPlan(plan); - } - - // if a single log exceeds the threshold - // we need to return error code to the client as in server mode - if (ClusterDescriptor.getInstance().getConfig().isEnableRaftLogPersistence() - && log.serialize().capacity() + Integer.BYTES - >= ClusterDescriptor.getInstance().getConfig().getRaftLogBufferSize()) { - logger.error( - "Log cannot fit into buffer, please increase raft_log_buffer_size;" - + "or reduce the size of requests you send."); - return StatusUtils.INTERNAL_ERROR; - } - - long startWaitingTime = System.currentTimeMillis(); - while (true) { - // assign term and index to the new log and append it - synchronized (logManager) { - if (logManager.getLastLogIndex() - logManager.getCommitLogIndex() - <= config.getUnCommittedRaftLogNumForRejectThreshold()) { - if (!(plan instanceof LogPlan)) { - plan.setIndex(logManager.getLastLogIndex() + 1); - } - log.setCurrLogTerm(getTerm().get()); - log.setCurrLogIndex(logManager.getLastLogIndex() + 1); - logManager.append(log); - break; - } - } - try { - TimeUnit.MILLISECONDS.sleep( - IoTDBDescriptor.getInstance().getConfig().getCheckPeriodWhenInsertBlocked()); - if (System.currentTimeMillis() - startWaitingTime - > IoTDBDescriptor.getInstance().getConfig().getMaxWaitingTimeWhenInsertBlocked()) { - return StatusUtils.getStatus(TSStatusCode.WRITE_PROCESS_REJECT); - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - Timer.Statistic.RAFT_SENDER_APPEND_LOG.calOperationCostTimeFromStart(startTime); - - try { - if (appendLogInGroup(log)) { - return StatusUtils.OK; - } - } catch (LogExecutionException e) { - return handleLogExecutionException(log, IOUtils.getRootCause(e)); - } - return StatusUtils.TIME_OUT; - } - - private TSStatus processPlanLocallyV2(PhysicalPlan plan) { - logger.debug("{}: Processing plan {}", name, plan); - if (readOnly) { - return StatusUtils.NODE_READ_ONLY; - } - // assign term and index to the new log and append it - SendLogRequest sendLogRequest; - - Log log; - if (plan instanceof LogPlan) { - try { - log = LogParser.getINSTANCE().parse(((LogPlan) plan).getLog()); - } catch (UnknownLogTypeException e) { - logger.error("Can not parse LogPlan {}", plan, e); - return StatusUtils.PARSE_LOG_ERROR; - } - } else { - log = new PhysicalPlanLog(); - ((PhysicalPlanLog) log).setPlan(plan); - } - - // just like processPlanLocally,we need to check the size of log - if (ClusterDescriptor.getInstance().getConfig().isEnableRaftLogPersistence() - && log.serialize().capacity() + Integer.BYTES - >= ClusterDescriptor.getInstance().getConfig().getRaftLogBufferSize()) { - logger.error( - "Log cannot fit into buffer, please increase raft_log_buffer_size;" - 
+ "or reduce the size of requests you send."); - return StatusUtils.INTERNAL_ERROR; - } - long startTime = - Statistic.RAFT_SENDER_COMPETE_LOG_MANAGER_BEFORE_APPEND_V2.getOperationStartTime(); - long startWaitingTime = System.currentTimeMillis(); - while (true) { - synchronized (logManager) { - if (!IoTDBDescriptor.getInstance().getConfig().isEnableMemControl() - || (logManager.getLastLogIndex() - logManager.getCommitLogIndex() - <= config.getUnCommittedRaftLogNumForRejectThreshold())) { - Statistic.RAFT_SENDER_COMPETE_LOG_MANAGER_BEFORE_APPEND_V2.calOperationCostTimeFromStart( - startTime); - if (!(plan instanceof LogPlan)) { - plan.setIndex(logManager.getLastLogIndex() + 1); - } - log.setCurrLogTerm(getTerm().get()); - log.setCurrLogIndex(logManager.getLastLogIndex() + 1); - startTime = Timer.Statistic.RAFT_SENDER_APPEND_LOG_V2.getOperationStartTime(); - logManager.append(log); - Timer.Statistic.RAFT_SENDER_APPEND_LOG_V2.calOperationCostTimeFromStart(startTime); - startTime = Statistic.RAFT_SENDER_BUILD_LOG_REQUEST.getOperationStartTime(); - sendLogRequest = buildSendLogRequest(log); - Statistic.RAFT_SENDER_BUILD_LOG_REQUEST.calOperationCostTimeFromStart(startTime); - break; - } - } - try { - TimeUnit.MILLISECONDS.sleep( - IoTDBDescriptor.getInstance().getConfig().getCheckPeriodWhenInsertBlocked()); - if (System.currentTimeMillis() - startWaitingTime - > IoTDBDescriptor.getInstance().getConfig().getMaxWaitingTimeWhenInsertBlocked()) { - return StatusUtils.getStatus(TSStatusCode.WRITE_PROCESS_REJECT); - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - - startTime = Statistic.RAFT_SENDER_OFFER_LOG.getOperationStartTime(); - log.setCreateTime(System.nanoTime()); - getLogDispatcher().offer(sendLogRequest); - Statistic.RAFT_SENDER_OFFER_LOG.calOperationCostTimeFromStart(startTime); - - try { - AppendLogResult appendLogResult = - waitAppendResult( - sendLogRequest.getVoteCounter(), - sendLogRequest.getLeaderShipStale(), - sendLogRequest.getNewLeaderTerm()); - Timer.Statistic.RAFT_SENDER_LOG_FROM_CREATE_TO_ACCEPT.calOperationCostTimeFromStart( - sendLogRequest.getLog().getCreateTime()); - switch (appendLogResult) { - case OK: - logger.debug(MSG_LOG_IS_ACCEPTED, name, log); - startTime = Timer.Statistic.RAFT_SENDER_COMMIT_LOG.getOperationStartTime(); - commitLog(log); - Timer.Statistic.RAFT_SENDER_COMMIT_LOG.calOperationCostTimeFromStart(startTime); - return StatusUtils.OK; - case TIME_OUT: - logger.debug("{}: log {} timed out...", name, log); - break; - case LEADERSHIP_STALE: - // abort the appending, the new leader will fix the local logs by catch-up - default: - break; - } - } catch (LogExecutionException e) { - return handleLogExecutionException(log, IOUtils.getRootCause(e)); - } - return StatusUtils.TIME_OUT; - } - - public SendLogRequest buildSendLogRequest(Log log) { - AtomicInteger voteCounter = new AtomicInteger(allNodes.size() / 2); - AtomicBoolean leaderShipStale = new AtomicBoolean(false); - AtomicLong newLeaderTerm = new AtomicLong(term.get()); - - long startTime = Statistic.RAFT_SENDER_BUILD_APPEND_REQUEST.getOperationStartTime(); - AppendEntryRequest appendEntryRequest = buildAppendEntryRequest(log, false); - Statistic.RAFT_SENDER_BUILD_APPEND_REQUEST.calOperationCostTimeFromStart(startTime); - - return new SendLogRequest(log, voteCounter, leaderShipStale, newLeaderTerm, appendEntryRequest); - } - - /** - * The maximum time to wait if there is no leader in the group, after which a - * LeadNotFoundException will be thrown. 
- */ - static long getWaitLeaderTimeMs() { - return waitLeaderTimeMs; - } - - static void setWaitLeaderTimeMs(long waitLeaderTimeMs) { - RaftMember.waitLeaderTimeMs = waitLeaderTimeMs; - } - - @SuppressWarnings("java:S2274") // enable timeout - protected RequestCommitIndexResponse requestCommitIdAsync() - throws TException, InterruptedException { - // use Long.MAX_VALUE to indicate a timeout - RequestCommitIndexResponse response = - new RequestCommitIndexResponse(Long.MAX_VALUE, Long.MAX_VALUE, Long.MAX_VALUE); - AtomicReference commitIdResult = new AtomicReference<>(response); - AsyncClient client = getAsyncClient(leader.get()); - if (client == null) { - // cannot connect to the leader - logger.warn(MSG_NO_LEADER_IN_SYNC, name); - return commitIdResult.get(); - } - synchronized (commitIdResult) { - client.requestCommitIndex(getHeader(), new GenericHandler<>(leader.get(), commitIdResult)); - commitIdResult.wait(ClusterConstant.getReadOperationTimeoutMS()); - } - return commitIdResult.get(); - } - - private RequestCommitIndexResponse requestCommitIdSync() throws TException { - Client client = getSyncClient(leader.get()); - RequestCommitIndexResponse response; - if (client == null) { - // cannot connect to the leader - logger.warn(MSG_NO_LEADER_IN_SYNC, name); - // use Long.MAX_VALUE to indicate a timeouts - response = new RequestCommitIndexResponse(Long.MAX_VALUE, Long.MAX_VALUE, Long.MAX_VALUE); - return response; - } - try { - response = client.requestCommitIndex(getHeader()); - } catch (TException e) { - client.getInputProtocol().getTransport().close(); - throw e; - } finally { - ClientUtils.putBackSyncClient(client); - } - return response; - } - - /** - * Tell the requester the current commit index if the local node is the leader of the group headed - * by header. Or forward it to the leader. Otherwise report an error. - * - * @return Long.MIN_VALUE if the node is not a leader, or the commitIndex - */ - public long getCommitIndex() { - if (character == NodeCharacter.LEADER) { - return logManager.getCommitLogIndex(); - } else { - return Long.MIN_VALUE; - } - } - - public void setReadOnly() { - synchronized (logManager) { - readOnly = true; - } - } - - public boolean isReadOnly() { - return readOnly; - } - - public void initPeerMap() { - peerMap = new ConcurrentHashMap<>(); - for (Node entry : allNodes) { - peerMap.computeIfAbsent(entry, k -> new Peer(logManager.getLastLogIndex())); - } - } - - public Map getPeerMap() { - return peerMap; - } - - /** @return true if there is a log whose index is "index" and term is "term", false otherwise */ - public boolean matchLog(long index, long term) { - boolean matched = logManager.matchTerm(term, index); - logger.debug("Log {}-{} matched: {}", index, term, matched); - return matched; - } - - public ExecutorService getSerialToParallelPool() { - return serialToParallelPool; - } - - public ExecutorService getAppendLogThreadPool() { - return appendLogThreadPool; - } - - public Object getSyncLock() { - return syncLock; - } - - /** Sub-classes will add their own process of HeartBeatRequest in this method. */ - void processValidHeartbeatReq(HeartBeatRequest request, HeartBeatResponse response) {} - - /** - * Verify the validity of an ElectionRequest, and make itself a follower of the elector if the - * request is valid. - * - * @return Response.RESPONSE_AGREE if the elector is valid or the local term if the elector has a - * smaller term or Response.RESPONSE_LOG_MISMATCH if the elector has older logs. 
- */ - long checkElectorLogProgress(ElectionRequest electionRequest) { - - long thatTerm = electionRequest.getTerm(); - long thatLastLogIndex = electionRequest.getLastLogIndex(); - long thatLastLogTerm = electionRequest.getLastLogTerm(); - Node elector = electionRequest.getElector(); - - // check the log progress of the elector - long resp = checkLogProgress(thatLastLogIndex, thatLastLogTerm); - if (resp == Response.RESPONSE_AGREE) { - logger.info( - "{} accepted an election request, term:{}/{}, logIndex:{}/{}, logTerm:{}/{}", - name, - thatTerm, - term.get(), - thatLastLogIndex, - logManager.getLastLogIndex(), - thatLastLogTerm, - logManager.getLastLogTerm()); - setCharacter(NodeCharacter.FOLLOWER); - lastHeartbeatReceivedTime = System.currentTimeMillis(); - setVoteFor(elector); - updateHardState(thatTerm, getVoteFor()); - } else { - logger.info( - "{} rejected an election request, term:{}/{}, logIndex:{}/{}, logTerm:{}/{}", - name, - thatTerm, - term.get(), - thatLastLogIndex, - logManager.getLastLogIndex(), - thatLastLogTerm, - logManager.getLastLogTerm()); - } - return resp; - } - - /** - * Reject the election if the lastLogTerm of the candidate equals to the voter's but its - * lastLogIndex is smaller than the voter's Otherwise accept the election. - * - * @return Response.RESPONSE_AGREE if the elector is valid or the local term if the elector has a - * smaller term or Response.RESPONSE_LOG_MISMATCH if the elector has older logs. - */ - long checkLogProgress(long lastLogIndex, long lastLogTerm) { - long response; - synchronized (logManager) { - if (logManager.isLogUpToDate(lastLogTerm, lastLogIndex)) { - response = Response.RESPONSE_AGREE; - } else { - response = Response.RESPONSE_LOG_MISMATCH; - } - } - return response; - } - - /** - * Forward a non-query plan to a node using the default client. - * - * @param plan a non-query plan - * @param node cannot be the local node - * @param header must be set for data group communication, set to null for meta group - * communication - * @return a TSStatus indicating if the forwarding is successful. - */ - public TSStatus forwardPlan(PhysicalPlan plan, Node node, RaftNode header) { - if (node == null || node.equals(thisNode)) { - logger.debug("{}: plan {} has no where to be forwarded", name, plan); - return StatusUtils.NO_LEADER; - } - logger.debug("{}: Forward {} to node {}", name, plan, node); - - TSStatus status; - if (config.isUseAsyncServer()) { - status = forwardPlanAsync(plan, node, header); - } else { - status = forwardPlanSync(plan, node, header); - } - if (status.getCode() == TSStatusCode.NO_CONNECTION.getStatusCode() - && (header == null || header.equals(getHeader())) - && (leader.get() != null) - && leader.get().equals(node)) { - // leader is down, trigger a new election by resetting heartbeat - lastHeartbeatReceivedTime = -1; - leader.set(null); - waitLeader(); - } - return status; - } - - /** - * Forward a non-query plan to "receiver" using "client". - * - * @param plan a non-query plan - * @param header to determine which DataGroupMember of "receiver" will process the request. - * @return a TSStatus indicating if the forwarding is successful. 
- */ - private TSStatus forwardPlanAsync(PhysicalPlan plan, Node receiver, RaftNode header) { - AsyncClient client = getAsyncClient(receiver); - if (client == null) { - logger.debug("{}: can not get client for node={}", name, receiver); - return StatusUtils.NO_CONNECTION - .deepCopy() - .setMessage(String.format("%s cannot be reached", receiver)); - } - return forwardPlanAsync(plan, receiver, header, client); - } - - public TSStatus forwardPlanAsync( - PhysicalPlan plan, Node receiver, RaftNode header, AsyncClient client) { - try { - TSStatus tsStatus = SyncClientAdaptor.executeNonQuery(client, plan, header, receiver); - if (tsStatus == null) { - tsStatus = StatusUtils.TIME_OUT; - logger.warn(MSG_FORWARD_TIMEOUT, name, plan, receiver); - } - return tsStatus; - } catch (IOException | TException e) { - logger.error(MSG_FORWARD_ERROR, name, plan, receiver, e); - return StatusUtils.getStatus(StatusUtils.INTERNAL_ERROR, e.getMessage()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("{}: forward {} to {} interrupted", name, plan, receiver); - return StatusUtils.TIME_OUT; - } - } - - private TSStatus forwardPlanSync(PhysicalPlan plan, Node receiver, RaftNode header) { - Client client = getSyncClient(receiver); - if (client == null) { - logger.warn(MSG_FORWARD_TIMEOUT, name, plan, receiver); - return StatusUtils.TIME_OUT; - } - return forwardPlanSync(plan, receiver, header, client); - } - - public TSStatus forwardPlanSync( - PhysicalPlan plan, Node receiver, RaftNode header, Client client) { - try { - ExecutNonQueryReq req = new ExecutNonQueryReq(); - req.setPlanBytes(PlanSerializer.getInstance().serialize(plan)); - if (header != null) { - req.setHeader(header); - } - - TSStatus tsStatus = client.executeNonQueryPlan(req); - if (tsStatus == null) { - tsStatus = StatusUtils.TIME_OUT; - logger.warn(MSG_FORWARD_TIMEOUT, name, plan, receiver); - } - return tsStatus; - } catch (IOException e) { - logger.error(MSG_FORWARD_ERROR, name, plan, receiver, e); - return StatusUtils.getStatus(StatusUtils.INTERNAL_ERROR, e.getMessage()); - } catch (TException e) { - TSStatus status; - if (e.getCause() instanceof SocketTimeoutException) { - status = StatusUtils.TIME_OUT; - logger.warn(MSG_FORWARD_TIMEOUT, name, plan, receiver); - } else { - logger.error(MSG_FORWARD_ERROR, name, plan, receiver, e); - status = StatusUtils.getStatus(StatusUtils.INTERNAL_ERROR, e.getMessage()); - } - // the connection may be broken, close it to avoid it being reused - client.getInputProtocol().getTransport().close(); - return status; - } finally { - ClientUtils.putBackSyncClient(client); - } - } - - /** - * Get an asynchronous thrift client of the given node. - * - * @return an asynchronous thrift client or null if the caller tries to connect the local node or - * the node cannot be reached. - */ - public AsyncClient getAsyncClient(Node node) { - try { - return clientManager.borrowAsyncClient(node, getClientCategory()); - } catch (Exception e) { - logger.error("borrow async client fail", e); - return null; - } - } - - public AsyncClient getSendLogAsyncClient(Node node) { - try { - return clientManager.borrowAsyncClient(node, ClientCategory.DATA_ASYNC_APPEND_CLIENT); - } catch (Exception e) { - logger.error("borrow send log async client fail", e); - return null; - } - } - - /** - * NOTICE: ClientManager.returnClient() must be called after use. 
the caller needs to check to see - * if the return value is null - * - * @param node the node to connect - * @return the client if node is available, otherwise null - */ - public Client getSyncClient(Node node) { - try { - return clientManager.borrowSyncClient(node, getClientCategory()); - } catch (IOException e) { - logger.error("borrow sync client fail", e); - return null; - } - } - - public Client getSyncClient(Node node, boolean activatedOnly) { - if (ClusterConstant.EMPTY_NODE.equals(node) || node == null) { - return null; - } - - if (activatedOnly && !NodeStatusManager.getINSTANCE().isActivated(node)) { - return null; - } - - return getSyncClient(node); - } - - /** - * Get an asynchronous heartbeat thrift client to the given node. - * - * @return an asynchronous thrift client or null if the caller tries to connect the local node. - */ - public AsyncClient getAsyncHeartbeatClient(Node node) { - ClientCategory category = - ClientCategory.META == getClientCategory() - ? ClientCategory.META_HEARTBEAT - : ClientCategory.DATA_HEARTBEAT; - - try { - return clientManager.borrowAsyncClient(node, category); - } catch (Exception e) { - logger.error("borrow async heartbeat client fail", e); - return null; - } - } - - /** - * NOTICE: client.putBack() must be called after use. - * - * @return the heartbeat client for the node - */ - public Client getSyncHeartbeatClient(Node node) { - ClientCategory category = - ClientCategory.META == getClientCategory() - ? ClientCategory.META_HEARTBEAT - : ClientCategory.DATA_HEARTBEAT; - try { - return clientManager.borrowSyncClient(node, category); - } catch (IOException e) { - logger.error("borrow sync heartbeat client fail", e); - return null; - } - } - - public void returnSyncClient(Client client) { - if (ClientCategory.META == getClientCategory()) { - ((SyncMetaClient) client).returnSelf(); - } else { - ((SyncDataClient) client).returnSelf(); - } - } - - public AtomicLong getTerm() { - return term; - } - - private synchronized LogDispatcher getLogDispatcher() { - if (logDispatcher == null) { - logDispatcher = new LogDispatcher(this); - } - return logDispatcher; - } - - /** - * wait until "voteCounter" counts down to zero, which means the quorum has received the log, or - * one follower tells the node that it is no longer a valid leader, or a timeout is triggered. 
- */ - @SuppressWarnings({"java:S2445"}) // safe synchronized - private AppendLogResult waitAppendResult( - AtomicInteger voteCounter, AtomicBoolean leaderShipStale, AtomicLong newLeaderTerm) { - // wait for the followers to vote - long startTime = Timer.Statistic.RAFT_SENDER_VOTE_COUNTER.getOperationStartTime(); - synchronized (voteCounter) { - long waitStart = System.currentTimeMillis(); - long alreadyWait = 0; - while (voteCounter.get() > 0 - && alreadyWait < ClusterConstant.getWriteOperationTimeoutMS() - && voteCounter.get() != Integer.MAX_VALUE) { - try { - voteCounter.wait(ClusterConstant.getWriteOperationTimeoutMS()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("Unexpected interruption when sending a log", e); - } - alreadyWait = System.currentTimeMillis() - waitStart; - } - } - Timer.Statistic.RAFT_SENDER_VOTE_COUNTER.calOperationCostTimeFromStart(startTime); - - // a node has a larger term than the local node, so this node is no longer a valid leader - if (leaderShipStale.get()) { - stepDown(newLeaderTerm.get(), false); - return AppendLogResult.LEADERSHIP_STALE; - } - // the node knows it is no long the leader from other requests - if (character != NodeCharacter.LEADER) { - return AppendLogResult.LEADERSHIP_STALE; - } - - // cannot get enough agreements within a certain amount of time - if (voteCounter.get() > 0) { - return AppendLogResult.TIME_OUT; - } - - // voteCounter has counted down to zero - return AppendLogResult.OK; - } - - @SuppressWarnings("java:S2445") - void commitLog(Log log) throws LogExecutionException { - long startTime = - Statistic.RAFT_SENDER_COMPETE_LOG_MANAGER_BEFORE_COMMIT.getOperationStartTime(); - synchronized (logManager) { - Statistic.RAFT_SENDER_COMPETE_LOG_MANAGER_BEFORE_COMMIT.calOperationCostTimeFromStart( - startTime); - - startTime = Statistic.RAFT_SENDER_COMMIT_LOG_IN_MANAGER.getOperationStartTime(); - logManager.commitTo(log.getCurrLogIndex()); - } - Statistic.RAFT_SENDER_COMMIT_LOG_IN_MANAGER.calOperationCostTimeFromStart(startTime); - // when using async applier, the log here may not be applied. To return the execution - // result, we must wait until the log is applied. 
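// Editorial aside (not part of the removed file): the block below is the standard
// "wait in a loop until the condition holds" monitor pattern. Spurious wakeups and the short
// bounded wait are harmless because the condition (log.isApplied()) is re-checked on every pass,
// and the applier thread is expected to notify on the same monitor once the log is applied.
// A condensed sketch of that pattern, using the same names as the surrounding method:
//
//   synchronized (log) {
//     while (!log.isApplied()) {
//       log.wait(5);   // bounded wait; re-check the condition after every wakeup
//     }
//   }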
- startTime = Statistic.RAFT_SENDER_COMMIT_WAIT_LOG_APPLY.getOperationStartTime(); - synchronized (log) { - while (!log.isApplied()) { - // wait until the log is applied - try { - log.wait(5); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new LogExecutionException(e); - } - } - } - Statistic.RAFT_SENDER_COMMIT_WAIT_LOG_APPLY.calOperationCostTimeFromStart(startTime); - if (log.getException() != null) { - throw new LogExecutionException(log.getException()); - } - } - - protected TSStatus handleLogExecutionException(Object log, Throwable cause) { - if (cause instanceof BatchProcessException) { - return RpcUtils.getStatus(Arrays.asList(((BatchProcessException) cause).getFailingStatus())); - } - if (cause instanceof DuplicatedTemplateException) { - return StatusUtils.DUPLICATED_TEMPLATE.deepCopy().setMessage(cause.getMessage()); - } - if (cause instanceof StorageGroupNotSetException) { - TSStatus status = StatusUtils.getStatus(TSStatusCode.STORAGE_GROUP_NOT_EXIST); - status.setMessage(cause.getMessage()); - return status; - } - TSStatus tsStatus = - StatusUtils.getStatus(StatusUtils.EXECUTE_STATEMENT_ERROR, cause.getMessage()); - if (cause instanceof RuntimeException) { - logger.error("RuntimeException during executing {}", log, cause); - } - if (cause instanceof IoTDBException) { - tsStatus.setCode(((IoTDBException) cause).getErrorCode()); - } - if (!(cause instanceof PathNotExistException) - && !(cause instanceof PathAlreadyExistException) - && !(cause instanceof StorageGroupAlreadySetException)) { - logger.debug("{} cannot be executed because ", log, cause); - } - return tsStatus; - } - - AppendEntryRequest buildAppendEntryRequest(Log log, boolean serializeNow) { - AppendEntryRequest request = new AppendEntryRequest(); - request.setTerm(term.get()); - if (serializeNow) { - ByteBuffer byteBuffer = log.serialize(); - log.setByteSize(byteBuffer.array().length); - request.setEntry(byteBuffer); - } - request.setLeader(getThisNode()); - // don't need lock because even if it's larger than the commitIndex when appending this log to - // logManager, the follower can handle the larger commitIndex with no effect - request.setLeaderCommit(logManager.getCommitLogIndex()); - request.setPrevLogIndex(log.getCurrLogIndex() - 1); - try { - request.setPrevLogTerm(logManager.getTerm(log.getCurrLogIndex() - 1)); - } catch (Exception e) { - logger.error("getTerm failed for newly append entries", e); - } - if (getHeader() != null) { - // data groups use header to find a particular DataGroupMember - request.setHeader(getHeader()); - } - return request; - } - - /** - * If "newTerm" is larger than the local term, give up the leadership, become a follower and reset - * heartbeat timer. - * - * @param fromLeader true if the request is from a leader, false if the request is from an - * elector. - */ - public void stepDown(long newTerm, boolean fromLeader) { - synchronized (term) { - long currTerm = term.get(); - // confirm that the heartbeat of the new leader hasn't come - if (currTerm < newTerm) { - logger.info("{} has update it's term to {}", getName(), newTerm); - term.set(newTerm); - setVoteFor(null); - setCharacter(NodeCharacter.ELECTOR); - setLeader(null); - updateHardState(newTerm, getVoteFor()); - } - - if (fromLeader) { - // only when the request is from a leader should we update lastHeartbeatReceivedTime, - // otherwise the node may be stuck in FOLLOWER state by a stale node. 
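// Editorial aside (not part of the removed file): stepDown() combines two separate Raft rules.
// Observing a higher term always resets term/voteFor and demotes the node to an elector, while
// the election timer (lastHeartbeatReceivedTime) is refreshed only when the message really comes
// from a leader, so a stale candidate cannot keep this node pinned in the FOLLOWER state.
// A condensed sketch of that decision, using the same field names as the surrounding method:
//
//   if (currTerm < newTerm) {          // rule 1: adopt the higher term and forget the old vote
//     term.set(newTerm);
//     setVoteFor(null);
//     setCharacter(NodeCharacter.ELECTOR);
//     setLeader(null);
//     updateHardState(newTerm, getVoteFor());
//   }
//   if (fromLeader) {                  // rule 2: only a genuine leader refreshes the heartbeat timer
//     setCharacter(NodeCharacter.FOLLOWER);
//     lastHeartbeatReceivedTime = System.currentTimeMillis();
//   }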
- setCharacter(NodeCharacter.FOLLOWER); - lastHeartbeatReceivedTime = System.currentTimeMillis(); - } - } - } - - public Node getThisNode() { - return thisNode; - } - - public void setThisNode(Node thisNode) { - this.thisNode = thisNode; - } - - /** @return the header of the data raft group or null if this is in a meta group. */ - public RaftNode getHeader() { - return null; - } - - public void updateHardState(long currentTerm, Node voteFor) { - HardState state = logManager.getHardState(); - state.setCurrentTerm(currentTerm); - state.setVoteFor(voteFor); - logManager.updateHardState(state); - } - - public Node getVoteFor() { - return voteFor; - } - - public void setVoteFor(Node voteFor) { - if (!Objects.equals(voteFor, this.voteFor)) { - logger.info("{} has update it's voteFor to {}", getName(), voteFor); - this.voteFor = voteFor; - } - } - - /** - * Append a log to all followers in the group until half of them accept the log or the leadership - * is lost. - * - * @return true if the log is accepted by the quorum of the group, false otherwise - */ - boolean appendLogInGroup(Log log) throws LogExecutionException { - if (allNodes.size() == 1) { - // single node group, no followers - long startTime = Timer.Statistic.RAFT_SENDER_COMMIT_LOG.getOperationStartTime(); - logger.debug(MSG_LOG_IS_ACCEPTED, name, log); - commitLog(log); - Timer.Statistic.RAFT_SENDER_COMMIT_LOG.calOperationCostTimeFromStart(startTime); - return true; - } - - int retryTime = 0; - while (true) { - long startTime = Timer.Statistic.RAFT_SENDER_SEND_LOG_TO_FOLLOWERS.getOperationStartTime(); - logger.debug("{}: Send log {} to other nodes, retry times: {}", name, log, retryTime); - if (character != NodeCharacter.LEADER) { - logger.debug("{}: Has lose leadership, so need not to send log", name); - return false; - } - AppendLogResult result = sendLogToFollowers(log); - Timer.Statistic.RAFT_SENDER_SEND_LOG_TO_FOLLOWERS.calOperationCostTimeFromStart(startTime); - switch (result) { - case OK: - startTime = Timer.Statistic.RAFT_SENDER_COMMIT_LOG.getOperationStartTime(); - logger.debug(MSG_LOG_IS_ACCEPTED, name, log); - commitLog(log); - Timer.Statistic.RAFT_SENDER_COMMIT_LOG.calOperationCostTimeFromStart(startTime); - return true; - case TIME_OUT: - logger.debug("{}: log {} timed out, retrying...", name, log); - try { - Thread.sleep(ClusterConstant.RETRY_WAIT_TIME_MS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - retryTime++; - if (retryTime > 5) { - return false; - } - break; - case LEADERSHIP_STALE: - // abort the appending, the new leader will fix the local logs by catch-up - default: - return false; - } - } - } - - /** - * Send the given log to all the followers and decide the result by how many followers return a - * success. - * - * @return an AppendLogResult - */ - protected AppendLogResult sendLogToFollowers(Log log) { - int requiredQuorum = allNodes.size() / 2; - if (requiredQuorum <= 0) { - // use half of the members' size as the quorum - return sendLogToFollowers(log, new AtomicInteger(requiredQuorum)); - } else { - // make sure quorum does not exceed the number of members - 1 - return sendLogToFollowers( - log, new AtomicInteger(Math.min(requiredQuorum, allNodes.size() - 1))); - } - } - - /** - * Send the log to each follower. Every time a follower returns a success, "voteCounter" is - * decreased by 1 and when it counts to 0, return an OK. If any follower returns a higher term - * than the local term, retire from leader and return a LEADERSHIP_STALE. 
If "voteCounter" is - * still positive after a certain time, return TIME_OUT. - * - * @param voteCounter a decreasing vote counter - * @return an AppendLogResult indicating a success or a failure and why - */ - private AppendLogResult sendLogToFollowers(Log log, AtomicInteger voteCounter) { - if (allNodes.size() == 1) { - // single node group, does not need the agreement of others - return AppendLogResult.OK; - } - logger.debug("{} sending a log to followers: {}", name, log); - - // if a follower has larger term than this node, leaderShipStale will be set to true and - // newLeaderTerm will store the follower's term - AtomicBoolean leaderShipStale = new AtomicBoolean(false); - AtomicLong newLeaderTerm = new AtomicLong(term.get()); - - AppendEntryRequest request = buildAppendEntryRequest(log, true); - - try { - if (allNodes.size() > 2) { - // if there are more than one followers, send the requests in parallel so that one slow - // follower will not be blocked - for (Node node : allNodes) { - appendLogThreadPool.submit( - () -> - sendLogToFollower( - log, voteCounter, node, leaderShipStale, newLeaderTerm, request)); - if (character != NodeCharacter.LEADER) { - return AppendLogResult.LEADERSHIP_STALE; - } - } - } else { - // there is only one member, send to it within this thread to reduce thread switching - // overhead - for (Node node : allNodes) { - sendLogToFollower(log, voteCounter, node, leaderShipStale, newLeaderTerm, request); - if (character != NodeCharacter.LEADER) { - return AppendLogResult.LEADERSHIP_STALE; - } - } - } - - } catch (ConcurrentModificationException e) { - // retry if allNodes has changed - return AppendLogResult.TIME_OUT; - } - - return waitAppendResult(voteCounter, leaderShipStale, newLeaderTerm); - } - - /** Send "log" to "node". */ - public void sendLogToFollower( - Log log, - AtomicInteger voteCounter, - Node node, - AtomicBoolean leaderShipStale, - AtomicLong newLeaderTerm, - AppendEntryRequest request) { - if (node.equals(thisNode)) { - return; - } - /** - * if the peer's log progress is too stale, wait until it catches up, otherwise, there may be - * too many waiting requests on the peer's side. - */ - long startTime = Timer.Statistic.RAFT_SENDER_WAIT_FOR_PREV_LOG.getOperationStartTime(); - Peer peer = peerMap.computeIfAbsent(node, k -> new Peer(logManager.getLastLogIndex())); - if (!waitForPrevLog(peer, log)) { - logger.warn("{}: node {} timed out when appending {}", name, node, log); - return; - } - Timer.Statistic.RAFT_SENDER_WAIT_FOR_PREV_LOG.calOperationCostTimeFromStart(startTime); - - if (character != NodeCharacter.LEADER) { - return; - } - - if (config.isUseAsyncServer()) { - sendLogAsync(log, voteCounter, node, leaderShipStale, newLeaderTerm, request, peer); - } else { - sendLogSync(log, voteCounter, node, leaderShipStale, newLeaderTerm, request, peer); - } - } - - /** - * wait until the difference of log index between the matched log of peer and the given log become - * no bigger than maxLogDiff. 
- */ - @SuppressWarnings("java:S2445") // safe synchronized - public boolean waitForPrevLog(Peer peer, Log log) { - final int maxLogDiff = config.getMaxNumOfLogsInMem(); - long waitStart = System.currentTimeMillis(); - long alreadyWait = 0; - // if the peer falls behind too much, wait until it catches up, otherwise there may be too - // many client threads in the peer - while (peer.getMatchIndex() < log.getCurrLogIndex() - maxLogDiff - && character == NodeCharacter.LEADER - && alreadyWait <= ClusterConstant.getWriteOperationTimeoutMS()) { - synchronized (peer) { - try { - peer.wait(ClusterConstant.getWriteOperationTimeoutMS()); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("Waiting for peer to catch up interrupted"); - return false; - } - } - alreadyWait = System.currentTimeMillis() - waitStart; - } - return alreadyWait <= ClusterConstant.getWriteOperationTimeoutMS(); - } - - private void sendLogSync( - Log log, - AtomicInteger voteCounter, - Node node, - AtomicBoolean leaderShipStale, - AtomicLong newLeaderTerm, - AppendEntryRequest request, - Peer peer) { - Client client = getSyncClient(node); - if (client != null) { - AppendNodeEntryHandler handler = - getAppendNodeEntryHandler(log, voteCounter, node, leaderShipStale, newLeaderTerm, peer); - try { - logger.debug("{} sending a log to {}: {}", name, node, log); - long result = client.appendEntry(request); - handler.onComplete(result); - } catch (TException e) { - client.getInputProtocol().getTransport().close(); - handler.onError(e); - } catch (Exception e) { - handler.onError(e); - } finally { - ClientUtils.putBackSyncClient(client); - } - } - } - - public AppendNodeEntryHandler getAppendNodeEntryHandler( - Log log, - AtomicInteger voteCounter, - Node node, - AtomicBoolean leaderShipStale, - AtomicLong newLeaderTerm, - Peer peer) { - AppendNodeEntryHandler handler = new AppendNodeEntryHandler(); - handler.setReceiver(node); - handler.setVoteCounter(voteCounter); - handler.setLeaderShipStale(leaderShipStale); - handler.setLog(log); - handler.setMember(this); - handler.setPeer(peer); - handler.setReceiverTerm(newLeaderTerm); - return handler; - } - - @TestOnly - public void setAppendLogThreadPool(ExecutorService appendLogThreadPool) { - this.appendLogThreadPool = appendLogThreadPool; - } - - public Lock getSnapshotApplyLock() { - return snapshotApplyLock; - } - - /** - * Find the local previous log of "log". If such log is found, discard all local logs behind it - * and append "log" to it. Otherwise report a log mismatch. If too many committed logs have not - * been applied, reject the appendEntry request. - * - * @return Response.RESPONSE_AGREE when the log is successfully appended or Response - * .RESPONSE_LOG_MISMATCH if the previous log of "log" is not found or Response - * .RESPONSE_TOO_BUSY if too many committed logs have not been applied. 
- */ - protected long appendEntry(long prevLogIndex, long prevLogTerm, long leaderCommit, Log log) { - long resp = checkPrevLogIndex(prevLogIndex); - if (resp != Response.RESPONSE_AGREE) { - return resp; - } - - long startTime = Timer.Statistic.RAFT_RECEIVER_APPEND_ENTRY.getOperationStartTime(); - long startWaitingTime = System.currentTimeMillis(); - long success; - while (true) { - synchronized (logManager) { - // TODO: Consider memory footprint to execute a precise rejection - if ((logManager.getCommitLogIndex() - logManager.getMaxHaveAppliedCommitIndex()) - <= config.getUnAppliedRaftLogNumForRejectThreshold()) { - success = logManager.maybeAppend(prevLogIndex, prevLogTerm, leaderCommit, log); - break; - } - try { - TimeUnit.MILLISECONDS.sleep( - IoTDBDescriptor.getInstance().getConfig().getCheckPeriodWhenInsertBlocked()); - if (System.currentTimeMillis() - startWaitingTime - > IoTDBDescriptor.getInstance().getConfig().getMaxWaitingTimeWhenInsertBlocked()) { - return Response.RESPONSE_TOO_BUSY; - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - } - Timer.Statistic.RAFT_RECEIVER_APPEND_ENTRY.calOperationCostTimeFromStart(startTime); - if (success != -1) { - logger.debug("{} append a new log {}", name, log); - resp = Response.RESPONSE_AGREE; - } else { - // the incoming log points to an illegal position, reject it - resp = Response.RESPONSE_LOG_MISMATCH; - } - return resp; - } - - /** Wait until all logs before "prevLogIndex" arrive or a timeout is reached. */ - private boolean waitForPrevLog(long prevLogIndex) { - long waitStart = System.currentTimeMillis(); - long alreadyWait = 0; - Object logUpdateCondition = logManager.getLogUpdateCondition(prevLogIndex); - long lastLogIndex = logManager.getLastLogIndex(); - while (lastLogIndex < prevLogIndex - && alreadyWait <= ClusterConstant.getWriteOperationTimeoutMS()) { - try { - // each time new logs are appended, this will be notified - synchronized (logUpdateCondition) { - logUpdateCondition.wait(1); - } - lastLogIndex = logManager.getLastLogIndex(); - if (lastLogIndex >= prevLogIndex) { - return true; - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - return false; - } - alreadyWait = System.currentTimeMillis() - waitStart; - } - - return alreadyWait <= ClusterConstant.getWriteOperationTimeoutMS(); - } - - private long checkPrevLogIndex(long prevLogIndex) { - long lastLogIndex = logManager.getLastLogIndex(); - long startTime = Timer.Statistic.RAFT_RECEIVER_WAIT_FOR_PREV_LOG.getOperationStartTime(); - if (lastLogIndex < prevLogIndex && !waitForPrevLog(prevLogIndex)) { - // there are logs missing between the incoming log and the local last log, and such logs - // did not come within a timeout, report a mismatch to the sender and it shall fix this - // through catch-up - Timer.Statistic.RAFT_RECEIVER_INDEX_DIFF.add(prevLogIndex - lastLogIndex); - return Response.RESPONSE_LOG_MISMATCH; - } - Timer.Statistic.RAFT_RECEIVER_WAIT_FOR_PREV_LOG.calOperationCostTimeFromStart(startTime); - return Response.RESPONSE_AGREE; - } - - /** - * Find the local previous log of "log". If such log is found, discard all local logs behind it - * and append "log" to it. Otherwise report a log mismatch. If too many committed logs have not - * been applied, reject the appendEntry request. 
- * - * @param logs append logs - * @return Response.RESPONSE_AGREE when the log is successfully appended or Response - * .RESPONSE_LOG_MISMATCH if the previous log of "log" is not found Response - * .RESPONSE_TOO_BUSY if too many committed logs have not been applied. - */ - private long appendEntries( - long prevLogIndex, long prevLogTerm, long leaderCommit, List logs) { - logger.debug( - "{}, prevLogIndex={}, prevLogTerm={}, leaderCommit={}", - name, - prevLogIndex, - prevLogTerm, - leaderCommit); - if (logs.isEmpty()) { - return Response.RESPONSE_AGREE; - } - - long resp = checkPrevLogIndex(prevLogIndex); - if (resp != Response.RESPONSE_AGREE) { - return resp; - } - - long startWaitingTime = System.currentTimeMillis(); - while (true) { - synchronized (logManager) { - // TODO: Consider memory footprint to execute a precise rejection - if ((logManager.getCommitLogIndex() - logManager.getMaxHaveAppliedCommitIndex()) - <= config.getUnAppliedRaftLogNumForRejectThreshold()) { - long startTime = Timer.Statistic.RAFT_RECEIVER_APPEND_ENTRY.getOperationStartTime(); - resp = logManager.maybeAppend(prevLogIndex, prevLogTerm, leaderCommit, logs); - Timer.Statistic.RAFT_RECEIVER_APPEND_ENTRY.calOperationCostTimeFromStart(startTime); - if (resp != -1) { - if (logger.isDebugEnabled()) { - logger.debug("{} append a new log list {}, commit to {}", name, logs, leaderCommit); - } - resp = Response.RESPONSE_AGREE; - } else { - // the incoming log points to an illegal position, reject it - resp = Response.RESPONSE_LOG_MISMATCH; - } - break; - } - } - try { - TimeUnit.MILLISECONDS.sleep( - IoTDBDescriptor.getInstance().getConfig().getCheckPeriodWhenInsertBlocked()); - if (System.currentTimeMillis() - startWaitingTime - > IoTDBDescriptor.getInstance().getConfig().getMaxWaitingTimeWhenInsertBlocked()) { - return Response.RESPONSE_TOO_BUSY; - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - return resp; - } - - /** - * Check the term of the AppendEntryRequest. The term checked is the term of the leader, not the - * term of the log. A new leader can still send logs of old leaders. 
- * - * @return -1 if the check is passed, >0 otherwise - */ - private long checkRequestTerm(long leaderTerm, Node leader) { - long localTerm; - - synchronized (term) { - // if the request comes before the heartbeat arrives, the local term may be smaller than the - // leader term - localTerm = term.get(); - if (leaderTerm < localTerm) { - logger.debug( - "{} rejected the AppendEntriesRequest for term: {}/{}", name, leaderTerm, localTerm); - return localTerm; - } else { - if (leaderTerm > localTerm) { - stepDown(leaderTerm, true); - } else { - lastHeartbeatReceivedTime = System.currentTimeMillis(); - } - setLeader(leader); - if (character != NodeCharacter.FOLLOWER) { - term.notifyAll(); - } - } - } - logger.debug("{} accepted the AppendEntryRequest for term: {}", name, localTerm); - return Response.RESPONSE_AGREE; - } - - public int getRaftGroupId() { - return allNodes.getRaftId(); - } - - enum AppendLogResult { - OK, - TIME_OUT, - LEADERSHIP_STALE - } - - public Object getHeartBeatWaitObject() { - return heartBeatWaitObject; - } - - public boolean isSkipElection() { - return skipElection; - } - - public void setSkipElection(boolean skipElection) { - this.skipElection = skipElection; - } - - public long getLastReportedLogIndex() { - return lastReportedLogIndex; - } - - @Override - public String getAllNodesAsString() { - return allNodes.toString(); - } - - @Override - public String getPeerMapAsString() { - return peerMap.toString(); - } - - @Override - public String getLeaderAsString() { - return leader.get().toString(); - } - - @Override - public String getLogManagerObject() { - return getLogManager().toString(); - } - - @Override - public String getLastCatchUpResponseTimeAsString() { - return lastCatchUpResponseTime.toString(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/member/RaftMemberMBean.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/member/RaftMemberMBean.java deleted file mode 100644 index fa96bd6f9159..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/member/RaftMemberMBean.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.member; - -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import java.util.concurrent.atomic.AtomicLong; - -public interface RaftMemberMBean { - - String getAllNodesAsString(); - - String getName(); - - String getPeerMapAsString(); - - AtomicLong getTerm(); - - String getCharacterAsString(); - - String getLeaderAsString(); - - Node getVoteFor(); - - long getLastHeartbeatReceivedTime(); - - String getLogManagerObject(); - - boolean isReadOnly(); - - long getLastReportedLogIndex(); - - String getLastCatchUpResponseTimeAsString(); - - boolean isSkipElection(); -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/member/package-info.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/member/package-info.java deleted file mode 100644 index 250afcd76055..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/member/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * member package provides raft logic implementation without concern of the network layer, so that - * with a proper broker a single connection can support multiple data groups. - */ -package org.apache.iotdb.cluster.server.member; diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/NodeReport.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/NodeReport.java deleted file mode 100644 index 10538be31e4b..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/NodeReport.java +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.monitor; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.rpc.RpcStat; -import org.apache.iotdb.rpc.RpcTransportFactory; - -import java.util.ArrayList; -import java.util.List; - -/** - * A node report collects the current runtime information of the local node, which contains: 1. The - * MetaMemberReport of the meta member. 2. The DataMemberReports of each data member. - */ -@SuppressWarnings("java:S107") // reports need enough parameters -public class NodeReport { - - private Node thisNode; - private MetaMemberReport metaMemberReport; - private List dataMemberReportList; - - public NodeReport(Node thisNode) { - this.thisNode = thisNode; - dataMemberReportList = new ArrayList<>(); - } - - public void setMetaMemberReport(MetaMemberReport metaMemberReport) { - this.metaMemberReport = metaMemberReport; - } - - public void setDataMemberReportList(List dataMemberReportList) { - this.dataMemberReportList = dataMemberReportList; - } - - @Override - public String toString() { - StringBuilder stringBuilder = new StringBuilder(); - stringBuilder.append("Report of ").append(thisNode).append(System.lineSeparator()); - stringBuilder.append(metaMemberReport).append(System.lineSeparator()); - for (DataMemberReport dataMemberReport : dataMemberReportList) { - stringBuilder.append(dataMemberReport).append(System.lineSeparator()); - } - return stringBuilder.toString(); - } - - /** - * A RaftMemberReport contains the character, leader, term, last log term/index of a raft member. - */ - static class RaftMemberReport { - NodeCharacter character; - Node leader; - long term; - long lastLogTerm; - long lastLogIndex; - long commitIndex; - long commitTerm; - boolean isReadOnly; - long lastHeartbeatReceivedTime; - long prevLastLogIndex; - long maxAppliedLogIndex; - - RaftMemberReport( - NodeCharacter character, - Node leader, - long term, - long lastLogTerm, - long lastLogIndex, - long commitIndex, - long commitTerm, - boolean isReadOnly, - long lastHeartbeatReceivedTime, - long prevLastLogIndex, - long maxAppliedLogIndex) { - this.character = character; - this.leader = leader; - this.term = term; - this.lastLogTerm = lastLogTerm; - this.lastLogIndex = lastLogIndex; - this.commitIndex = commitIndex; - this.commitTerm = commitTerm; - this.isReadOnly = isReadOnly; - this.lastHeartbeatReceivedTime = lastHeartbeatReceivedTime; - this.prevLastLogIndex = prevLastLogIndex; - this.maxAppliedLogIndex = maxAppliedLogIndex; - } - } - - /** MetaMemberReport has no additional fields currently. 
*/ - public static class MetaMemberReport extends RaftMemberReport { - - public MetaMemberReport( - NodeCharacter character, - Node leader, - long term, - long lastLogTerm, - long lastLogIndex, - long commitIndex, - long commitTerm, - boolean isReadOnly, - long lastHeartbeatReceivedTime, - long prevLastLogIndex, - long maxAppliedLogIndex) { - super( - character, - leader, - term, - lastLogTerm, - lastLogIndex, - commitIndex, - commitTerm, - isReadOnly, - lastHeartbeatReceivedTime, - prevLastLogIndex, - maxAppliedLogIndex); - } - - @Override - public String toString() { - long readBytes = RpcStat.getReadBytes(); - long readCompressedBytes = RpcStat.getReadCompressedBytes(); - long writeBytes = RpcStat.getWriteBytes(); - long writeCompressedBytes = RpcStat.getWriteCompressedBytes(); - double readCompressionRatio = (double) readBytes / readCompressedBytes; - double writeCompressionRatio = (double) writeBytes / writeCompressedBytes; - String transportCompressionReport = ""; - if (RpcTransportFactory.isUseSnappy()) { - transportCompressionReport = - ", readBytes=" - + readBytes - + "/" - + readCompressedBytes - + "(" - + readCompressionRatio - + ")" - + ", writeBytes=" - + writeBytes - + "/" - + writeCompressedBytes - + "(" - + writeCompressionRatio - + ")"; - } - return "MetaMemberReport {\n" - + "character=" - + character - + ", Leader=" - + leader - + ", term=" - + term - + ", lastLogTerm=" - + lastLogTerm - + ", lastLogIndex=" - + lastLogIndex - + ", commitIndex=" - + commitIndex - + ", commitTerm=" - + commitTerm - + ", appliedLogIndex=" - + maxAppliedLogIndex - + ", readOnly=" - + isReadOnly - + ", lastHeartbeat=" - + (System.currentTimeMillis() - lastHeartbeatReceivedTime) - + "ms ago" - + ", logIncrement=" - + (lastLogIndex - prevLastLogIndex) - + transportCompressionReport - + ", \n timer: " - + Timer.getReport() - + '}'; - } - } - - /** - * A DataMemberReport additionally contains the header, so it can be told which group this member - * belongs to. 
- */ - public static class DataMemberReport extends RaftMemberReport { - RaftNode header; - long headerLatency; - - public DataMemberReport( - NodeCharacter character, - Node leader, - long term, - long lastLogTerm, - long lastLogIndex, - long commitIndex, - long commitTerm, - RaftNode header, - boolean isReadOnly, - long headerLatency, - long lastHeartbeatReceivedTime, - long prevLastLogIndex, - long maxAppliedLogIndex) { - super( - character, - leader, - term, - lastLogTerm, - lastLogIndex, - commitIndex, - commitTerm, - isReadOnly, - lastHeartbeatReceivedTime, - prevLastLogIndex, - maxAppliedLogIndex); - this.header = header; - this.headerLatency = headerLatency; - } - - @Override - public String toString() { - return "DataMemberReport{" - + "header=" - + header.getNode() - + ", raftId=" - + header.getRaftId() - + ", character=" - + character - + ", Leader=" - + leader - + ", term=" - + term - + ", lastLogTerm=" - + lastLogTerm - + ", lastLogIndex=" - + lastLogIndex - + ", commitIndex=" - + commitIndex - + ", commitTerm=" - + commitTerm - + ", appliedLogIndex=" - + maxAppliedLogIndex - + ", readOnly=" - + isReadOnly - + ", headerLatency=" - + headerLatency - + "ns" - + ", lastHeartbeat=" - + (System.currentTimeMillis() - lastHeartbeatReceivedTime) - + "ms ago" - + ", logIncrement=" - + (lastLogIndex - prevLastLogIndex) - + '}'; - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/NodeStatus.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/NodeStatus.java deleted file mode 100644 index 4524667b9dad..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/NodeStatus.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.monitor; - -import org.apache.iotdb.cluster.rpc.thrift.TNodeStatus; - -import java.util.Objects; - -/** NodeStatus contains the last-known spec and load of a node in the cluster. */ -@SuppressWarnings("java:S1135") -public class NodeStatus implements Comparable { - - // if a node is deactivated lastly too long ago, it is also assumed activated because it may - // have restarted but there is no heartbeat between the two nodes, and the local node cannot - // know the fact that it is normal again. Notice that we cannot always rely on the start-up - // hello, because it only occurs once and may be lost. 
- private static final long DEACTIVATION_VALID_INTERVAL_MS = 600_000L; - - private TNodeStatus status; - // when is the status last updated, millisecond timestamp, to judge whether we should update - // the status or not - private long lastUpdateTime; - // how long does it take to get the status in the last attempt, in nanoseconds, which partially - // reflect the node's load or network condition - private long lastResponseLatency; - - // if a node is judged down by heartbeats or other attempts to connect, isActivated will be set - // to false, so further attempts to get clients of this node will fail without a timeout, but - // getting clients for heartbeat will not fail so the node can be activated as soon as it is up - // again. Clients of associated nodes should take the responsibility to activate or deactivate - // the node. - private volatile boolean isActivated = true; - - // if there is no heartbeat between the local node and this node, when this node is marked - // deactivated, it cannot be reactivated in a normal way. So we also consider it reactivated if - // its lastDeactivatedTime is too old. - private long lastDeactivatedTime; - - // TODO-Cluster: decide what should be contained in NodeStatus and how two compare two NodeStatus - @Override - public int compareTo(NodeStatus o) { - return Long.compare(this.lastResponseLatency, o.lastResponseLatency); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - NodeStatus that = (NodeStatus) o; - return lastUpdateTime == that.lastUpdateTime - && lastResponseLatency == that.lastResponseLatency - && Objects.equals(status, that.status); - } - - @Override - public int hashCode() { - return Objects.hash(status, lastUpdateTime, lastResponseLatency); - } - - public long getLastUpdateTime() { - return lastUpdateTime; - } - - public long getLastResponseLatency() { - return lastResponseLatency; - } - - public TNodeStatus getStatus() { - return status; - } - - public void setStatus(TNodeStatus status) { - this.status = status; - } - - public void setLastUpdateTime(long lastUpdateTime) { - this.lastUpdateTime = lastUpdateTime; - } - - public void setLastResponseLatency(long lastResponseLatency) { - this.lastResponseLatency = lastResponseLatency; - } - - public void activate() { - isActivated = true; - } - - public void deactivate() { - isActivated = false; - lastDeactivatedTime = System.currentTimeMillis(); - } - - public boolean isActivated() { - return isActivated - || (System.currentTimeMillis() - lastDeactivatedTime) > DEACTIVATION_VALID_INTERVAL_MS; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/NodeStatusManager.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/NodeStatusManager.java deleted file mode 100644 index aa42dbac278d..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/NodeStatusManager.java +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.monitor; - -import org.apache.iotdb.cluster.client.async.AsyncMetaClient; -import org.apache.iotdb.cluster.client.sync.SyncClientAdaptor; -import org.apache.iotdb.cluster.client.sync.SyncMetaClient; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.TNodeStatus; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.utils.ClientUtils; -import org.apache.iotdb.commons.utils.TestOnly; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.net.ConnectException; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -/** - * NodeStatusManager manages the status (network latency, workload, connectivity) of each node in - * the whole cluster. The status is updated on demand, so it may not be up-to-date if not forced to - * update. - */ -public class NodeStatusManager { - - private static final Logger logger = LoggerFactory.getLogger(NodeStatusManager.class); - // a status is considered stale if it is older than one minute and should be updated - private static final long NODE_STATUS_UPDATE_INTERVAL_MS = 60 * 1000L; - private static final NodeStatusManager INSTANCE = new NodeStatusManager(); - - private MetaGroupMember metaGroupMember; - private Map nodeStatusMap = new ConcurrentHashMap<>(); - - public static NodeStatusManager getINSTANCE() { - return INSTANCE; - } - - public void setMetaGroupMember(MetaGroupMember metaGroupMember) { - this.metaGroupMember = metaGroupMember; - } - - private TNodeStatus getNodeStatusWithAsyncServer(Node node) { - TNodeStatus status; - AsyncMetaClient asyncMetaClient = (AsyncMetaClient) metaGroupMember.getAsyncClient(node); - if (asyncMetaClient == null) { - return null; - } - try { - status = SyncClientAdaptor.queryNodeStatus(asyncMetaClient); - } catch (TException e) { - if (e.getCause() instanceof ConnectException) { - logger.warn("Cannot query the node status of {}: {}", node, e.getCause()); - } else { - logger.error("query node status failed {}", node, e); - } - return null; - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Cannot query the node status of {}", node, e); - return null; - } - return status; - } - - private TNodeStatus getNodeStatusWithSyncServer(Node node) { - TNodeStatus status; - SyncMetaClient syncMetaClient = (SyncMetaClient) metaGroupMember.getSyncClient(node); - if (syncMetaClient == null) { - logger.error("Cannot query the node status of {} for no available client", node); - return null; - } - try { - status = syncMetaClient.queryNodeStatus(); - } catch (TException e) { - syncMetaClient.getInputProtocol().getTransport().close(); - logger.error("Cannot query the node status of {}", node, e); - return null; - } finally { - ClientUtils.putBackSyncClient(syncMetaClient); - } - return status; - } - - /** - * Get the status of the given node. 
If tryUpdate == true and the current status is older than - * NODE_STATUS_UPDATE_INTERVAL_MS, it will be updated. - * - * @param node - * @param tryUpdate when set to true, the manager will try to update the status of the node if it - * is old enough, otherwise, it will just return the last recorded status. - * @return - */ - public NodeStatus getNodeStatus(Node node, boolean tryUpdate) { - // avoid duplicated computing of concurrent queries - NodeStatus nodeStatus = nodeStatusMap.computeIfAbsent(node, n -> new NodeStatus()); - if (metaGroupMember == null || node.equals(metaGroupMember.getThisNode())) { - return nodeStatus; - } - - if (tryUpdate) { - tryUpdateNodeStatus(node, nodeStatus); - } - return nodeStatus; - } - - private void tryUpdateNodeStatus(Node node, NodeStatus nodeStatus) { - long currTime = System.currentTimeMillis(); - if (nodeStatus.getStatus() != null - && currTime - nodeStatus.getLastUpdateTime() <= NODE_STATUS_UPDATE_INTERVAL_MS) { - return; - } - - long startTime = System.nanoTime(); - TNodeStatus status; - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - status = getNodeStatusWithAsyncServer(node); - } else { - status = getNodeStatusWithSyncServer(node); - } - long responseTime = System.nanoTime() - startTime; - - if (status != null) { - nodeStatus.setStatus(status); - nodeStatus.setLastUpdateTime(System.currentTimeMillis()); - nodeStatus.setLastResponseLatency(responseTime); - } else { - nodeStatus.setLastResponseLatency(Long.MAX_VALUE); - } - logger.info( - "NodeStatus of {} is updated, status: {}, response time: {}", - node, - nodeStatus.getStatus(), - nodeStatus.getLastResponseLatency()); - } - - public long getLastResponseLatency(Node node) { - NodeStatus nodeStatus = getNodeStatus(node, true); - return nodeStatus.getLastResponseLatency(); - } - - @TestOnly - public void clear() { - nodeStatusMap.clear(); - } - - public void activate(Node node) { - getNodeStatus(node, false).activate(); - } - - public void deactivate(Node node) { - getNodeStatus(node, false).deactivate(); - } - - /** - * @param node - * @return whether the node is CURRENTLY available, this method will not try to update its status - * to avoid deadlock - */ - public boolean isActivated(Node node) { - return getNodeStatus(node, false).isActivated(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/Peer.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/Peer.java deleted file mode 100644 index d012d60ebc6e..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/Peer.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.monitor; - -import java.util.concurrent.atomic.AtomicInteger; - -public class Peer { - - private long nextIndex; - private long matchIndex; - private AtomicInteger inconsistentHeartbeatNum = new AtomicInteger(); - // lastLogIndex from the last heartbeat - private long lastHeartBeatIndex; - - public Peer(long nextIndex) { - this.nextIndex = nextIndex; - this.matchIndex = -1; - } - - public synchronized long getNextIndex() { - return nextIndex; - } - - public synchronized void setNextIndex(long nextIndex) { - this.nextIndex = nextIndex; - } - - public synchronized long getMatchIndex() { - return matchIndex; - } - - public synchronized void setMatchIndex(long matchIndex) { - this.matchIndex = matchIndex; - this.setNextIndex(Math.max(nextIndex, matchIndex + 1)); - this.notifyAll(); - } - - public int incInconsistentHeartbeatNum() { - return inconsistentHeartbeatNum.incrementAndGet(); - } - - public void resetInconsistentHeartbeatNum() { - inconsistentHeartbeatNum.set(0); - } - - public long getLastHeartBeatIndex() { - return lastHeartBeatIndex; - } - - public void setLastHeartBeatIndex(long lastHeartBeatIndex) { - this.lastHeartBeatIndex = lastHeartBeatIndex; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/Timer.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/Timer.java deleted file mode 100644 index f4ac9806d5b2..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/monitor/Timer.java +++ /dev/null @@ -1,337 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.monitor; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.server.member.RaftMember; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.atomic.AtomicLong; - -public class Timer { - - public static final boolean ENABLE_INSTRUMENTING = true; - - private static final String COORDINATOR = "Coordinator"; - private static final String META_GROUP_MEMBER = "Meta group member"; - private static final String DATA_GROUP_MEMBER = "Data group member"; - private static final String RAFT_MEMBER_SENDER = " Raft member(sender)"; - private static final String RAFT_MEMBER_RECEIVER = " Raft member(receiver)"; - private static final String LOG_DISPATCHER = "Log dispatcher"; - - // convert nano to milli - private static final double TIME_SCALE = 1_000_000.0; - - public enum Statistic { - // A dummy root for the convenience of prints - ROOT("ClassName", "BlockName", TIME_SCALE, true, null), - // coordinator - COORDINATOR_EXECUTE_NON_QUERY(COORDINATOR, "execute non query", TIME_SCALE, true, ROOT), - - // meta group member - META_GROUP_MEMBER_EXECUTE_NON_QUERY( - META_GROUP_MEMBER, "execute non query", TIME_SCALE, true, COORDINATOR_EXECUTE_NON_QUERY), - META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_LOCAL_GROUP( - META_GROUP_MEMBER, - "execute in local group", - TIME_SCALE, - true, - META_GROUP_MEMBER_EXECUTE_NON_QUERY), - META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_REMOTE_GROUP( - META_GROUP_MEMBER, - "execute in remote group", - TIME_SCALE, - true, - META_GROUP_MEMBER_EXECUTE_NON_QUERY), - // data group member - DATA_GROUP_MEMBER_LOCAL_EXECUTION( - DATA_GROUP_MEMBER, - "execute locally", - TIME_SCALE, - true, - META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_LOCAL_GROUP), - DATA_GROUP_MEMBER_WAIT_LEADER( - DATA_GROUP_MEMBER, - "wait for leader", - TIME_SCALE, - true, - META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_LOCAL_GROUP), - DATA_GROUP_MEMBER_FORWARD_PLAN( - DATA_GROUP_MEMBER, - "forward to leader", - TIME_SCALE, - true, - META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_LOCAL_GROUP), - // raft member - sender - RAFT_SENDER_APPEND_LOG( - RAFT_MEMBER_SENDER, - "locally append log", - TIME_SCALE, - !RaftMember.USE_LOG_DISPATCHER, - DATA_GROUP_MEMBER_LOCAL_EXECUTION), - RAFT_SENDER_COMPETE_LOG_MANAGER_BEFORE_APPEND_V2( - RAFT_MEMBER_SENDER, - "compete for log manager before append", - TIME_SCALE, - RaftMember.USE_LOG_DISPATCHER, - DATA_GROUP_MEMBER_LOCAL_EXECUTION), - RAFT_SENDER_APPEND_LOG_V2( - RAFT_MEMBER_SENDER, - "locally append log", - TIME_SCALE, - RaftMember.USE_LOG_DISPATCHER, - DATA_GROUP_MEMBER_LOCAL_EXECUTION), - RAFT_SENDER_BUILD_LOG_REQUEST( - RAFT_MEMBER_SENDER, - "build SendLogRequest", - TIME_SCALE, - RaftMember.USE_LOG_DISPATCHER, - DATA_GROUP_MEMBER_LOCAL_EXECUTION), - RAFT_SENDER_BUILD_APPEND_REQUEST( - RAFT_MEMBER_SENDER, - "build AppendEntryRequest", - TIME_SCALE, - RaftMember.USE_LOG_DISPATCHER, - RAFT_SENDER_BUILD_LOG_REQUEST), - RAFT_SENDER_OFFER_LOG( - RAFT_MEMBER_SENDER, - "offer log to dispatcher", - TIME_SCALE, - RaftMember.USE_LOG_DISPATCHER, - DATA_GROUP_MEMBER_LOCAL_EXECUTION), - RAFT_SENDER_SEND_LOG_TO_FOLLOWERS( - RAFT_MEMBER_SENDER, - "send log to followers", - TIME_SCALE, - !RaftMember.USE_LOG_DISPATCHER, - DATA_GROUP_MEMBER_LOCAL_EXECUTION), - RAFT_SENDER_WAIT_FOR_PREV_LOG( - RAFT_MEMBER_SENDER, - "sender wait for prev log", - TIME_SCALE, - true, - RAFT_SENDER_SEND_LOG_TO_FOLLOWERS), - RAFT_SENDER_SERIALIZE_LOG( - RAFT_MEMBER_SENDER, "serialize logs", TIME_SCALE, true, 
RAFT_SENDER_SEND_LOG_TO_FOLLOWERS), - RAFT_SENDER_SEND_LOG_ASYNC( - RAFT_MEMBER_SENDER, - "send log async", - TIME_SCALE, - ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(), - RAFT_SENDER_SEND_LOG_TO_FOLLOWERS), - RAFT_SENDER_SEND_LOG( - RAFT_MEMBER_SENDER, "send log", TIME_SCALE, true, RAFT_SENDER_SEND_LOG_TO_FOLLOWERS), - RAFT_SENDER_VOTE_COUNTER( - RAFT_MEMBER_SENDER, - "wait for votes", - TIME_SCALE, - true, - RaftMember.USE_LOG_DISPATCHER - ? DATA_GROUP_MEMBER_LOCAL_EXECUTION - : RAFT_SENDER_SEND_LOG_TO_FOLLOWERS), - RAFT_SENDER_COMMIT_LOG( - RAFT_MEMBER_SENDER, - "locally commit log", - TIME_SCALE, - true, - DATA_GROUP_MEMBER_LOCAL_EXECUTION), - RAFT_SENDER_COMPETE_LOG_MANAGER_BEFORE_COMMIT( - RAFT_MEMBER_SENDER, - "compete for log manager before commit", - TIME_SCALE, - true, - RAFT_SENDER_COMMIT_LOG), - RAFT_SENDER_COMMIT_LOG_IN_MANAGER( - RAFT_MEMBER_SENDER, - "commit log in log manager", - TIME_SCALE, - RaftMember.USE_LOG_DISPATCHER, - RAFT_SENDER_COMMIT_LOG), - RAFT_SENDER_COMMIT_GET_LOGS( - RAFT_MEMBER_SENDER, - "get logs to be committed", - TIME_SCALE, - RaftMember.USE_LOG_DISPATCHER, - RAFT_SENDER_COMMIT_LOG_IN_MANAGER), - RAFT_SENDER_COMMIT_DELETE_EXCEEDING_LOGS( - RAFT_MEMBER_SENDER, - "delete logs exceeding capacity", - TIME_SCALE, - RaftMember.USE_LOG_DISPATCHER, - RAFT_SENDER_COMMIT_LOG_IN_MANAGER), - RAFT_SENDER_COMMIT_APPEND_AND_STABLE_LOGS( - RAFT_MEMBER_SENDER, - "append and stable committed logs", - TIME_SCALE, - RaftMember.USE_LOG_DISPATCHER, - RAFT_SENDER_COMMIT_LOG_IN_MANAGER), - RAFT_SENDER_COMMIT_APPLY_LOGS( - RAFT_MEMBER_SENDER, - "apply after committing logs", - TIME_SCALE, - RaftMember.USE_LOG_DISPATCHER, - RAFT_SENDER_COMMIT_LOG_IN_MANAGER), - RAFT_SENDER_COMMIT_TO_CONSUMER_LOGS( - RAFT_MEMBER_SENDER, - "provide log to consumer", - TIME_SCALE, - RaftMember.USE_LOG_DISPATCHER, - RAFT_SENDER_COMMIT_APPLY_LOGS), - RAFT_SENDER_COMMIT_EXCLUSIVE_LOGS( - RAFT_MEMBER_SENDER, - "apply logs that cannot run in parallel", - TIME_SCALE, - RaftMember.USE_LOG_DISPATCHER, - RAFT_SENDER_COMMIT_APPLY_LOGS), - RAFT_SENDER_COMMIT_WAIT_LOG_APPLY( - RAFT_MEMBER_SENDER, "wait until log is applied", TIME_SCALE, true, RAFT_SENDER_COMMIT_LOG), - RAFT_SENDER_IN_APPLY_QUEUE( - RAFT_MEMBER_SENDER, "in apply queue", TIME_SCALE, true, RAFT_SENDER_COMMIT_WAIT_LOG_APPLY), - RAFT_SENDER_DATA_LOG_APPLY( - RAFT_MEMBER_SENDER, "apply data log", TIME_SCALE, true, RAFT_SENDER_COMMIT_WAIT_LOG_APPLY), - RAFT_SENDER_LOG_FROM_CREATE_TO_ACCEPT( - RAFT_MEMBER_SENDER, - "log from create to accept", - TIME_SCALE, - RaftMember.USE_LOG_DISPATCHER, - DATA_GROUP_MEMBER_LOCAL_EXECUTION), - // raft member - receiver - RAFT_RECEIVER_LOG_PARSE( - RAFT_MEMBER_RECEIVER, "log parse", TIME_SCALE, true, RAFT_SENDER_SEND_LOG_TO_FOLLOWERS), - RAFT_RECEIVER_WAIT_FOR_PREV_LOG( - RAFT_MEMBER_RECEIVER, - "receiver wait for prev log", - TIME_SCALE, - true, - RAFT_SENDER_SEND_LOG_TO_FOLLOWERS), - RAFT_RECEIVER_APPEND_ENTRY( - RAFT_MEMBER_RECEIVER, "append entrys", TIME_SCALE, true, RAFT_SENDER_SEND_LOG_TO_FOLLOWERS), - RAFT_RECEIVER_INDEX_DIFF(RAFT_MEMBER_RECEIVER, "index diff", 1.0, true, ROOT), - // log dispatcher - LOG_DISPATCHER_LOG_IN_QUEUE( - LOG_DISPATCHER, - "in queue", - TIME_SCALE, - true, - META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_LOCAL_GROUP), - LOG_DISPATCHER_FROM_CREATE_TO_END( - LOG_DISPATCHER, - "from create to end", - TIME_SCALE, - true, - META_GROUP_MEMBER_EXECUTE_NON_QUERY_IN_LOCAL_GROUP); - - String className; - String blockName; - AtomicLong sum = new AtomicLong(0); - AtomicLong counter = 
new AtomicLong(0); - double scale; - boolean valid; - int level; - Statistic parent; - List children = new ArrayList<>(); - - Statistic(String className, String blockName, double scale, boolean valid, Statistic parent) { - this.className = className; - this.blockName = blockName; - this.scale = scale; - this.valid = valid; - this.parent = parent; - if (parent == null) { - level = -1; - } else { - level = parent.level + 1; - parent.children.add(this); - } - } - - public void add(long val) { - if (ENABLE_INSTRUMENTING) { - sum.addAndGet(val); - counter.incrementAndGet(); - } - } - - /** @return System.nanoTime() if the ENABLE_INSTRUMENTING is true, else zero */ - public long getOperationStartTime() { - if (ENABLE_INSTRUMENTING) { - return System.nanoTime(); - } - return Long.MIN_VALUE; - } - - /** - * This method equals `add(System.nanoTime() - start)`. We wrap `System.nanoTime()` in this - * method to avoid unnecessary calls when instrumenting is disabled. - */ - public void calOperationCostTimeFromStart(long startTime) { - if (ENABLE_INSTRUMENTING && startTime != Long.MIN_VALUE) { - add(System.nanoTime() - startTime); - } - } - - /** WARN: no current safety guarantee. */ - public void reset() { - sum.set(0); - counter.set(0); - } - - /** WARN: no current safety guarantee. */ - public static void resetAll() { - for (Statistic value : values()) { - value.reset(); - } - } - - @Override - public String toString() { - double s = sum.get() / scale; - long cnt = counter.get(); - double avg = s / cnt; - return String.format("%s - %s: %.2f, %d, %.2f", className, blockName, s, cnt, avg); - } - } - - public static String getReport() { - if (!ENABLE_INSTRUMENTING) { - return ""; - } - StringBuilder result = new StringBuilder(); - printTo(Statistic.ROOT, result); - return result.toString(); - } - - private static void printTo(Statistic currNode, StringBuilder out) { - if (currNode != Statistic.ROOT && currNode.valid) { - indent(out, currNode.level); - out.append(currNode).append("\n"); - } - for (Statistic child : currNode.children) { - printTo(child, out); - } - } - - private static void indent(StringBuilder out, int indents) { - for (int i = 0; i < indents; i++) { - out.append(" "); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/AbstractDataRaftService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/AbstractDataRaftService.java deleted file mode 100644 index c16d96cafa60..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/AbstractDataRaftService.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.raft; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.rpc.thrift.TSDataService; -import org.apache.iotdb.cluster.server.service.DataGroupServiceImpls; - -public abstract class AbstractDataRaftService extends AbstractRaftService { - - private DataGroupServiceImpls impl; - - @Override - public void initSyncedServiceImpl(Object serviceImpl) { - if (serviceImpl != null) { - impl = (DataGroupServiceImpls) serviceImpl; - } - super.initSyncedServiceImpl(serviceImpl); - } - - @Override - public void initAsyncedServiceImpl(Object serviceImpl) { - if (serviceImpl != null) { - impl = (DataGroupServiceImpls) serviceImpl; - } - super.initAsyncedServiceImpl(serviceImpl); - } - - @Override - public void initTProcessor() { - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - processor = new TSDataService.AsyncProcessor<>(impl); - } else { - processor = new TSDataService.Processor<>(impl); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/AbstractMetaRaftService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/AbstractMetaRaftService.java deleted file mode 100644 index 02b891c265a5..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/AbstractMetaRaftService.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.raft; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.rpc.thrift.TSMetaService; -import org.apache.iotdb.cluster.server.service.MetaAsyncService; -import org.apache.iotdb.cluster.server.service.MetaSyncService; - -public abstract class AbstractMetaRaftService extends AbstractRaftService { - - private MetaAsyncService asyncServiceImpl; - private MetaSyncService syncServiceImpl; - - @Override - public void initSyncedServiceImpl(Object serviceImpl) { - syncServiceImpl = (MetaSyncService) serviceImpl; - super.initSyncedServiceImpl(serviceImpl); - } - - @Override - public void initAsyncedServiceImpl(Object serviceImpl) { - asyncServiceImpl = (MetaAsyncService) serviceImpl; - super.initAsyncedServiceImpl(serviceImpl); - } - - @Override - public void initTProcessor() { - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - processor = new TSMetaService.AsyncProcessor<>(asyncServiceImpl); - } else { - processor = new TSMetaService.Processor<>(syncServiceImpl); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/AbstractRaftService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/AbstractRaftService.java deleted file mode 100644 index e5c1b7c79f26..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/AbstractRaftService.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.raft; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.commons.exception.runtime.RPCServiceException; -import org.apache.iotdb.commons.service.ThriftService; -import org.apache.iotdb.commons.service.ThriftServiceThread; -import org.apache.iotdb.db.conf.IoTDBConfig; -import org.apache.iotdb.db.conf.IoTDBDescriptor; - -import org.apache.thrift.TBaseAsyncProcessor; - -public abstract class AbstractRaftService extends ThriftService { - - public void initThriftServiceThread( - String daemonThreadName, String clientThreadName, ThriftServiceThread.ServerType serverType) - throws IllegalAccessException { - IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig(); - try { - if (ClusterDescriptor.getInstance().getConfig().isUseAsyncServer()) { - thriftServiceThread = - new ThriftServiceThread( - (TBaseAsyncProcessor) processor, - getID().getName(), - clientThreadName, - getBindIP(), - getBindPort(), - config.getRpcMaxConcurrentClientNum(), - config.getThriftServerAwaitTimeForStopService(), - new RaftServiceHandler(), - false, - ClusterDescriptor.getInstance().getConfig().getConnectionTimeoutInMS(), - config.getThriftMaxFrameSize(), - serverType); - } else { - thriftServiceThread = - new ThriftServiceThread( - processor, - getID().getName(), - clientThreadName, - getBindIP(), - getBindPort(), - config.getRpcMaxConcurrentClientNum(), - config.getThriftServerAwaitTimeForStopService(), - new RaftServiceHandler(), - false); - } - } catch (RPCServiceException e) { - throw new IllegalAccessException(e.getMessage()); - } - thriftServiceThread.setName(daemonThreadName); - } - - @Override - public String getBindIP() { - return ClusterDescriptor.getInstance().getConfig().getInternalIp(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/DataRaftHeartBeatService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/DataRaftHeartBeatService.java deleted file mode 100644 index b70851618c38..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/DataRaftHeartBeatService.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.raft; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.utils.ClusterUtils; -import org.apache.iotdb.commons.concurrent.ThreadName; -import org.apache.iotdb.commons.service.ServiceType; -import org.apache.iotdb.commons.service.ThriftServiceThread; - -public class DataRaftHeartBeatService extends AbstractDataRaftService - implements DataRaftHeartBeatServiceMBean { - - private DataRaftHeartBeatService() {} - - @Override - public ServiceType getID() { - return ServiceType.CLUSTER_DATA_HEART_BEAT_RPC_SERVICE; - } - - @Override - public void initThriftServiceThread() throws IllegalAccessException { - initThriftServiceThread( - ThreadName.CLUSTER_DATA_HEARTBEAT_RPC_SERVICE.getName(), - ThreadName.CLUSTER_DATA_HEARTBEAT_RPC_CLIENT.getName(), - ThriftServiceThread.ServerType.HSHA); - } - - @Override - public int getBindPort() { - return ClusterDescriptor.getInstance().getConfig().getInternalDataPort() - + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET; - } - - public static DataRaftHeartBeatService getInstance() { - return DataRaftHeartBeatServiceHolder.INSTANCE; - } - - private static class DataRaftHeartBeatServiceHolder { - - private static final DataRaftHeartBeatService INSTANCE = new DataRaftHeartBeatService(); - - private DataRaftHeartBeatServiceHolder() {} - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/DataRaftHeartBeatServiceMBean.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/DataRaftHeartBeatServiceMBean.java deleted file mode 100644 index eb4a750eeffb..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/DataRaftHeartBeatServiceMBean.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.raft; - -public interface DataRaftHeartBeatServiceMBean {} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/DataRaftService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/DataRaftService.java deleted file mode 100644 index b449730386d8..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/DataRaftService.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.raft; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.commons.concurrent.ThreadName; -import org.apache.iotdb.commons.service.ServiceType; -import org.apache.iotdb.commons.service.ThriftServiceThread; - -public class DataRaftService extends AbstractDataRaftService implements DataRaftServiceMBean { - - private DataRaftService() {} - - @Override - public ServiceType getID() { - return ServiceType.CLUSTER_DATA_RPC_SERVICE; - } - - @Override - public void initThriftServiceThread() throws IllegalAccessException { - initThriftServiceThread( - ThreadName.CLUSTER_DATA_RPC_SERVICE.getName(), - ThreadName.CLUSTER_DATA_RPC_CLIENT.getName(), - ThriftServiceThread.ServerType.SELECTOR); - } - - @Override - public int getBindPort() { - return ClusterDescriptor.getInstance().getConfig().getInternalDataPort(); - } - - public static DataRaftService getInstance() { - return DataRaftServiceHolder.INSTANCE; - } - - private static class DataRaftServiceHolder { - - private static final DataRaftService INSTANCE = new DataRaftService(); - - private DataRaftServiceHolder() {} - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/DataRaftServiceMBean.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/DataRaftServiceMBean.java deleted file mode 100644 index 2f4a63820799..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/DataRaftServiceMBean.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.raft; - -public interface DataRaftServiceMBean {} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/MetaRaftHeartBeatService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/MetaRaftHeartBeatService.java deleted file mode 100644 index 5166b56b2283..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/MetaRaftHeartBeatService.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.raft; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.utils.ClusterUtils; -import org.apache.iotdb.commons.concurrent.ThreadName; -import org.apache.iotdb.commons.service.ServiceType; -import org.apache.iotdb.commons.service.ThriftServiceThread; - -public class MetaRaftHeartBeatService extends AbstractMetaRaftService - implements MetaRaftHeartBeatServiceMBean { - - private MetaRaftHeartBeatService() {} - - @Override - public ServiceType getID() { - return ServiceType.CLUSTER_META_HEART_BEAT_RPC_SERVICE; - } - - @Override - public void initThriftServiceThread() throws IllegalAccessException { - initThriftServiceThread( - ThreadName.CLUSTER_META_HEARTBEAT_RPC_SERVICE.getName(), - ThreadName.CLUSTER_META_HEARTBEAT_RPC_CLIENT.getName(), - ThriftServiceThread.ServerType.HSHA); - } - - @Override - public int getBindPort() { - return ClusterDescriptor.getInstance().getConfig().getInternalMetaPort() - + ClusterUtils.META_HEARTBEAT_PORT_OFFSET; - } - - public static MetaRaftHeartBeatService getInstance() { - return MetaRaftHeartBeatServiceHolder.INSTANCE; - } - - private static class MetaRaftHeartBeatServiceHolder { - - private static final MetaRaftHeartBeatService INSTANCE = new MetaRaftHeartBeatService(); - - private MetaRaftHeartBeatServiceHolder() {} - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/MetaRaftHeartBeatServiceMBean.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/MetaRaftHeartBeatServiceMBean.java deleted file mode 100644 index 8fa990495695..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/MetaRaftHeartBeatServiceMBean.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.raft; - -public interface MetaRaftHeartBeatServiceMBean {} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/MetaRaftService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/MetaRaftService.java deleted file mode 100644 index a7eff19b7664..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/MetaRaftService.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.raft; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.commons.concurrent.ThreadName; -import org.apache.iotdb.commons.service.ServiceType; -import org.apache.iotdb.commons.service.ThriftServiceThread; - -public class MetaRaftService extends AbstractMetaRaftService implements MetaRaftServiceMBean { - - private MetaRaftService() {} - - @Override - public ServiceType getID() { - return ServiceType.CLUSTER_META_RPC_SERVICE; - } - - @Override - public void initThriftServiceThread() throws IllegalAccessException { - initThriftServiceThread( - ThreadName.CLUSTER_META_RPC_SERVICE.getName(), - ThreadName.CLUSTER_META_RPC_CLIENT.getName(), - ThriftServiceThread.ServerType.SELECTOR); - } - - @Override - public int getBindPort() { - return ClusterDescriptor.getInstance().getConfig().getInternalMetaPort(); - } - - public static MetaRaftService getInstance() { - return MetaRaftServiceHolder.INSTANCE; - } - - private static class MetaRaftServiceHolder { - - private static final MetaRaftService INSTANCE = new MetaRaftService(); - - private MetaRaftServiceHolder() {} - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/MetaRaftServiceMBean.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/MetaRaftServiceMBean.java deleted file mode 100644 index 4376393368a2..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/MetaRaftServiceMBean.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.raft; - -public interface MetaRaftServiceMBean {} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/RaftServiceHandler.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/RaftServiceHandler.java deleted file mode 100644 index e00753c83b3e..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/raft/RaftServiceHandler.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.raft; - -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.server.ServerContext; -import org.apache.thrift.server.TServerEventHandler; -import org.apache.thrift.transport.TTransport; - -public class RaftServiceHandler implements TServerEventHandler { - @Override - public void preServe() {} - - @Override - public ServerContext createContext(TProtocol inputProtocol, TProtocol outputProtocol) { - return null; - } - - @Override - public void deleteContext( - ServerContext serverContext, TProtocol inputProtocol, TProtocol outputProtocol) {} - - @Override - public void processContext( - ServerContext serverContext, TTransport inputProtocol, TTransport outputProtocol) {} -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/BaseAsyncService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/service/BaseAsyncService.java deleted file mode 100644 index 970112f3b5fc..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/BaseAsyncService.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.service; - -import org.apache.iotdb.cluster.exception.LeaderUnknownException; -import org.apache.iotdb.cluster.exception.UnknownLogTypeException; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntriesRequest; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.ElectionRequest; -import org.apache.iotdb.cluster.rpc.thrift.ExecutNonQueryReq; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatResponse; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.RequestCommitIndexResponse; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.cluster.utils.IOUtils; -import org.apache.iotdb.cluster.utils.StatusUtils; -import org.apache.iotdb.common.rpc.thrift.TEndPoint; -import org.apache.iotdb.common.rpc.thrift.TSStatus; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.file.Files; - -public abstract class BaseAsyncService implements RaftService.AsyncIface { - - RaftMember member; - String name; - - BaseAsyncService(RaftMember member) { - this.member = member; - this.name = member.getName(); - } - - @Override - public void sendHeartbeat( - HeartBeatRequest request, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(member.processHeartbeatRequest(request)); - } - - @Override - public void startElection(ElectionRequest request, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(member.processElectionRequest(request)); - } - - @Override - public void appendEntry(AppendEntryRequest request, AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete(member.appendEntry(request)); - } catch (UnknownLogTypeException e) { - resultHandler.onError(e); - } - } - - @Override - public void appendEntries(AppendEntriesRequest request, AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete(member.appendEntries(request)); - } catch (Exception e) { - resultHandler.onError(e); - } - } - - @Override - public void requestCommitIndex( - RaftNode header, AsyncMethodCallback resultHandler) { - long commitIndex; - long commitTerm; - long curTerm; - synchronized (member.getTerm()) { - commitIndex = member.getLogManager().getCommitLogIndex(); - commitTerm = member.getLogManager().getCommitLogTerm(); - curTerm = member.getTerm().get(); - } - - RequestCommitIndexResponse response = - new RequestCommitIndexResponse(curTerm, commitIndex, commitTerm); - - if (commitIndex != Long.MIN_VALUE) { - resultHandler.onComplete(response); - return; - } - - member.waitLeader(); - AsyncClient client = member.getAsyncClient(member.getLeader()); - if (client == null) { - resultHandler.onError(new LeaderUnknownException(member.getAllNodes())); - return; - } - try { - client.requestCommitIndex(header, resultHandler); - } catch (TException e) { - resultHandler.onError(e); - } - } - - @Override - public void readFile( - String filePath, long offset, int length, AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete(IOUtils.readFile(filePath, offset, length)); - } catch (IOException e) { - resultHandler.onError(e); - } - } 
- - @Override - public void removeHardLink(String hardLinkPath, AsyncMethodCallback resultHandler) { - try { - Files.deleteIfExists(new File(hardLinkPath).toPath()); - resultHandler.onComplete(null); - } catch (IOException e) { - resultHandler.onError(e); - } - } - - @Override - public void matchTerm( - long index, long term, RaftNode header, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(member.matchLog(index, term)); - } - - @Override - public void executeNonQueryPlan( - ExecutNonQueryReq request, AsyncMethodCallback resultHandler) { - if (member.getCharacter() != NodeCharacter.LEADER) { - // forward the plan to the leader - AsyncClient client = member.getAsyncClient(member.getLeader()); - if (client != null) { - try { - client.executeNonQueryPlan(request, resultHandler); - } catch (TException e) { - resultHandler.onError(e); - } - } else { - resultHandler.onComplete(StatusUtils.NO_LEADER); - } - return; - } - - try { - TSStatus status = member.executeNonQueryPlan(request); - resultHandler.onComplete( - StatusUtils.getStatus( - status, - new TEndPoint( - member.getThisNode().getClientIp(), member.getThisNode().getClientPort()))); - } catch (Exception e) { - resultHandler.onError(e); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/BaseSyncService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/service/BaseSyncService.java deleted file mode 100644 index 105194c02c97..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/BaseSyncService.java +++ /dev/null @@ -1,181 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.service; - -import org.apache.iotdb.cluster.exception.LeaderUnknownException; -import org.apache.iotdb.cluster.exception.UnknownLogTypeException; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntriesRequest; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.ElectionRequest; -import org.apache.iotdb.cluster.rpc.thrift.ExecutNonQueryReq; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatResponse; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client; -import org.apache.iotdb.cluster.rpc.thrift.RequestCommitIndexResponse; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.cluster.utils.ClientUtils; -import org.apache.iotdb.cluster.utils.IOUtils; -import org.apache.iotdb.cluster.utils.StatusUtils; -import org.apache.iotdb.common.rpc.thrift.TSStatus; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.BufferUnderflowException; -import java.nio.ByteBuffer; -import java.nio.file.Files; - -public abstract class BaseSyncService implements RaftService.Iface { - - private static final Logger logger = LoggerFactory.getLogger(BaseSyncService.class); - RaftMember member; - String name; - - BaseSyncService(RaftMember member) { - this.member = member; - this.name = member.getName(); - } - - @Override - public HeartBeatResponse sendHeartbeat(HeartBeatRequest request) { - return member.processHeartbeatRequest(request); - } - - @Override - public long startElection(ElectionRequest request) { - return member.processElectionRequest(request); - } - - @Override - public long appendEntry(AppendEntryRequest request) throws TException { - try { - return member.appendEntry(request); - } catch (UnknownLogTypeException e) { - throw new TException(e); - } - } - - @Override - public long appendEntries(AppendEntriesRequest request) throws TException { - try { - return member.appendEntries(request); - } catch (BufferUnderflowException e) { - logger.error( - "Underflow buffers {} of logs from {}", - request.getEntries(), - request.getPrevLogIndex() + 1); - throw new TException(e); - } catch (Exception e) { - throw new TException(e); - } - } - - @Override - public RequestCommitIndexResponse requestCommitIndex(RaftNode header) throws TException { - - long commitIndex; - long commitTerm; - long curTerm; - synchronized (member.getTerm()) { - commitIndex = member.getLogManager().getCommitLogIndex(); - commitTerm = member.getLogManager().getCommitLogTerm(); - curTerm = member.getTerm().get(); - } - - RequestCommitIndexResponse response = - new RequestCommitIndexResponse(curTerm, commitIndex, commitTerm); - - if (commitIndex != Long.MIN_VALUE) { - return response; - } - - member.waitLeader(); - Client client = member.getSyncClient(member.getLeader()); - if (client == null) { - throw new TException(new LeaderUnknownException(member.getAllNodes())); - } - try { - response = client.requestCommitIndex(header); - } catch (TException e) { - client.getInputProtocol().getTransport().close(); - throw e; - } finally { - ClientUtils.putBackSyncClient(client); - } - return response; - } - - @Override - public ByteBuffer readFile(String filePath, long 
offset, int length) throws TException { - try { - return IOUtils.readFile(filePath, offset, length); - } catch (IOException e) { - throw new TException(e); - } - } - - @Override - public void removeHardLink(String hardLinkPath) throws TException { - try { - Files.deleteIfExists(new File(hardLinkPath).toPath()); - } catch (IOException e) { - throw new TException(e); - } - } - - @Override - public boolean matchTerm(long index, long term, RaftNode header) { - return member.matchLog(index, term); - } - - @Override - public TSStatus executeNonQueryPlan(ExecutNonQueryReq request) throws TException { - if (member.getCharacter() != NodeCharacter.LEADER) { - // forward the plan to the leader - Client client = member.getSyncClient(member.getLeader()); - if (client != null) { - TSStatus status; - try { - status = client.executeNonQueryPlan(request); - } catch (TException e) { - client.getInputProtocol().getTransport().close(); - throw e; - } finally { - ClientUtils.putBackSyncClient(client); - } - return status; - } else { - return StatusUtils.NO_LEADER; - } - } - - try { - return member.executeNonQueryPlan(request); - } catch (Exception e) { - throw new TException(e); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataAsyncService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataAsyncService.java deleted file mode 100644 index 63b28aa98eef..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataAsyncService.java +++ /dev/null @@ -1,503 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.service; - -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.LeaderUnknownException; -import org.apache.iotdb.cluster.exception.ReaderNotFoundException; -import org.apache.iotdb.cluster.metadata.CSchemaProcessor; -import org.apache.iotdb.cluster.rpc.thrift.GetAggrResultRequest; -import org.apache.iotdb.cluster.rpc.thrift.GetAllPathsResult; -import org.apache.iotdb.cluster.rpc.thrift.GroupByRequest; -import org.apache.iotdb.cluster.rpc.thrift.LastQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.MeasurementSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.MultSeriesQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PreviousFillRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaResp; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotResp; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.SendSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.SingleSeriesQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.TSDataService; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.service.IoTDB; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Set; - -public class DataAsyncService extends BaseAsyncService implements TSDataService.AsyncIface { - - private static final Logger logger = LoggerFactory.getLogger(DataAsyncService.class); - private DataGroupMember dataGroupMember; - - public DataAsyncService(DataGroupMember member) { - super(member); - this.dataGroupMember = member; - } - - @Override - public void sendSnapshot(SendSnapshotRequest request, AsyncMethodCallback resultHandler) { - try { - dataGroupMember.receiveSnapshot(request); - resultHandler.onComplete(null); - } catch (Exception e) { - resultHandler.onError(e); - } - } - - @Override - public void pullSnapshot( - PullSnapshotRequest request, AsyncMethodCallback resultHandler) { - PullSnapshotResp pullSnapshotResp = null; - try { - pullSnapshotResp = dataGroupMember.getSnapshot(request); - } catch (IOException e) { - resultHandler.onError(e); - } - if (pullSnapshotResp == null) { - forwardPullSnapshot(request, resultHandler); - } else { - resultHandler.onComplete(pullSnapshotResp); - } - } - - private void forwardPullSnapshot( - PullSnapshotRequest request, AsyncMethodCallback resultHandler) { - // if this node has been set readOnly, then it must have been synchronized with the leader - // otherwise forward the request to the leader - if (dataGroupMember.getLeader() != null - && !ClusterConstant.EMPTY_NODE.equals(dataGroupMember.getLeader())) { - logger.debug( - "{} 
forwarding a pull snapshot request to the leader {}", - name, - dataGroupMember.getLeader()); - AsyncDataClient client = - (AsyncDataClient) dataGroupMember.getAsyncClient(dataGroupMember.getLeader()); - try { - client.pullSnapshot(request, resultHandler); - } catch (TException e) { - resultHandler.onError(e); - } - } else { - resultHandler.onError(new LeaderUnknownException(dataGroupMember.getAllNodes())); - } - } - - /** - * forward the request to the leader - * - * @param request pull schema request - * @param resultHandler result handler - */ - @Override - public void pullTimeSeriesSchema( - PullSchemaRequest request, AsyncMethodCallback resultHandler) { - if (dataGroupMember.getCharacter() == NodeCharacter.LEADER) { - try { - resultHandler.onComplete( - dataGroupMember.getLocalQueryExecutor().queryTimeSeriesSchema(request)); - return; - } catch (CheckConsistencyException | MetadataException e) { - // maybe the partition table of this node is not up-to-date, try again after updating - // partition table - try { - dataGroupMember.getMetaGroupMember().syncLeaderWithConsistencyCheck(false); - resultHandler.onComplete( - dataGroupMember.getLocalQueryExecutor().queryTimeSeriesSchema(request)); - return; - } catch (CheckConsistencyException | MetadataException ex) { - resultHandler.onError(ex); - } - } - } - - // forward the request to the leader - AsyncDataClient leaderClient = getLeaderClient(); - if (leaderClient == null) { - resultHandler.onError(new LeaderUnknownException(dataGroupMember.getAllNodes())); - return; - } - try { - leaderClient.pullTimeSeriesSchema(request, resultHandler); - } catch (TException e1) { - resultHandler.onError(e1); - } - } - - private AsyncDataClient getLeaderClient() { - dataGroupMember.waitLeader(); - return (AsyncDataClient) dataGroupMember.getAsyncClient(dataGroupMember.getLeader()); - } - - /** - * forward the request to the leader - * - * @param request pull schema request - * @param resultHandler result handler - */ - @Override - public void pullMeasurementSchema( - PullSchemaRequest request, AsyncMethodCallback resultHandler) { - if (dataGroupMember.getCharacter() == NodeCharacter.LEADER) { - try { - resultHandler.onComplete( - dataGroupMember.getLocalQueryExecutor().queryMeasurementSchema(request)); - return; - } catch (CheckConsistencyException | MetadataException e) { - // maybe the partition table of this node is not up-to-date, try again after updating - // partition table - try { - dataGroupMember.getMetaGroupMember().syncLeaderWithConsistencyCheck(false); - resultHandler.onComplete( - dataGroupMember.getLocalQueryExecutor().queryMeasurementSchema(request)); - return; - } catch (CheckConsistencyException | MetadataException ex) { - resultHandler.onError(ex); - } - } - } - - // forward the request to the leader - AsyncDataClient leaderClient = getLeaderClient(); - if (leaderClient == null) { - resultHandler.onError(new LeaderUnknownException(dataGroupMember.getAllNodes())); - return; - } - try { - leaderClient.pullMeasurementSchema(request, resultHandler); - } catch (TException e1) { - resultHandler.onError(e1); - } - } - - @Override - public void querySingleSeries( - SingleSeriesQueryRequest request, AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete(dataGroupMember.getLocalQueryExecutor().querySingleSeries(request)); - } catch (Exception e) { - resultHandler.onError(e); - } - } - - @Override - public void queryMultSeries( - MultSeriesQueryRequest request, AsyncMethodCallback resultHandler) throws TException { - try { - 
resultHandler.onComplete(dataGroupMember.getLocalQueryExecutor().queryMultSeries(request)); - } catch (Exception e) { - resultHandler.onError(e); - } - } - - @Override - public void querySingleSeriesByTimestamp( - SingleSeriesQueryRequest request, AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete( - dataGroupMember.getLocalQueryExecutor().querySingleSeriesByTimestamp(request)); - } catch (Exception e) { - resultHandler.onError(e); - } - } - - @Override - public void endQuery( - RaftNode header, Node requester, long queryId, AsyncMethodCallback resultHandler) { - try { - dataGroupMember.getQueryManager().endQuery(requester, queryId); - resultHandler.onComplete(null); - } catch (StorageEngineException e) { - resultHandler.onError(e); - } - } - - @Override - public void fetchSingleSeries( - RaftNode header, long readerId, AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete(dataGroupMember.getLocalQueryExecutor().fetchSingleSeries(readerId)); - } catch (ReaderNotFoundException | IOException e) { - resultHandler.onError(e); - } - } - - @Override - public void fetchMultSeries( - RaftNode header, - long readerId, - List paths, - AsyncMethodCallback> resultHandler) - throws TException { - try { - resultHandler.onComplete( - dataGroupMember.getLocalQueryExecutor().fetchMultSeries(readerId, paths)); - } catch (ReaderNotFoundException | IOException e) { - resultHandler.onError(e); - } - } - - @Override - public void fetchSingleSeriesByTimestamps( - RaftNode header, - long readerId, - List timestamps, - AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete( - dataGroupMember - .getLocalQueryExecutor() - .fetchSingleSeriesByTimestamps( - readerId, timestamps.stream().mapToLong(k -> k).toArray(), timestamps.size())); - } catch (ReaderNotFoundException | IOException e) { - resultHandler.onError(e); - } - } - - @Override - public void getAllPaths( - RaftNode header, - List paths, - boolean withAlias, - AsyncMethodCallback resultHandler) { - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - resultHandler.onComplete( - ((CSchemaProcessor) IoTDB.schemaProcessor).getAllPaths(paths, withAlias)); - } catch (MetadataException | CheckConsistencyException e) { - resultHandler.onError(e); - } - } - - @Override - public void getAllDevices( - RaftNode header, - List path, - boolean isPrefixMatch, - AsyncMethodCallback> resultHandler) { - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - resultHandler.onComplete( - ((CSchemaProcessor) IoTDB.schemaProcessor).getAllDevices(path, isPrefixMatch)); - } catch (MetadataException | CheckConsistencyException e) { - resultHandler.onError(e); - } - } - - @Override - public void getDevices( - RaftNode header, ByteBuffer planBinary, AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete(dataGroupMember.getLocalQueryExecutor().getDevices(planBinary)); - } catch (CheckConsistencyException | IOException | MetadataException e) { - resultHandler.onError(e); - } - } - - @Override - public void getNodeList( - RaftNode header, - String path, - int nodeLevel, - AsyncMethodCallback> resultHandler) { - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - resultHandler.onComplete( - ((CSchemaProcessor) IoTDB.schemaProcessor).getNodeList(path, nodeLevel)); - } catch (CheckConsistencyException | MetadataException e) { - resultHandler.onError(e); - } - } - - @Override - public void getChildNodeInNextLevel( - RaftNode header, String path, AsyncMethodCallback> resultHandler) { - 
try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - resultHandler.onComplete( - ((CSchemaProcessor) IoTDB.schemaProcessor).getChildNodeInNextLevel(path)); - } catch (CheckConsistencyException | MetadataException e) { - resultHandler.onError(e); - } - } - - @Override - public void getChildNodePathInNextLevel( - RaftNode header, String path, AsyncMethodCallback> resultHandler) { - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - resultHandler.onComplete( - ((CSchemaProcessor) IoTDB.schemaProcessor).getChildNodePathInNextLevel(path)); - } catch (CheckConsistencyException | MetadataException e) { - resultHandler.onError(e); - } - } - - @Override - public void getAllMeasurementSchema( - MeasurementSchemaRequest request, AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete( - dataGroupMember.getLocalQueryExecutor().getAllMeasurementSchema(request)); - } catch (CheckConsistencyException | IOException | MetadataException e) { - resultHandler.onError(e); - } - } - - @Override - public void getAggrResult( - GetAggrResultRequest request, AsyncMethodCallback> resultHandler) { - try { - resultHandler.onComplete(dataGroupMember.getLocalQueryExecutor().getAggrResult(request)); - } catch (StorageEngineException | QueryProcessException | IOException e) { - resultHandler.onError(e); - } - } - - @Override - public void getUnregisteredTimeseries( - RaftNode header, - List timeseriesList, - AsyncMethodCallback> resultHandler) { - try { - resultHandler.onComplete( - dataGroupMember.getLocalQueryExecutor().getUnregisteredTimeseries(timeseriesList)); - } catch (CheckConsistencyException e) { - resultHandler.onError(e); - } - } - - @Override - public void getGroupByExecutor(GroupByRequest request, AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete(dataGroupMember.getLocalQueryExecutor().getGroupByExecutor(request)); - } catch (QueryProcessException | StorageEngineException e) { - resultHandler.onError(e); - } - } - - @Override - public void getGroupByResult( - RaftNode header, - long executorId, - long startTime, - long endTime, - AsyncMethodCallback> resultHandler) { - try { - resultHandler.onComplete( - dataGroupMember.getLocalQueryExecutor().getGroupByResult(executorId, startTime, endTime)); - } catch (ReaderNotFoundException | IOException | QueryProcessException e) { - resultHandler.onError(e); - } - } - - @Override - public void previousFill( - PreviousFillRequest request, AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete(dataGroupMember.getLocalQueryExecutor().previousFill(request)); - } catch (QueryProcessException - | StorageEngineException - | IOException - | IllegalPathException e) { - resultHandler.onError(e); - } - } - - @Override - public void last(LastQueryRequest request, AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete(dataGroupMember.getLocalQueryExecutor().last(request)); - } catch (CheckConsistencyException - | QueryProcessException - | IOException - | StorageEngineException - | MetadataException e) { - resultHandler.onError(e); - } - } - - @Override - public void getPathCount( - RaftNode header, - List pathsToQuery, - int level, - AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete( - dataGroupMember.getLocalQueryExecutor().getPathCount(pathsToQuery, level)); - } catch (CheckConsistencyException | MetadataException e) { - resultHandler.onError(e); - } - } - - @Override - public void getDeviceCount( - RaftNode header, List pathsToQuery, AsyncMethodCallback 
resultHandler) - throws TException { - try { - resultHandler.onComplete( - dataGroupMember.getLocalQueryExecutor().getDeviceCount(pathsToQuery)); - } catch (CheckConsistencyException | MetadataException e) { - resultHandler.onError(e); - } - } - - @Override - public void onSnapshotApplied( - RaftNode header, List slots, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(dataGroupMember.onSnapshotInstalled(slots)); - } - - @Override - public void peekNextNotNullValue( - RaftNode header, - long executorId, - long startTime, - long endTime, - AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete( - dataGroupMember - .getLocalQueryExecutor() - .peekNextNotNullValue(executorId, startTime, endTime)); - } catch (ReaderNotFoundException | IOException e) { - resultHandler.onError(e); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataGroupEngine.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataGroupEngine.java deleted file mode 100644 index c52ca1b5a5ab..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataGroupEngine.java +++ /dev/null @@ -1,510 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.service; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.NoHeaderNodeException; -import org.apache.iotdb.cluster.exception.NotInSameGroupException; -import org.apache.iotdb.cluster.exception.PartitionTableUnavailableException; -import org.apache.iotdb.cluster.log.logtypes.AddNodeLog; -import org.apache.iotdb.cluster.log.logtypes.RemoveNodeLog; -import org.apache.iotdb.cluster.partition.NodeAdditionResult; -import org.apache.iotdb.cluster.partition.NodeRemovalResult; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.StoppedMemberManager; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.server.monitor.NodeReport.DataMemberReport; -import org.apache.iotdb.commons.exception.StartupException; -import org.apache.iotdb.commons.service.IService; -import org.apache.iotdb.commons.service.ServiceType; -import org.apache.iotdb.commons.utils.TestOnly; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.protocol.TProtocolFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.ConcurrentHashMap; - -public class DataGroupEngine implements IService, DataGroupEngineMBean { - - private static final Logger logger = LoggerFactory.getLogger(DataGroupEngine.class); - // key: the header of a data group, value: the member representing this node in this group and - // it is currently at service - private static final Map headerGroupMap = new ConcurrentHashMap<>(); - private static final Map asyncServiceMap = new ConcurrentHashMap<>(); - private static final Map syncServiceMap = new ConcurrentHashMap<>(); - // key: the header of a data group, value: the member representing this node in this group but - // it is out of service because another node has joined the group and expelled this node, or - // the node itself is removed, but it is still stored to provide snapshot for other nodes - private final StoppedMemberManager stoppedMemberManager; - private PartitionTable partitionTable; - private DataGroupMember.Factory dataMemberFactory; - private static MetaGroupMember metaGroupMember; - private final Node thisNode = ClusterIoTDB.getInstance().getThisNode(); - private static TProtocolFactory protocolFactory; - - private DataGroupEngine() { - dataMemberFactory = new DataGroupMember.Factory(protocolFactory, metaGroupMember); - stoppedMemberManager = new StoppedMemberManager(dataMemberFactory); - } - - public static DataGroupEngine getInstance() { - if (metaGroupMember == null || protocolFactory == null) { - logger.error("MetaGroupMember or protocolFactory init failed."); - } - return InstanceHolder.Instance; - } - - @TestOnly - public void resetFactory() { - dataMemberFactory = new DataGroupMember.Factory(protocolFactory, metaGroupMember); - } - - @TestOnly - 
public DataGroupEngine( - DataGroupMember.Factory dataMemberFactory, MetaGroupMember metaGroupMember) { - DataGroupEngine.metaGroupMember = metaGroupMember; - this.dataMemberFactory = dataMemberFactory; - this.stoppedMemberManager = new StoppedMemberManager(dataMemberFactory); - } - - @Override - public void start() throws StartupException {} - - @Override - public void stop() { - closeLogManagers(); - for (DataGroupMember member : headerGroupMap.values()) { - member.stop(); - } - } - - @Override - public ServiceType getID() { - return ServiceType.CLUSTER_DATA_ENGINE; - } - - public void closeLogManagers() { - for (DataGroupMember member : headerGroupMap.values()) { - member.closeLogManager(); - } - } - - public DataAsyncService getDataAsyncService( - RaftNode header, AsyncMethodCallback resultHandler, Object request) { - return asyncServiceMap.computeIfAbsent( - header, - h -> { - DataGroupMember dataMember = getDataMember(header, resultHandler, request); - return dataMember != null ? new DataAsyncService(dataMember) : null; - }); - } - - public DataSyncService getDataSyncService(RaftNode header) { - return syncServiceMap.computeIfAbsent( - header, - h -> { - DataGroupMember dataMember = getDataMember(header, null, null); - return dataMember != null ? new DataSyncService(dataMember) : null; - }); - } - - /** - * Add a DataGroupMember into this server, if a member with the same header exists, the old member - * will be stopped and replaced by the new one. - */ - public DataGroupMember addDataGroupMember(DataGroupMember dataGroupMember, RaftNode header) { - synchronized (headerGroupMap) { - // TODO this method won't update headerMap if a new dataGroupMember comes with the same - // header. - if (headerGroupMap.containsKey(header)) { - logger.debug("Group {} already exist.", dataGroupMember.getAllNodes()); - return headerGroupMap.get(header); - } - stoppedMemberManager.remove(header); - headerGroupMap.put(header, dataGroupMember); - - dataGroupMember.start(); - } - logger.info("Add group {} successfully.", dataGroupMember.getName()); - resetServiceCache(header); // avoid dead-lock - - return dataGroupMember; - } - - private void resetServiceCache(RaftNode header) { - asyncServiceMap.remove(header); - syncServiceMap.remove(header); - } - - /** - * @param header the header of the group which the local node is in - * @param resultHandler can be set to null if the request is an internal request - * @param request the toString() of this parameter should explain what the request is and it is - * only used in logs for tracing - * @return - */ - public DataGroupMember getDataMember( - RaftNode header, AsyncMethodCallback resultHandler, Object request) { - // if the resultHandler is not null, then the request is a external one and must be with a - // header - if (header.getNode() == null) { - if (resultHandler != null) { - resultHandler.onError(new NoHeaderNodeException()); - } - return null; - } - DataGroupMember member = stoppedMemberManager.get(header); - if (member != null) { - return member; - } - - // avoid creating two members for a header - Exception ex = null; - member = headerGroupMap.get(header); - if (member != null) { - return member; - } - logger.info("Received a request \"{}\" from unregistered header {}", request, header); - if (partitionTable != null) { - try { - member = createNewMember(header); - } catch (NotInSameGroupException | CheckConsistencyException e) { - ex = e; - } - } else { - logger.info("Partition is not ready, cannot create member"); - ex = new 
PartitionTableUnavailableException(thisNode); - } - if (ex != null && resultHandler != null) { - resultHandler.onError(ex); - } - return member; - } - - /** - * @return A DataGroupMember representing this node in the data group of the header. - * @throws NotInSameGroupException If this node is not in the group of the header. - */ - private DataGroupMember createNewMember(RaftNode header) - throws NotInSameGroupException, CheckConsistencyException { - PartitionGroup partitionGroup; - partitionGroup = partitionTable.getPartitionGroup(header); - if (partitionGroup == null || !partitionGroup.contains(thisNode)) { - // if the partition table is old, this node may not have been moved to the new group - metaGroupMember.syncLeaderWithConsistencyCheck(true); - partitionGroup = partitionTable.getPartitionGroup(header); - } - DataGroupMember member; - synchronized (headerGroupMap) { - member = headerGroupMap.get(header); - if (member != null) { - return member; - } - if (partitionGroup != null && partitionGroup.contains(thisNode)) { - // the two nodes are in the same group, create a new data member - member = dataMemberFactory.create(partitionGroup); - headerGroupMap.put(header, member); - stoppedMemberManager.remove(header); - logger.info("Created a member for header {}, group is {}", header, partitionGroup); - member.start(); - } else { - // the member may have been stopped after syncLeader - member = stoppedMemberManager.get(header); - if (member != null) { - return member; - } - logger.info( - "This node {} does not belong to the group {}, header {}", - thisNode, - partitionGroup, - header); - throw new NotInSameGroupException(partitionGroup, thisNode); - } - } - return member; - } - - public void preAddNodeForDataGroup(AddNodeLog log, DataGroupMember targetDataGroupMember) { - - // Make sure the previous add/remove node log has been applied - metaGroupMember.syncLocalApply(log.getMetaLogIndex() - 1, false); - - // Check the validity of the partition table - if (!metaGroupMember.getPartitionTable().deserialize(log.getPartitionTable())) { - return; - } - - targetDataGroupMember.preAddNode(log.getNewNode()); - } - - /** - * Try adding the node into the group of each DataGroupMember, and if the DataGroupMember no - * longer stays in that group, also remove and stop it. If the new group contains this node, also - * create and add a new DataGroupMember for it. - */ - public void addNode(Node node, NodeAdditionResult result) { - // If the node executed adding itself to the cluster, it's unnecessary to add new groups because - // they already exist.
- if (node.equals(thisNode)) { - return; - } - Iterator> entryIterator = headerGroupMap.entrySet().iterator(); - synchronized (headerGroupMap) { - while (entryIterator.hasNext()) { - Entry entry = entryIterator.next(); - DataGroupMember dataGroupMember = entry.getValue(); - // the member may be expelled from the group, remove and stop it if so - boolean shouldLeave = dataGroupMember.addNode(node, result); - if (shouldLeave) { - logger.info("This node does not belong to {} any more", dataGroupMember.getAllNodes()); - removeMember(entry.getKey(), entry.getValue(), false); - entryIterator.remove(); - } - } - - if (logger.isDebugEnabled()) { - logger.debug( - "Data cluster server: start to handle new groups when adding new node {}", node); - } - for (PartitionGroup newGroup : result.getNewGroupList()) { - if (newGroup.contains(thisNode)) { - RaftNode header = newGroup.getHeader(); - logger.info("Adding this node into a new group {}", newGroup); - DataGroupMember dataGroupMember = dataMemberFactory.create(newGroup); - dataGroupMember = addDataGroupMember(dataGroupMember, header); - dataGroupMember.pullNodeAdditionSnapshots( - ((SlotPartitionTable) partitionTable).getNodeSlots(header), node); - } - } - } - } - - /** - * When the node joins a cluster, it also creates a new data group and a corresponding member - * which has no data. This is to make that member pull data from other nodes. - */ - public void pullSnapshots() { - for (int raftId = 0; - raftId < ClusterDescriptor.getInstance().getConfig().getMultiRaftFactor(); - raftId++) { - RaftNode raftNode = new RaftNode(thisNode, raftId); - List slots = ((SlotPartitionTable) partitionTable).getNodeSlots(raftNode); - DataGroupMember dataGroupMember = headerGroupMap.get(raftNode); - dataGroupMember.pullNodeAdditionSnapshots(slots, thisNode); - } - } - - /** Make sure the group will not receive new raft logs. */ - private void removeMember( - RaftNode header, DataGroupMember dataGroupMember, boolean removedGroup) { - dataGroupMember.setReadOnly(); - if (!removedGroup) { - dataGroupMember.stop(); - } else { - if (dataGroupMember.getCharacter() != NodeCharacter.LEADER) { - new Thread( - () -> { - try { - dataGroupMember.syncLeader(null); - dataGroupMember.stop(); - } catch (CheckConsistencyException e) { - logger.warn("Failed to check consistency.", e); - } - }) - .start(); - } - } - stoppedMemberManager.put(header, dataGroupMember); - logger.info( - "Data group member has been removed, header {}, group is {}.", - header, - dataGroupMember.getAllNodes()); - } - - /** - * Set the partition table as the in-use one and build a DataGroupMember for each local group (the - * group which the local node is in) and start them.
- */ - @SuppressWarnings("java:S1135") - public void buildDataGroupMembers(PartitionTable partitionTable) { - setPartitionTable(partitionTable); - // TODO-Cluster: if there are unchanged members, do not stop and restart them - // clear previous members if the partition table is reloaded - for (DataGroupMember value : headerGroupMap.values()) { - value.stop(); - } - - for (DataGroupMember value : headerGroupMap.values()) { - value.setUnchanged(false); - } - - List partitionGroups = partitionTable.getLocalGroups(); - for (PartitionGroup partitionGroup : partitionGroups) { - RaftNode header = partitionGroup.getHeader(); - DataGroupMember prevMember = headerGroupMap.get(header); - if (prevMember == null || !prevMember.getAllNodes().equals(partitionGroup)) { - logger.info("Building member of data group: {}", partitionGroup); - // no previous member or member changed - DataGroupMember dataGroupMember = dataMemberFactory.create(partitionGroup); - // the previous member will be replaced here - addDataGroupMember(dataGroupMember, header); - dataGroupMember.setUnchanged(true); - } else { - prevMember.setUnchanged(true); - prevMember.start(); - // TODO do we nedd call other functions in addDataGroupMember() ? - } - } - - // remove out-dated members of this node - headerGroupMap.entrySet().removeIf(e -> !e.getValue().isUnchanged()); - - logger.info("Data group members are ready"); - } - - public void preRemoveNodeForDataGroup(RemoveNodeLog log, DataGroupMember targetDataGroupMember) { - - // Make sure the previous add/remove node log has applied - metaGroupMember.syncLocalApply(log.getMetaLogIndex() - 1, false); - - // Check the validity of the partition table - if (!metaGroupMember.getPartitionTable().deserialize(log.getPartitionTable())) { - return; - } - - logger.debug( - "Pre removing a node {} from {}", - log.getRemovedNode(), - targetDataGroupMember.getAllNodes()); - targetDataGroupMember.preRemoveNode(log.getRemovedNode()); - } - - /** - * Try removing a node from the groups of each DataGroupMember. If the node is the header of some - * group, set the member to read only so that it can still provide data for other nodes that has - * not yet pulled its data. Otherwise, just change the node list of the member and pull new data. - * And create a new DataGroupMember if this node should join a new group because of this removal. 
- * - * @param node - * @param removalResult cluster changes due to the node removal - */ - public void removeNode(Node node, NodeRemovalResult removalResult) { - Iterator> entryIterator = headerGroupMap.entrySet().iterator(); - synchronized (headerGroupMap) { - while (entryIterator.hasNext()) { - Entry entry = entryIterator.next(); - DataGroupMember dataGroupMember = entry.getValue(); - if (dataGroupMember.getHeader().getNode().equals(node) || node.equals(thisNode)) { - entryIterator.remove(); - removeMember( - entry.getKey(), dataGroupMember, dataGroupMember.getHeader().getNode().equals(node)); - } else { - // the group should be updated - dataGroupMember.removeNode(node); - } - } - - if (logger.isDebugEnabled()) { - logger.debug( - "Data cluster server: start to handle new groups and pulling data when removing node {}", - node); - } - // if the removed group contains the local node, the local node should join a new group to - // preserve the replication number - for (PartitionGroup group : partitionTable.getLocalGroups()) { - RaftNode header = group.getHeader(); - if (!headerGroupMap.containsKey(header)) { - logger.info("{} should join a new group {}", thisNode, group); - DataGroupMember dataGroupMember = dataMemberFactory.create(group); - addDataGroupMember(dataGroupMember, header); - } - // pull new slots from the removed node - headerGroupMap.get(header).pullSlots(removalResult); - } - } - } - - public void setPartitionTable(PartitionTable partitionTable) { - this.partitionTable = partitionTable; - } - - /** @return The reports of every DataGroupMember in this node. */ - public List genMemberReports() { - List dataMemberReports = new ArrayList<>(); - for (DataGroupMember value : headerGroupMap.values()) { - - dataMemberReports.add(value.genReport()); - } - return dataMemberReports; - } - - public Map getHeaderGroupMap() { - return headerGroupMap; - } - - public static void setProtocolFactory(TProtocolFactory protocolFactory) { - DataGroupEngine.protocolFactory = protocolFactory; - } - - public static void setMetaGroupMember(MetaGroupMember metaGroupMember) { - DataGroupEngine.metaGroupMember = metaGroupMember; - } - - private static class InstanceHolder { - private InstanceHolder() {} - - private static final DataGroupEngine Instance = new DataGroupEngine(); - } - - @Override - public String getHeaderGroupMapAsString() { - return headerGroupMap.toString(); - } - - @Override - public int getAsyncServiceMapSize() { - return asyncServiceMap.size(); - } - - @Override - public int getSyncServiceMapSize() { - return syncServiceMap.size(); - } - - @Override - public String getPartitionTable() { - return partitionTable.toString(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataGroupEngineMBean.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataGroupEngineMBean.java deleted file mode 100644 index b7bfccdef468..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataGroupEngineMBean.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.service; - -public interface DataGroupEngineMBean { - - String getHeaderGroupMapAsString(); - - int getAsyncServiceMapSize(); - - int getSyncServiceMapSize(); - - String getPartitionTable(); -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataGroupServiceImpls.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataGroupServiceImpls.java deleted file mode 100644 index adad811823b6..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataGroupServiceImpls.java +++ /dev/null @@ -1,749 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.service; - -import org.apache.iotdb.cluster.rpc.thrift.AppendEntriesRequest; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.ElectionRequest; -import org.apache.iotdb.cluster.rpc.thrift.ExecutNonQueryReq; -import org.apache.iotdb.cluster.rpc.thrift.GetAggrResultRequest; -import org.apache.iotdb.cluster.rpc.thrift.GetAllPathsResult; -import org.apache.iotdb.cluster.rpc.thrift.GroupByRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatResponse; -import org.apache.iotdb.cluster.rpc.thrift.LastQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.MeasurementSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.MultSeriesQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PreviousFillRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaResp; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotResp; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RequestCommitIndexResponse; -import org.apache.iotdb.cluster.rpc.thrift.SendSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.SingleSeriesQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.TSDataService; -import org.apache.iotdb.cluster.utils.IOUtils; -import org.apache.iotdb.common.rpc.thrift.TSStatus; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.file.Files; -import java.util.List; -import java.util.Map; -import java.util.Set; - -public class DataGroupServiceImpls implements TSDataService.AsyncIface, TSDataService.Iface { - - @Override - public void sendHeartbeat( - HeartBeatRequest request, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(request.getHeader(), resultHandler, request); - if (service != null) { - service.sendHeartbeat(request, resultHandler); - } - } - - @Override - public void startElection(ElectionRequest request, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(request.getHeader(), resultHandler, request); - if (service != null) { - service.startElection(request, resultHandler); - } - } - - @Override - public void appendEntries(AppendEntriesRequest request, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(request.getHeader(), resultHandler, request); - if (service != null) { - service.appendEntries(request, resultHandler); - } - } - - @Override - public void appendEntry(AppendEntryRequest request, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(request.getHeader(), resultHandler, request); - if (service != null) { - service.appendEntry(request, resultHandler); - } - } - - @Override - public void sendSnapshot(SendSnapshotRequest request, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(request.getHeader(), resultHandler, request); - if (service != null) { - service.sendSnapshot(request, 
resultHandler); - } - } - - @Override - public void pullSnapshot( - PullSnapshotRequest request, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(request.getHeader(), resultHandler, request); - if (service != null) { - service.pullSnapshot(request, resultHandler); - } - } - - @Override - public void executeNonQueryPlan( - ExecutNonQueryReq request, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(request.getHeader(), resultHandler, request); - if (service != null) { - service.executeNonQueryPlan(request, resultHandler); - } - } - - @Override - public void requestCommitIndex( - RaftNode header, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(header, resultHandler, "Request commit index"); - if (service != null) { - service.requestCommitIndex(header, resultHandler); - } - } - - @Override - public void readFile( - String filePath, long offset, int length, AsyncMethodCallback resultHandler) { - try { - resultHandler.onComplete(IOUtils.readFile(filePath, offset, length)); - } catch (IOException e) { - resultHandler.onError(e); - } - } - - @Override - public void querySingleSeries( - SingleSeriesQueryRequest request, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService( - request.getHeader(), resultHandler, "Query series:" + request.getPath()); - if (service != null) { - service.querySingleSeries(request, resultHandler); - } - } - - @Override - public void queryMultSeries( - MultSeriesQueryRequest request, AsyncMethodCallback resultHandler) throws TException { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService( - request.getHeader(), resultHandler, "Query series:" + request.getPath()); - if (service != null) { - service.queryMultSeries(request, resultHandler); - } - } - - @Override - public void fetchSingleSeries( - RaftNode header, long readerId, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(header, resultHandler, "Fetch reader:" + readerId); - if (service != null) { - service.fetchSingleSeries(header, readerId, resultHandler); - } - } - - @Override - public void fetchMultSeries( - RaftNode header, - long readerId, - List paths, - AsyncMethodCallback> resultHandler) - throws TException { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(header, resultHandler, "Fetch reader:" + readerId); - if (service != null) { - service.fetchMultSeries(header, readerId, paths, resultHandler); - } - } - - @Override - public void getAllPaths( - RaftNode header, - List paths, - boolean withAlias, - AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(header, resultHandler, "Find path:" + paths); - if (service != null) { - service.getAllPaths(header, paths, withAlias, resultHandler); - } - } - - @Override - public void endQuery( - RaftNode header, Node thisNode, long queryId, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance().getDataAsyncService(header, resultHandler, "End query"); - if (service != null) { - service.endQuery(header, thisNode, queryId, resultHandler); - } - } - - @Override - public void querySingleSeriesByTimestamp( - SingleSeriesQueryRequest request, 
AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService( - request.getHeader(), - resultHandler, - "Query by timestamp:" - + request.getQueryId() - + "#" - + request.getPath() - + " of " - + request.getRequester()); - if (service != null) { - service.querySingleSeriesByTimestamp(request, resultHandler); - } - } - - @Override - public void fetchSingleSeriesByTimestamps( - RaftNode header, - long readerId, - List timestamps, - AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(header, resultHandler, "Fetch by timestamp:" + readerId); - if (service != null) { - service.fetchSingleSeriesByTimestamps(header, readerId, timestamps, resultHandler); - } - } - - @Override - public void pullTimeSeriesSchema( - PullSchemaRequest request, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(request.getHeader(), resultHandler, request); - if (service != null) { - service.pullTimeSeriesSchema(request, resultHandler); - } - } - - @Override - public void pullMeasurementSchema( - PullSchemaRequest request, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(request.getHeader(), resultHandler, "Pull measurement schema"); - if (service != null) { - service.pullMeasurementSchema(request, resultHandler); - } - } - - @Override - public void getAllDevices( - RaftNode header, - List paths, - boolean isPrefixMatch, - AsyncMethodCallback> resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance().getDataAsyncService(header, resultHandler, "Get all devices"); - if (service != null) { - service.getAllDevices(header, paths, isPrefixMatch, resultHandler); - } - } - - @Override - public void getDevices( - RaftNode header, ByteBuffer planBinary, AsyncMethodCallback resultHandler) - throws TException { - DataAsyncService service = - DataGroupEngine.getInstance().getDataAsyncService(header, resultHandler, "get devices"); - if (service != null) { - service.getDevices(header, planBinary, resultHandler); - } - } - - @Override - public void getNodeList( - RaftNode header, - String path, - int nodeLevel, - AsyncMethodCallback> resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance().getDataAsyncService(header, resultHandler, "Get node list"); - if (service != null) { - service.getNodeList(header, path, nodeLevel, resultHandler); - } - } - - @Override - public void getChildNodeInNextLevel( - RaftNode header, String path, AsyncMethodCallback> resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(header, resultHandler, "Get child node in next level"); - if (service != null) { - service.getChildNodeInNextLevel(header, path, resultHandler); - } - } - - @Override - public void getChildNodePathInNextLevel( - RaftNode header, String path, AsyncMethodCallback> resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(header, resultHandler, "Get child node path in next level"); - if (service != null) { - service.getChildNodePathInNextLevel(header, path, resultHandler); - } - } - - @Override - public void getAllMeasurementSchema( - MeasurementSchemaRequest request, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(request.getHeader(), resultHandler, "Get all measurement 
schema"); - if (service != null) { - service.getAllMeasurementSchema(request, resultHandler); - } - } - - @Override - public void getAggrResult( - GetAggrResultRequest request, AsyncMethodCallback> resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(request.getHeader(), resultHandler, request); - if (service != null) { - service.getAggrResult(request, resultHandler); - } - } - - @Override - public void getUnregisteredTimeseries( - RaftNode header, - List timeseriesList, - AsyncMethodCallback> resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(header, resultHandler, "Check if measurements are registered"); - if (service != null) { - service.getUnregisteredTimeseries(header, timeseriesList, resultHandler); - } - } - - @Override - public void getGroupByExecutor(GroupByRequest request, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(request.getHeader(), resultHandler, request); - if (service != null) { - service.getGroupByExecutor(request, resultHandler); - } - } - - @Override - public void getGroupByResult( - RaftNode header, - long executorId, - long startTime, - long endTime, - AsyncMethodCallback> resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance().getDataAsyncService(header, resultHandler, "Fetch group by"); - if (service != null) { - service.getGroupByResult(header, executorId, startTime, endTime, resultHandler); - } - } - - @Override - public void previousFill( - PreviousFillRequest request, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(request.getHeader(), resultHandler, request); - if (service != null) { - service.previousFill(request, resultHandler); - } - } - - @Override - public void matchTerm( - long index, long term, RaftNode header, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance().getDataAsyncService(header, resultHandler, "Match term"); - if (service != null) { - service.matchTerm(index, term, header, resultHandler); - } - } - - @Override - public void last(LastQueryRequest request, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(request.getHeader(), resultHandler, "last"); - if (service != null) { - service.last(request, resultHandler); - } - } - - @Override - public void getPathCount( - RaftNode header, - List pathsToQuery, - int level, - AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance().getDataAsyncService(header, resultHandler, "count path"); - if (service != null) { - service.getPathCount(header, pathsToQuery, level, resultHandler); - } - } - - @Override - public void getDeviceCount( - RaftNode header, List pathsToQuery, AsyncMethodCallback resultHandler) - throws TException { - DataAsyncService service = - DataGroupEngine.getInstance().getDataAsyncService(header, resultHandler, "count device"); - if (service != null) { - service.getDeviceCount(header, pathsToQuery, resultHandler); - } - } - - @Override - public void onSnapshotApplied( - RaftNode header, List slots, AsyncMethodCallback resultHandler) { - DataAsyncService service = - DataGroupEngine.getInstance() - .getDataAsyncService(header, resultHandler, "Snapshot applied"); - if (service != null) { - service.onSnapshotApplied(header, slots, resultHandler); - } - } - - @Override - 
public long querySingleSeries(SingleSeriesQueryRequest request) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .querySingleSeries(request); - } - - @Override - public long queryMultSeries(MultSeriesQueryRequest request) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .queryMultSeries(request); - } - - @Override - public ByteBuffer fetchSingleSeries(RaftNode header, long readerId) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(header) - .fetchSingleSeries(header, readerId); - } - - @Override - public Map fetchMultSeries(RaftNode header, long readerId, List paths) - throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(header) - .fetchMultSeries(header, readerId, paths); - } - - @Override - public long querySingleSeriesByTimestamp(SingleSeriesQueryRequest request) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .querySingleSeriesByTimestamp(request); - } - - @Override - public ByteBuffer fetchSingleSeriesByTimestamps( - RaftNode header, long readerId, List timestamps) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(header) - .fetchSingleSeriesByTimestamps(header, readerId, timestamps); - } - - @Override - public void endQuery(RaftNode header, Node thisNode, long queryId) throws TException { - DataGroupEngine.getInstance().getDataSyncService(header).endQuery(header, thisNode, queryId); - } - - @Override - public GetAllPathsResult getAllPaths(RaftNode header, List path, boolean withAlias) - throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(header) - .getAllPaths(header, path, withAlias); - } - - @Override - public Set getAllDevices(RaftNode header, List path, boolean isPrefixMatch) - throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(header) - .getAllDevices(header, path, isPrefixMatch); - } - - @Override - public List getNodeList(RaftNode header, String path, int nodeLevel) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(header) - .getNodeList(header, path, nodeLevel); - } - - @Override - public Set getChildNodeInNextLevel(RaftNode header, String path) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(header) - .getChildNodeInNextLevel(header, path); - } - - @Override - public Set getChildNodePathInNextLevel(RaftNode header, String path) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(header) - .getChildNodePathInNextLevel(header, path); - } - - @Override - public ByteBuffer getAllMeasurementSchema(MeasurementSchemaRequest request) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .getAllMeasurementSchema(request); - } - - @Override - public ByteBuffer getDevices(RaftNode header, ByteBuffer planBinary) throws TException { - return DataGroupEngine.getInstance().getDataSyncService(header).getDevices(header, planBinary); - } - - @Override - public List getAggrResult(GetAggrResultRequest request) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .getAggrResult(request); - } - - @Override - public List getUnregisteredTimeseries(RaftNode header, List timeseriesList) - throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(header) - 
.getUnregisteredTimeseries(header, timeseriesList); - } - - @Override - public PullSnapshotResp pullSnapshot(PullSnapshotRequest request) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .pullSnapshot(request); - } - - @Override - public long getGroupByExecutor(GroupByRequest request) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .getGroupByExecutor(request); - } - - @Override - public List getGroupByResult( - RaftNode header, long executorId, long startTime, long endTime) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(header) - .getGroupByResult(header, executorId, startTime, endTime); - } - - @Override - public PullSchemaResp pullTimeSeriesSchema(PullSchemaRequest request) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .pullTimeSeriesSchema(request); - } - - @Override - public PullSchemaResp pullMeasurementSchema(PullSchemaRequest request) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .pullMeasurementSchema(request); - } - - @Override - public ByteBuffer previousFill(PreviousFillRequest request) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .previousFill(request); - } - - @Override - public ByteBuffer last(LastQueryRequest request) throws TException { - return DataGroupEngine.getInstance().getDataSyncService(request.getHeader()).last(request); - } - - @Override - public int getPathCount(RaftNode header, List pathsToQuery, int level) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(header) - .getPathCount(header, pathsToQuery, level); - } - - @Override - public boolean onSnapshotApplied(RaftNode header, List slots) { - return DataGroupEngine.getInstance() - .getDataSyncService(header) - .onSnapshotApplied(header, slots); - } - - @Override - public int getDeviceCount(RaftNode header, List pathsToQuery) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(header) - .getDeviceCount(header, pathsToQuery); - } - - @Override - public HeartBeatResponse sendHeartbeat(HeartBeatRequest request) { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .sendHeartbeat(request); - } - - @Override - public long startElection(ElectionRequest request) { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .startElection(request); - } - - @Override - public long appendEntries(AppendEntriesRequest request) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .appendEntries(request); - } - - @Override - public long appendEntry(AppendEntryRequest request) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .appendEntry(request); - } - - @Override - public void sendSnapshot(SendSnapshotRequest request) throws TException { - DataGroupEngine.getInstance().getDataSyncService(request.getHeader()).sendSnapshot(request); - } - - @Override - public TSStatus executeNonQueryPlan(ExecutNonQueryReq request) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(request.getHeader()) - .executeNonQueryPlan(request); - } - - @Override - public RequestCommitIndexResponse requestCommitIndex(RaftNode header) throws TException { - return 
DataGroupEngine.getInstance().getDataSyncService(header).requestCommitIndex(header); - } - - @Override - public ByteBuffer readFile(String filePath, long offset, int length) throws TException { - try { - return IOUtils.readFile(filePath, offset, length); - } catch (IOException e) { - throw new TException(e); - } - } - - @Override - public boolean matchTerm(long index, long term, RaftNode header) { - return DataGroupEngine.getInstance().getDataSyncService(header).matchTerm(index, term, header); - } - - @Override - public ByteBuffer peekNextNotNullValue( - RaftNode header, long executorId, long startTime, long endTime) throws TException { - return DataGroupEngine.getInstance() - .getDataSyncService(header) - .peekNextNotNullValue(header, executorId, startTime, endTime); - } - - @Override - public void peekNextNotNullValue( - RaftNode header, - long executorId, - long startTime, - long endTime, - AsyncMethodCallback resultHandler) - throws TException { - resultHandler.onComplete( - DataGroupEngine.getInstance() - .getDataSyncService(header) - .peekNextNotNullValue(header, executorId, startTime, endTime)); - } - - @Override - public void removeHardLink(String hardLinkPath) throws TException { - try { - Files.deleteIfExists(new File(hardLinkPath).toPath()); - } catch (IOException e) { - throw new TException(e); - } - } - - @Override - public void removeHardLink(String hardLinkPath, AsyncMethodCallback resultHandler) { - try { - Files.deleteIfExists(new File(hardLinkPath).toPath()); - resultHandler.onComplete(null); - } catch (IOException e) { - resultHandler.onError(e); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataSyncService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataSyncService.java deleted file mode 100644 index 8e771653b53e..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/DataSyncService.java +++ /dev/null @@ -1,458 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.service; - -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.LeaderUnknownException; -import org.apache.iotdb.cluster.exception.ReaderNotFoundException; -import org.apache.iotdb.cluster.metadata.CSchemaProcessor; -import org.apache.iotdb.cluster.rpc.thrift.GetAggrResultRequest; -import org.apache.iotdb.cluster.rpc.thrift.GetAllPathsResult; -import org.apache.iotdb.cluster.rpc.thrift.GroupByRequest; -import org.apache.iotdb.cluster.rpc.thrift.LastQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.MeasurementSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.MultSeriesQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PreviousFillRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaResp; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotResp; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.SendSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.SingleSeriesQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.TSDataService; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.utils.ClientUtils; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.tsfile.exception.filter.StatisticsClassException; -import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException; - -import org.apache.thrift.TApplicationException; -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Set; - -public class DataSyncService extends BaseSyncService implements TSDataService.Iface { - - private static final Logger logger = LoggerFactory.getLogger(DataSyncService.class); - private DataGroupMember dataGroupMember; - - public DataSyncService(DataGroupMember member) { - super(member); - this.dataGroupMember = member; - } - - @Override - public void sendSnapshot(SendSnapshotRequest request) throws TException { - try { - dataGroupMember.receiveSnapshot(request); - } catch (Exception e) { - throw new TException(e); - } - } - - @Override - public PullSnapshotResp pullSnapshot(PullSnapshotRequest request) throws TException { - PullSnapshotResp pullSnapshotResp; - try { - pullSnapshotResp = dataGroupMember.getSnapshot(request); - } catch (IOException e) { - throw new TException(e); - } - if (pullSnapshotResp == null) { - return forwardPullSnapshot(request); - } else { - return pullSnapshotResp; - } - } - - private PullSnapshotResp forwardPullSnapshot(PullSnapshotRequest request) throws TException { - // if this node has been set readOnly, then it must have been synchronized with the leader - // otherwise forward the request to the leader - if (dataGroupMember.getLeader() != null - && 
!ClusterConstant.EMPTY_NODE.equals(dataGroupMember.getLeader())) { - logger.debug( - "{} forwarding a pull snapshot request to the leader {}", - name, - dataGroupMember.getLeader()); - SyncDataClient client = - (SyncDataClient) dataGroupMember.getSyncClient(dataGroupMember.getLeader()); - if (client == null) { - logger.error("{}, can not get the client for node={}", name, dataGroupMember.getLeader()); - throw new TException(new LeaderUnknownException(dataGroupMember.getAllNodes())); - } - PullSnapshotResp pullSnapshotResp = null; - try { - pullSnapshotResp = client.pullSnapshot(request); - } catch (TException e) { - client.getInputProtocol().getTransport().close(); - throw e; - } finally { - ClientUtils.putBackSyncClient(client); - } - return pullSnapshotResp; - } else { - throw new TException(new LeaderUnknownException(dataGroupMember.getAllNodes())); - } - } - - /** - * Forward the request to the leader and return the schema, whose measurement Id is the series full - * path. - * - * @param request the pull request - * @return response pull schema resp - * @throws TException remind of thrift - */ - @Override - public PullSchemaResp pullTimeSeriesSchema(PullSchemaRequest request) throws TException { - if (dataGroupMember.getCharacter() == NodeCharacter.LEADER) { - try { - return dataGroupMember.getLocalQueryExecutor().queryTimeSeriesSchema(request); - } catch (CheckConsistencyException | MetadataException e) { - // maybe the partition table of this node is not up-to-date, try again after updating - // partition table - try { - dataGroupMember.getMetaGroupMember().syncLeaderWithConsistencyCheck(false); - return dataGroupMember.getLocalQueryExecutor().queryTimeSeriesSchema(request); - } catch (CheckConsistencyException | MetadataException ex) { - throw new TException(ex); - } - } - } - - // forward the request to the leader - dataGroupMember.waitLeader(); - SyncDataClient client = - (SyncDataClient) dataGroupMember.getSyncClient(dataGroupMember.getLeader()); - if (client == null) { - throw new TException(new LeaderUnknownException(dataGroupMember.getAllNodes())); - } - PullSchemaResp pullSchemaResp; - try { - pullSchemaResp = client.pullTimeSeriesSchema(request); - } catch (TException te) { - client.getInputProtocol().getTransport().close(); - throw te; - } finally { - ClientUtils.putBackSyncClient(client); - } - return pullSchemaResp; - } - - /** - * Forward the request to the leader and return the schema, whose measurement Id is the series name.
- * - * @param request the pull request - * @return response pull schema resp - * @throws TException remind of thrift - */ - @Override - public PullSchemaResp pullMeasurementSchema(PullSchemaRequest request) throws TException { - if (dataGroupMember.getCharacter() == NodeCharacter.LEADER) { - try { - return dataGroupMember.getLocalQueryExecutor().queryMeasurementSchema(request); - } catch (CheckConsistencyException | MetadataException e) { - // maybe the partition table of this node is not up-to-date, try again after updating - // partition table - try { - dataGroupMember.getMetaGroupMember().syncLeaderWithConsistencyCheck(false); - return dataGroupMember.getLocalQueryExecutor().queryMeasurementSchema(request); - } catch (CheckConsistencyException | MetadataException ex) { - throw new TException(ex); - } - } - } - - // forward the request to the leader - dataGroupMember.waitLeader(); - SyncDataClient client = - (SyncDataClient) dataGroupMember.getSyncClient(dataGroupMember.getLeader()); - if (client == null) { - throw new TException(new LeaderUnknownException(dataGroupMember.getAllNodes())); - } - PullSchemaResp pullSchemaResp; - try { - pullSchemaResp = client.pullMeasurementSchema(request); - } catch (TException te) { - client.getInputProtocol().getTransport().close(); - throw te; - } finally { - ClientUtils.putBackSyncClient(client); - } - return pullSchemaResp; - } - - @Override - public long querySingleSeries(SingleSeriesQueryRequest request) throws TException { - try { - return dataGroupMember.getLocalQueryExecutor().querySingleSeries(request); - } catch (Exception e) { - throw new TException(e); - } - } - - @Override - public long queryMultSeries(MultSeriesQueryRequest request) throws TException { - try { - return dataGroupMember.getLocalQueryExecutor().queryMultSeries(request); - } catch (Exception e) { - throw new TException(e); - } - } - - @Override - public long querySingleSeriesByTimestamp(SingleSeriesQueryRequest request) throws TException { - try { - return dataGroupMember.getLocalQueryExecutor().querySingleSeriesByTimestamp(request); - } catch (Exception e) { - throw new TException(e); - } - } - - @Override - public void endQuery(RaftNode header, Node requester, long queryId) throws TException { - try { - dataGroupMember.getQueryManager().endQuery(requester, queryId); - } catch (StorageEngineException e) { - throw new TException(e); - } - } - - @Override - public ByteBuffer fetchSingleSeries(RaftNode header, long readerId) throws TException { - try { - return dataGroupMember.getLocalQueryExecutor().fetchSingleSeries(readerId); - } catch (ReaderNotFoundException | IOException e) { - throw new TException(e); - } - } - - @Override - public Map fetchMultSeries(RaftNode header, long readerId, List paths) - throws TException { - try { - return dataGroupMember.getLocalQueryExecutor().fetchMultSeries(readerId, paths); - } catch (ReaderNotFoundException | IOException e) { - throw new TException(e); - } - } - - @Override - public ByteBuffer fetchSingleSeriesByTimestamps( - RaftNode header, long readerId, List timestamps) throws TException { - try { - return dataGroupMember - .getLocalQueryExecutor() - .fetchSingleSeriesByTimestamps( - readerId, timestamps.stream().mapToLong(k -> k).toArray(), timestamps.size()); - } catch (ReaderNotFoundException | IOException e) { - throw new TException(e); - } - } - - @Override - public GetAllPathsResult getAllPaths(RaftNode header, List paths, boolean withAlias) - throws TException { - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); 
- return ((CSchemaProcessor) IoTDB.schemaProcessor).getAllPaths(paths, withAlias); - } catch (MetadataException | CheckConsistencyException e) { - throw new TException(e); - } - } - - @Override - public Set getAllDevices(RaftNode header, List path, boolean isPrefixMatch) - throws TException { - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - return ((CSchemaProcessor) IoTDB.schemaProcessor).getAllDevices(path, isPrefixMatch); - } catch (MetadataException | CheckConsistencyException e) { - throw new TException(e); - } - } - - @Override - public ByteBuffer getDevices(RaftNode header, ByteBuffer planBinary) throws TException { - try { - return dataGroupMember.getLocalQueryExecutor().getDevices(planBinary); - } catch (CheckConsistencyException | IOException | MetadataException e) { - throw new TException(e); - } - } - - @Override - public List getNodeList(RaftNode header, String path, int nodeLevel) throws TException { - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - return ((CSchemaProcessor) IoTDB.schemaProcessor).getNodeList(path, nodeLevel); - } catch (CheckConsistencyException | MetadataException e) { - throw new TException(e); - } - } - - @Override - public Set getChildNodeInNextLevel(RaftNode header, String path) throws TException { - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - return ((CSchemaProcessor) IoTDB.schemaProcessor).getChildNodeInNextLevel(path); - } catch (CheckConsistencyException | MetadataException e) { - throw new TException(e); - } - } - - @Override - public Set getChildNodePathInNextLevel(RaftNode header, String path) throws TException { - try { - dataGroupMember.syncLeaderWithConsistencyCheck(false); - return ((CSchemaProcessor) IoTDB.schemaProcessor).getChildNodePathInNextLevel(path); - } catch (CheckConsistencyException | MetadataException e) { - throw new TException(e); - } - } - - @Override - public ByteBuffer getAllMeasurementSchema(MeasurementSchemaRequest request) throws TException { - try { - return dataGroupMember.getLocalQueryExecutor().getAllMeasurementSchema(request); - } catch (CheckConsistencyException | IOException | MetadataException e) { - throw new TException(e); - } - } - - @Override - public List getAggrResult(GetAggrResultRequest request) throws TException { - try { - return dataGroupMember.getLocalQueryExecutor().getAggrResult(request); - } catch (StorageEngineException - | QueryProcessException - | IOException - | StatisticsClassException - | UnSupportedDataTypeException e) { - throw new TApplicationException(e.getMessage()); - } - } - - @Override - public List getUnregisteredTimeseries(RaftNode header, List timeseriesList) - throws TException { - try { - return dataGroupMember.getLocalQueryExecutor().getUnregisteredTimeseries(timeseriesList); - } catch (CheckConsistencyException e) { - throw new TException(e); - } - } - - @Override - public long getGroupByExecutor(GroupByRequest request) throws TException { - try { - return dataGroupMember.getLocalQueryExecutor().getGroupByExecutor(request); - } catch (QueryProcessException | StorageEngineException e) { - throw new TException(e); - } - } - - @Override - public List getGroupByResult( - RaftNode header, long executorId, long startTime, long endTime) throws TException { - try { - return dataGroupMember - .getLocalQueryExecutor() - .getGroupByResult(executorId, startTime, endTime); - } catch (ReaderNotFoundException | IOException | QueryProcessException e) { - throw new TException(e); - } - } - - @Override - public ByteBuffer 
previousFill(PreviousFillRequest request) throws TException { - try { - return dataGroupMember.getLocalQueryExecutor().previousFill(request); - } catch (QueryProcessException - | StorageEngineException - | IOException - | IllegalPathException e) { - throw new TException(e); - } - } - - @Override - public ByteBuffer last(LastQueryRequest request) throws TException { - try { - return dataGroupMember.getLocalQueryExecutor().last(request); - } catch (CheckConsistencyException - | QueryProcessException - | IOException - | StorageEngineException - | MetadataException e) { - throw new TException(e); - } - } - - @Override - public int getPathCount(RaftNode header, List pathsToQuery, int level) throws TException { - try { - return dataGroupMember.getLocalQueryExecutor().getPathCount(pathsToQuery, level); - } catch (CheckConsistencyException | MetadataException e) { - throw new TException(e); - } - } - - @Override - public int getDeviceCount(RaftNode header, List pathsToQuery) throws TException { - try { - return dataGroupMember.getLocalQueryExecutor().getDeviceCount(pathsToQuery); - } catch (CheckConsistencyException | MetadataException e) { - throw new TException(e); - } - } - - @Override - public boolean onSnapshotApplied(RaftNode header, List slots) { - return dataGroupMember.onSnapshotInstalled(slots); - } - - @Override - public ByteBuffer peekNextNotNullValue( - RaftNode header, long executorId, long startTime, long endTime) throws TException { - try { - return dataGroupMember - .getLocalQueryExecutor() - .peekNextNotNullValue(executorId, startTime, endTime); - } catch (ReaderNotFoundException | IOException e) { - throw new TException(e); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/MetaAsyncService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/service/MetaAsyncService.java deleted file mode 100644 index 659f953d910a..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/MetaAsyncService.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.service; - -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.exception.AddSelfException; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.LeaderUnknownException; -import org.apache.iotdb.cluster.exception.LogExecutionException; -import org.apache.iotdb.cluster.exception.PartitionTableUnavailableException; -import org.apache.iotdb.cluster.log.logtypes.RemoveNodeLog; -import org.apache.iotdb.cluster.rpc.thrift.AddNodeResponse; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.CheckStatusResponse; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.SendSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.StartUpStatus; -import org.apache.iotdb.cluster.rpc.thrift.TNodeStatus; -import org.apache.iotdb.cluster.rpc.thrift.TSMetaService; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.utils.ClusterUtils; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.ByteBuffer; - -public class MetaAsyncService extends BaseAsyncService implements TSMetaService.AsyncIface { - private static final String ERROR_MSG_META_NOT_READY = "The metadata is not ready."; - private static final Logger logger = LoggerFactory.getLogger(MetaAsyncService.class); - - private MetaGroupMember metaGroupMember; - - public MetaAsyncService(MetaGroupMember metaGroupMember) { - super(metaGroupMember); - this.metaGroupMember = metaGroupMember; - } - - @Override - public void appendEntry(AppendEntryRequest request, AsyncMethodCallback resultHandler) { - // if the metaGroupMember is not ready (e.g., as a follower the PartitionTable is loaded - // locally, but the partition table is not verified), we do not handle the RPC requests. - if (!metaGroupMember.isReady() && metaGroupMember.getPartitionTable() == null) { - // the only special case is that the leader will send an empty entry for letting followers - // submit previous log - // at this time, the partitionTable has been loaded but is not verified. So the RPC is not - // ready.
- // this node lacks information of the cluster and refuses to work - logger.debug("This node is blind to the cluster and cannot accept logs"); - resultHandler.onComplete(Response.RESPONSE_PARTITION_TABLE_UNAVAILABLE); - return; - } - - super.appendEntry(request, resultHandler); - } - - @Override - public void addNode( - Node node, StartUpStatus startUpStatus, AsyncMethodCallback resultHandler) { - if (!metaGroupMember.isReady()) { - logger.debug(ERROR_MSG_META_NOT_READY); - resultHandler.onError(new TException(ERROR_MSG_META_NOT_READY)); - return; - } - AddNodeResponse addNodeResponse = null; - try { - addNodeResponse = metaGroupMember.addNode(node, startUpStatus); - } catch (AddSelfException | LogExecutionException | CheckConsistencyException e) { - resultHandler.onError(e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - resultHandler.onError(e); - } - if (addNodeResponse != null) { - resultHandler.onComplete(addNodeResponse); - return; - } - - if (member.getCharacter() == NodeCharacter.FOLLOWER - && member.getLeader() != null - && !ClusterConstant.EMPTY_NODE.equals(member.getLeader())) { - logger.info("Forward the join request of {} to leader {}", node, member.getLeader()); - if (forwardAddNode(node, startUpStatus, resultHandler)) { - return; - } - } - resultHandler.onError(new LeaderUnknownException(member.getAllNodes())); - } - - @Override - public void sendSnapshot(SendSnapshotRequest request, AsyncMethodCallback resultHandler) { - try { - metaGroupMember.receiveSnapshot(request); - } catch (Exception e) { - resultHandler.onError(e); - return; - } - resultHandler.onComplete(null); - } - - @Override - public void checkStatus( - StartUpStatus startUpStatus, AsyncMethodCallback resultHandler) { - CheckStatusResponse response = - ClusterUtils.checkStatus(startUpStatus, metaGroupMember.getNewStartUpStatus()); - resultHandler.onComplete(response); - } - - /** - * Forward the join cluster request to the leader. - * - * @return true if the forwarding succeeds, false otherwise. - */ - private boolean forwardAddNode( - Node node, StartUpStatus startUpStatus, AsyncMethodCallback resultHandler) { - TSMetaService.AsyncClient client = - (TSMetaService.AsyncClient) metaGroupMember.getAsyncClient(metaGroupMember.getLeader()); - if (client != null) { - try { - client.addNode(node, startUpStatus, resultHandler); - return true; - } catch (TException e) { - logger.warn("Cannot connect to node {}", node, e); - } - } - return false; - } - - /** - * Return the status of the node to the requester that will help the requester figure out the load - * of this node and how well it may perform for a specific query.
- * - * @param resultHandler - */ - @Override - public void queryNodeStatus(AsyncMethodCallback resultHandler) { - resultHandler.onComplete(new TNodeStatus()); - } - - @Override - public void checkAlive(AsyncMethodCallback resultHandler) { - resultHandler.onComplete(metaGroupMember.getThisNode()); - } - - @Override - public void collectMigrationStatus(AsyncMethodCallback resultHandler) { - resultHandler.onComplete( - ClusterUtils.serializeMigrationStatus(metaGroupMember.collectMigrationStatus())); - } - - @Override - public void removeNode(Node node, AsyncMethodCallback resultHandler) { - if (!metaGroupMember.isReady()) { - logger.debug(ERROR_MSG_META_NOT_READY); - resultHandler.onError(new TException(ERROR_MSG_META_NOT_READY)); - return; - } - long result; - try { - result = metaGroupMember.removeNode(node); - } catch (PartitionTableUnavailableException - | LogExecutionException - | CheckConsistencyException e) { - logger.error("Can not remove node {}", node, e); - resultHandler.onError(e); - return; - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Can not remove node {}", node, e); - resultHandler.onError(e); - return; - } - - if (result != Response.RESPONSE_NULL) { - resultHandler.onComplete(result); - return; - } - - if (metaGroupMember.getCharacter() == NodeCharacter.FOLLOWER - && metaGroupMember.getLeader() != null) { - logger.info( - "Forward the node removal request of {} to leader {}", node, metaGroupMember.getLeader()); - if (forwardRemoveNode(node, resultHandler)) { - return; - } - } - resultHandler.onError(new LeaderUnknownException(metaGroupMember.getAllNodes())); - } - - /** - * Forward a node removal request to the leader. - * - * @param node the node to be removed - * @param resultHandler - * @return true if the request is successfully forwarded, false otherwise - */ - private boolean forwardRemoveNode(Node node, AsyncMethodCallback resultHandler) { - TSMetaService.AsyncClient client = - (TSMetaService.AsyncClient) metaGroupMember.getAsyncClient(metaGroupMember.getLeader()); - if (client != null) { - try { - client.removeNode(node, resultHandler); - return true; - } catch (TException e) { - logger.warn("Cannot connect to node {}", node, e); - } - } - return false; - } - - /** - * Process a request that the local node is removed from the cluster. As a node is removed from - * the cluster, it no longer receives heartbeats or logs and cannot know it has been removed, so we - * must tell it directly.
- * - * @param resultHandler - */ - @Override - public void exile(ByteBuffer removeNodeLogBuffer, AsyncMethodCallback resultHandler) { - logger.info("{}: start to exile.", name); - removeNodeLogBuffer.get(); - RemoveNodeLog removeNodeLog = new RemoveNodeLog(); - removeNodeLog.deserialize(removeNodeLogBuffer); - metaGroupMember.getPartitionTable().deserialize(removeNodeLog.getPartitionTable()); - metaGroupMember.applyRemoveNode(removeNodeLog); - resultHandler.onComplete(null); - } - - @Override - public void handshake(Node sender, AsyncMethodCallback resultHandler) { - metaGroupMember.handleHandshake(sender); - resultHandler.onComplete(null); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/MetaSyncService.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/service/MetaSyncService.java deleted file mode 100644 index f52b7c88303d..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/MetaSyncService.java +++ /dev/null @@ -1,253 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.service; - -import org.apache.iotdb.cluster.client.sync.SyncMetaClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.exception.AddSelfException; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.LeaderUnknownException; -import org.apache.iotdb.cluster.exception.LogExecutionException; -import org.apache.iotdb.cluster.exception.PartitionTableUnavailableException; -import org.apache.iotdb.cluster.log.logtypes.RemoveNodeLog; -import org.apache.iotdb.cluster.rpc.thrift.AddNodeResponse; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.CheckStatusResponse; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.SendSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.StartUpStatus; -import org.apache.iotdb.cluster.rpc.thrift.TNodeStatus; -import org.apache.iotdb.cluster.rpc.thrift.TSMetaService; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.utils.ClientUtils; -import org.apache.iotdb.cluster.utils.ClusterUtils; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.ByteBuffer; - -public class MetaSyncService extends BaseSyncService implements TSMetaService.Iface { - - private static final Logger logger = LoggerFactory.getLogger(MetaSyncService.class); - - private MetaGroupMember metaGroupMember; - - public MetaSyncService(MetaGroupMember metaGroupMember) { - super(metaGroupMember); - this.metaGroupMember = metaGroupMember; - } - - // behavior of followers - @Override - public long appendEntry(AppendEntryRequest request) throws TException { - // if the metaGroupMember is not ready (e.g., as a follower the PartitionTable is loaded - // locally, but the partition table is not verified), we do not handle the RPC requests. - if (!metaGroupMember.isReady()) { - // the only special case is that the leader will send an empty entry for letting followers - // submit previous log - // at this time, the partitionTable has been loaded but is not verified. So the PRC is not - // ready. - if (metaGroupMember.getPartitionTable() == null) { - // this node lacks information of the cluster and refuse to work - logger.debug("This node is blind to the cluster and cannot accept logs, {}", request); - return Response.RESPONSE_PARTITION_TABLE_UNAVAILABLE; - } else { - // do nothing because we consider if the partitionTable is loaded, then it is corrected. 
- } - } - - return super.appendEntry(request); - } - - private static final String ERROR_MSG_META_NOT_READY = "The metadata not is not ready."; - - @Override - public AddNodeResponse addNode(Node node, StartUpStatus startUpStatus) throws TException { - AddNodeResponse addNodeResponse; - if (!metaGroupMember.isReady()) { - logger.debug(ERROR_MSG_META_NOT_READY); - throw new TException(ERROR_MSG_META_NOT_READY); - } - - try { - addNodeResponse = metaGroupMember.addNode(node, startUpStatus); - } catch (AddSelfException | LogExecutionException | CheckConsistencyException e) { - throw new TException(e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new TException(e); - } - if (addNodeResponse != null) { - return addNodeResponse; - } - - if (member.getCharacter() == NodeCharacter.FOLLOWER - && member.getLeader() != null - && !ClusterConstant.EMPTY_NODE.equals(member.getLeader())) { - logger.info("Forward the join request of {} to leader {}", node, member.getLeader()); - addNodeResponse = forwardAddNode(node, startUpStatus); - if (addNodeResponse != null) { - return addNodeResponse; - } - } - throw new TException(new LeaderUnknownException(member.getAllNodes())); - } - - @Override - public void sendSnapshot(SendSnapshotRequest request) throws TException { - // even the meta engine is not ready, we still need to catch up. - try { - metaGroupMember.receiveSnapshot(request); - } catch (Exception e) { - throw new TException(e); - } - } - - @Override - public CheckStatusResponse checkStatus(StartUpStatus startUpStatus) { - // this method is called before the meta engine is ready. - return ClusterUtils.checkStatus(startUpStatus, metaGroupMember.getStartUpStatus()); - } - - /** - * Forward the join cluster request to the leader. - * - * @return true if the forwarding succeeds, false otherwise. - */ - private AddNodeResponse forwardAddNode(Node node, StartUpStatus startUpStatus) { - SyncMetaClient client = - (SyncMetaClient) metaGroupMember.getSyncClient(metaGroupMember.getLeader()); - if (client != null) { - try { - return client.addNode(node, startUpStatus); - } catch (TException e) { - client.getInputProtocol().getTransport().close(); - logger.warn("Cannot connect to node {}", node, e); - } finally { - ClientUtils.putBackSyncClient(client); - } - } - return null; - } - - /** - * Return the status of the node to the requester that will help the requester figure out the load - * of the this node and how well it may perform for a specific query. 
- * - * @return - */ - @Override - public TNodeStatus queryNodeStatus() { - return new TNodeStatus(); - } - - @Override - public Node checkAlive() { - return metaGroupMember.getThisNode(); - } - - @Override - public ByteBuffer collectMigrationStatus() { - return ClusterUtils.serializeMigrationStatus(metaGroupMember.collectMigrationStatus()); - } - - @Override - public long removeNode(Node node) throws TException { - if (!metaGroupMember.isReady()) { - logger.debug(ERROR_MSG_META_NOT_READY); - throw new TException(ERROR_MSG_META_NOT_READY); - } - - long result; - try { - result = metaGroupMember.removeNode(node); - } catch (PartitionTableUnavailableException - | LogExecutionException - | CheckConsistencyException e) { - logger.error("Can not remove node {}", node, e); - throw new TException(e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Can not remove node {}", node, e); - throw new TException(e); - } - - if (result != Response.RESPONSE_NULL) { - return result; - } - - if (metaGroupMember.getCharacter() == NodeCharacter.FOLLOWER - && metaGroupMember.getLeader() != null) { - logger.info( - "Forward the node removal request of {} to leader {}", node, metaGroupMember.getLeader()); - Long rst = forwardRemoveNode(node); - if (rst != null) { - return rst; - } - } - throw new TException(new LeaderUnknownException(metaGroupMember.getAllNodes())); - } - - /** - * Forward a node removal request to the leader. - * - * @param node the node to be removed - * @return true if the request is successfully forwarded, false otherwise - */ - private Long forwardRemoveNode(Node node) { - SyncMetaClient client = - (SyncMetaClient) metaGroupMember.getSyncClient(metaGroupMember.getLeader()); - if (client != null) { - try { - return client.removeNode(node); - } catch (TException e) { - client.getInputProtocol().getTransport().close(); - logger.warn("Cannot connect to node {}", node, e); - } finally { - ClientUtils.putBackSyncClient(client); - } - } - return null; - } - - /** - * Process a request that the local node is removed from the cluster. As a node is removed from - * the cluster, it no longer receive heartbeats or logs and cannot know it has been removed, so we - * must tell it directly. - */ - @Override - public void exile(ByteBuffer removeNodeLogBuffer) { - logger.info("{}: start to exile.", name); - removeNodeLogBuffer.get(); - RemoveNodeLog removeNodeLog = new RemoveNodeLog(); - removeNodeLog.deserialize(removeNodeLogBuffer); - metaGroupMember.getPartitionTable().deserialize(removeNodeLog.getPartitionTable()); - metaGroupMember.applyRemoveNode(removeNodeLog); - } - - @Override - public void handshake(Node sender) { - metaGroupMember.handleHandshake(sender); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/package-info.java b/cluster/src/main/java/org/apache/iotdb/cluster/server/service/package-info.java deleted file mode 100644 index dc1f3e824b2f..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/server/service/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * This package contains classes that implement Thrift interfaces by accessing corresponding - * RaftMembers. - */ -package org.apache.iotdb.cluster.server.service; diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClientUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClientUtils.java deleted file mode 100644 index a39e8799eb59..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClientUtils.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.utils; - -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncMetaClient; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; - -public class ClientUtils { - - private ClientUtils() { - // util class - } - - public static int getPort(Node node, ClientCategory category) { - int port = -1; - switch (category) { - case DATA: - port = node.getDataPort(); - break; - case DATA_HEARTBEAT: - port = node.getDataPort() + ClusterUtils.DATA_HEARTBEAT_PORT_OFFSET; - break; - case META: - port = node.getMetaPort(); - break; - case META_HEARTBEAT: - port = node.getMetaPort() + ClusterUtils.META_HEARTBEAT_PORT_OFFSET; - break; - case DATA_ASYNC_APPEND_CLIENT: - // special data client type - port = node.getDataPort(); - break; - default: - break; - } - return port; - } - - public static void putBackSyncClient(RaftService.Client client) { - if (client instanceof SyncMetaClient) { - ((SyncMetaClient) client).returnSelf(); - } else if (client instanceof SyncDataClient) { - ((SyncDataClient) client).returnSelf(); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClusterConsistent.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClusterConsistent.java deleted file mode 100644 index ef5d188a18d0..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClusterConsistent.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.utils; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** Configurations using this annotation should be the same across all nodes in a cluster. */ -@Target({ElementType.FIELD}) -@Retention(RetentionPolicy.SOURCE) -public @interface ClusterConsistent {} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClusterNode.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClusterNode.java deleted file mode 100644 index be87eb1d0466..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClusterNode.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.utils; - -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import java.util.Objects; - -/** - * ClusterNode overrides hashcode() and equals() in Node to avoid duplicates in hash data structures - * caused by identifier change. 
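An illustrative sketch, not taken from the removed code: it shows the deduplication behaviour the Javadoc above describes, assuming the thrift-generated Node keeps nodeIdentifier in its equals()/hashCode() (which is what makes the duplicates possible) and exposes the matching all-args constructor; the addresses and ports are made-up example values.

```java
import java.util.HashSet;
import java.util.Set;

import org.apache.iotdb.cluster.rpc.thrift.Node;
import org.apache.iotdb.cluster.utils.ClusterNode;

// Illustrative only: why ClusterNode ignores nodeIdentifier in equals()/hashCode().
public final class ClusterNodeDedupSketch {
  public static void main(String[] args) {
    // Same physical endpoint, but the node re-registered and received a new identifier.
    Node before = new Node("192.168.0.1", 9003, 1, 40010, 6667, "192.168.0.1");
    Node after  = new Node("192.168.0.1", 9003, 2, 40010, 6667, "192.168.0.1");

    Set<Node> rawNodes = new HashSet<>();
    rawNodes.add(before);
    rawNodes.add(after);
    // The generated equals() also compares nodeIdentifier, so both entries survive.
    System.out.println(rawNodes.size()); // 2

    Set<ClusterNode> dedupNodes = new HashSet<>();
    dedupNodes.add(new ClusterNode(before));
    dedupNodes.add(new ClusterNode(after));
    // ClusterNode compares only internalIp, ports and clientIp, so the records collapse.
    System.out.println(dedupNodes.size()); // 1
  }
}
```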
- */ -public class ClusterNode extends Node { - - public ClusterNode() {} - - public ClusterNode( - String internalIp, - int metaPort, - int nodeIdentifier, - int dataPort, - int clientPort, - String clientIp) { - super(internalIp, metaPort, nodeIdentifier, dataPort, clientPort, clientIp); - } - - public ClusterNode(Node other) { - super(other); - } - - @Override - public boolean equals(Object that) { - if (!(that instanceof ClusterNode)) { - return false; - } - return equals(((ClusterNode) that)); - } - - public boolean equals(ClusterNode that) { - return Objects.equals(this.internalIp, that.internalIp) - && this.dataPort == that.dataPort - && this.metaPort == that.metaPort - && this.clientPort == that.clientPort - && this.clientIp.equals(that.clientIp); - } - - @Override - public int hashCode() { - return Objects.hash(internalIp, metaPort, dataPort, clientPort, clientIp); - } - - @Override - public String toString() { - return "ClusterNode{" - + " internalIp='" - + internalIp - + "', metaPort=" - + metaPort - + ", nodeIdentifier=" - + nodeIdentifier - + ", dataPort=" - + dataPort - + ", clientPort=" - + clientPort - + ", clientIp='" - + clientIp - + "'}"; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClusterQueryUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClusterQueryUtils.java deleted file mode 100644 index a16a16cab463..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClusterQueryUtils.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.utils; - -import org.apache.iotdb.cluster.metadata.MetaPuller; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.common.Path; -import org.apache.iotdb.tsfile.write.schema.MeasurementSchema; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collections; - -public class ClusterQueryUtils { - - private static final Logger logger = LoggerFactory.getLogger(ClusterQueryUtils.class); - - private ClusterQueryUtils() { - // util class - } - - public static void checkPathExistence(PartialPath path) throws QueryProcessException { - if (!IoTDB.schemaProcessor.isPathExist(path)) { - try { - MetaPuller.getInstance().pullTimeSeriesSchemas(Collections.singletonList(path), null); - } catch (MetadataException e) { - throw new QueryProcessException(e); - } - } - } - - /** - * Generate path string list for RPC request. - * - *

If the path is a vector path, return its vectorId with all subSensors; otherwise just return the path string. TODO - * aligned path - */ - public static String getPathStrListForRequest(Path path) { - // TODO aligned Path - return path.getFullPath(); - } - - /** - * Deserialize an assembled Path from the path string list that comes from an RPC request. - * -

This method is corresponding to getPathStringListForRequest(). - */ - public static MeasurementPath getAssembledPathFromRequest(String pathString, byte dataType) { - // TODO aligned path - try { - MeasurementPath matchedPath = new MeasurementPath(pathString); - matchedPath.setMeasurementSchema( - new MeasurementSchema(matchedPath.getMeasurement(), TSDataType.deserialize(dataType))); - return matchedPath; - } catch (IllegalPathException e) { - logger.error("Failed to create partial path, fullPath is {}.", pathString, e); - return null; - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClusterUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClusterUtils.java deleted file mode 100644 index b7d989e46fc0..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/ClusterUtils.java +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.utils; - -import org.apache.iotdb.cluster.config.ClusterConfig; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.ConfigInconsistentException; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.CheckStatusResponse; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.StartUpStatus; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.service.IoTDB; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Objects; -import java.util.concurrent.atomic.AtomicInteger; - -public class ClusterUtils { - - private static final Logger logger = LoggerFactory.getLogger(ClusterUtils.class); - - public static final int WAIT_START_UP_CHECK_TIME_SEC = 5; - - public static final long START_UP_TIME_THRESHOLD_MS = 5 * 60 * 1000L; - - public static final long START_UP_CHECK_TIME_INTERVAL_MS = 3 * 1000L; - - /** - * the data group member's heartbeat offset relative to the {@link - * ClusterConfig#getInternalDataPort()}, which means the dataHeartbeatPort = getInternalDataPort() - * + DATA_HEARTBEAT_OFFSET. 
- */ - public static final int DATA_HEARTBEAT_PORT_OFFSET = 1; - - /** - * the meta group member's heartbeat offset relative to the {@link - * ClusterConfig#getInternalMetaPort()}, which means the metaHeartbeatPort = getInternalMetaPort() - * + META_HEARTBEAT_OFFSET. - */ - public static final int META_HEARTBEAT_PORT_OFFSET = 1; - - public static final String UNKNOWN_CLIENT_IP = "UNKNOWN_IP"; - - private ClusterUtils() { - // util class - } - - public static CheckStatusResponse checkStatus( - StartUpStatus remoteStartUpStatus, StartUpStatus localStartUpStatus) { - boolean partitionIntervalEquals = true; - boolean hashSaltEquals = true; - boolean replicationNumEquals = true; - boolean seedNodeListEquals = true; - boolean clusterNameEqual = true; - boolean multiRaftFactorEqual = true; - - if (localStartUpStatus.getPartitionInterval() != remoteStartUpStatus.getPartitionInterval()) { - partitionIntervalEquals = false; - logger.error( - "Remote partition interval conflicts with local. local: {}, remote: {}", - localStartUpStatus.getPartitionInterval(), - remoteStartUpStatus.getPartitionInterval()); - } - if (localStartUpStatus.getMultiRaftFactor() != remoteStartUpStatus.getMultiRaftFactor()) { - multiRaftFactorEqual = false; - logger.error( - "Remote multi-raft factor conflicts with local. local: {}, remote: {}", - localStartUpStatus.getMultiRaftFactor(), - remoteStartUpStatus.getMultiRaftFactor()); - } - if (localStartUpStatus.getHashSalt() != remoteStartUpStatus.getHashSalt()) { - hashSaltEquals = false; - logger.error( - "Remote hash salt conflicts with local. local: {}, remote: {}", - localStartUpStatus.getHashSalt(), - remoteStartUpStatus.getHashSalt()); - } - if (localStartUpStatus.getReplicationNumber() != remoteStartUpStatus.getReplicationNumber()) { - replicationNumEquals = false; - logger.error( - "Remote replication number conflicts with local. local: {}, remote: {}", - localStartUpStatus.getReplicationNumber(), - remoteStartUpStatus.getReplicationNumber()); - } - if (!Objects.equals( - localStartUpStatus.getClusterName(), remoteStartUpStatus.getClusterName())) { - clusterNameEqual = false; - logger.error( - "Remote cluster name conflicts with local. local: {}, remote: {}", - localStartUpStatus.getClusterName(), - remoteStartUpStatus.getClusterName()); - } - if (!ClusterUtils.checkSeedNodes( - false, localStartUpStatus.getSeedNodeList(), remoteStartUpStatus.getSeedNodeList())) { - seedNodeListEquals = false; - if (logger.isErrorEnabled()) { - logger.error( - "Remote seed node list conflicts with local. local: {}, remote: {}", - localStartUpStatus.getSeedNodeList(), - remoteStartUpStatus.getSeedNodeList()); - } - } - - return new CheckStatusResponse( - partitionIntervalEquals, - hashSaltEquals, - replicationNumEquals, - seedNodeListEquals, - clusterNameEqual, - multiRaftFactorEqual); - } - - public static boolean checkSeedNodes( - boolean isClusterEstablished, List localSeedNodes, List remoteSeedNodes) { - return isClusterEstablished - ? 
seedNodesContains(localSeedNodes, remoteSeedNodes) - : seedNodesEquals(localSeedNodes, remoteSeedNodes); - } - - private static boolean seedNodesEquals(List thisNodeList, List thatNodeList) { - Node[] thisNodeArray = thisNodeList.toArray(new Node[0]); - Node[] thatNodeArray = thatNodeList.toArray(new Node[0]); - Arrays.sort(thisNodeArray, ClusterUtils::compareSeedNode); - Arrays.sort(thatNodeArray, ClusterUtils::compareSeedNode); - if (thisNodeArray.length != thatNodeArray.length) { - return false; - } else { - for (int i = 0; i < thisNodeArray.length; i++) { - if (compareSeedNode(thisNodeArray[i], thatNodeArray[i]) != 0) { - return false; - } - } - return true; - } - } - - private static int compareSeedNode(Node thisSeedNode, Node thatSeedNode) { - int ipCompare = thisSeedNode.getInternalIp().compareTo(thatSeedNode.getInternalIp()); - if (ipCompare != 0) { - return ipCompare; - } else { - return thisSeedNode.getMetaPort() - thatSeedNode.getMetaPort(); - } - } - - private static boolean seedNodesContains(List seedNodeList, List subSeedNodeList) { - // Because identifier is not compared here, List.contains() is not suitable - if (subSeedNodeList == null) { - return false; - } - seedNodeList.sort(ClusterUtils::compareSeedNode); - subSeedNodeList.sort(ClusterUtils::compareSeedNode); - int i = 0; - int j = 0; - while (i < seedNodeList.size() && j < subSeedNodeList.size()) { - int compareResult = compareSeedNode(seedNodeList.get(i), subSeedNodeList.get(j)); - if (compareResult > 0) { - if (logger.isErrorEnabled()) { - logger.error("Node {} not found in cluster", subSeedNodeList.get(j)); - } - return false; - } else if (compareResult < 0) { - i++; - } else { - j++; - } - } - return j == subSeedNodeList.size(); - } - - public static void examineCheckStatusResponse( - CheckStatusResponse response, - AtomicInteger consistentNum, - AtomicInteger inconsistentNum, - Node seedNode) { - boolean partitionIntervalEquals = response.partitionalIntervalEquals; - boolean hashSaltEquals = response.hashSaltEquals; - boolean replicationNumEquals = response.replicationNumEquals; - boolean seedNodeListEquals = response.seedNodeEquals; - boolean clusterNameEqual = response.clusterNameEquals; - if (!partitionIntervalEquals) { - logger.error("Local partition interval conflicts with seed node[{}].", seedNode); - } - if (!hashSaltEquals) { - logger.error("Local hash salt conflicts with seed node[{}]", seedNode); - } - if (!replicationNumEquals) { - logger.error("Local replication number conflicts with seed node[{}]", seedNode); - } - if (!seedNodeListEquals) { - logger.error("Local seed node list conflicts with seed node[{}]", seedNode); - } - if (!clusterNameEqual) { - logger.error("Local cluster name conflicts with seed node[{}]", seedNode); - } - if (partitionIntervalEquals - && hashSaltEquals - && replicationNumEquals - && seedNodeListEquals - && clusterNameEqual) { - consistentNum.incrementAndGet(); - } else { - inconsistentNum.incrementAndGet(); - } - } - - public static boolean analyseStartUpCheckResult( - int consistentNum, int inconsistentNum, int totalSeedNum) throws ConfigInconsistentException { - if (consistentNum == totalSeedNum) { - // break the loop and establish the cluster - return true; - } else if (inconsistentNum > 0) { - // find config InConsistence, stop building cluster - throw new ConfigInconsistentException(); - } else { - // The status of some nodes was not obtained, possibly because those node did not start - // successfully, - // this node can't connect to those nodes, try in next turn - 
return false; - } - } - - /** - * Convert a string representation of a Node to an object. - * - * @param str A string that is generated by Node.toString() - * @return a Node object - */ - public static Node stringToNode(String str) { - - int ipFirstPos = str.indexOf("internalIp:") + "internalIp:".length(); - int ipLastPos = str.indexOf(',', ipFirstPos); - int metaPortFirstPos = str.indexOf("metaPort:", ipLastPos) + "metaPort:".length(); - int metaPortLastPos = str.indexOf(',', metaPortFirstPos); - int idFirstPos = str.indexOf("nodeIdentifier:", metaPortLastPos) + "nodeIdentifier:".length(); - int idLastPos = str.indexOf(',', idFirstPos); - int dataPortFirstPos = str.indexOf("dataPort:", idLastPos) + "dataPort:".length(); - int dataPortLastPos = str.indexOf(',', dataPortFirstPos); - int clientPortFirstPos = str.indexOf("clientPort:", dataPortLastPos) + "clientPort:".length(); - int clientPortLastPos = str.indexOf(',', clientPortFirstPos); - int clientIpFirstPos = str.indexOf("clientIp:", clientPortLastPos) + "clientIp:".length(); - int clientIpLastPos = str.indexOf(')', clientIpFirstPos); - - String ip = str.substring(ipFirstPos, ipLastPos); - int metaPort = Integer.parseInt(str.substring(metaPortFirstPos, metaPortLastPos)); - int id = Integer.parseInt(str.substring(idFirstPos, idLastPos)); - int dataPort = Integer.parseInt(str.substring(dataPortFirstPos, dataPortLastPos)); - int clientPort = Integer.parseInt(str.substring(clientPortFirstPos, clientPortLastPos)); - String clientIp = str.substring(clientIpFirstPos, clientIpLastPos); - return new Node(ip, metaPort, id, dataPort, clientPort, clientIp); - } - - public static Node parseNode(String nodeUrl) { - Node result = new Node(); - String[] split = nodeUrl.split(":"); - if (split.length != 2) { - logger.warn("Bad seed url: {}", nodeUrl); - return null; - } - String ip = split[0]; - try { - int metaPort = Integer.parseInt(split[1]); - result.setInternalIp(ip).setMetaPort(metaPort).setClientIp(UNKNOWN_CLIENT_IP); - } catch (NumberFormatException e) { - logger.warn("Bad seed url: {}", nodeUrl); - } - return result; - } - - public static PartitionGroup partitionByPathTimeWithSync( - PartialPath prefixPath, MetaGroupMember metaGroupMember) throws MetadataException { - PartitionGroup partitionGroup; - try { - partitionGroup = metaGroupMember.getPartitionTable().partitionByPathTime(prefixPath, 0); - } catch (StorageGroupNotSetException e) { - // the storage group is not found locally, but may be found in the leader, retry after - // synchronizing with the leader - try { - metaGroupMember.syncLeaderWithConsistencyCheck(true); - } catch (CheckConsistencyException checkConsistencyException) { - throw new MetadataException(checkConsistencyException.getMessage()); - } - partitionGroup = metaGroupMember.getPartitionTable().partitionByPathTime(prefixPath, 0); - } - return partitionGroup; - } - - public static int getSlotByPathTimeWithSync( - PartialPath prefixPath, MetaGroupMember metaGroupMember) throws MetadataException { - int slot; - try { - PartialPath storageGroup = IoTDB.schemaProcessor.getBelongedStorageGroup(prefixPath); - slot = - SlotPartitionTable.getSlotStrategy() - .calculateSlotByPartitionNum(storageGroup.getFullPath(), 0, ClusterConstant.SLOT_NUM); - } catch (StorageGroupNotSetException e) { - // the storage group is not found locally, but may be found in the leader, retry after - // synchronizing with the leader - try { - metaGroupMember.syncLeaderWithConsistencyCheck(true); - } catch (CheckConsistencyException 
checkConsistencyException) { - throw new MetadataException(checkConsistencyException.getMessage()); - } - PartialPath storageGroup = IoTDB.schemaProcessor.getBelongedStorageGroup(prefixPath); - slot = - SlotPartitionTable.getSlotStrategy() - .calculateSlotByPartitionNum(storageGroup.getFullPath(), 0, ClusterConstant.SLOT_NUM); - } - return slot; - } - - public static ByteBuffer serializeMigrationStatus(Map migrationStatus) { - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - try (DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream)) { - dataOutputStream.writeInt(migrationStatus.size()); - for (Entry entry : migrationStatus.entrySet()) { - entry.getKey().serialize(dataOutputStream); - dataOutputStream.writeInt(entry.getValue()); - } - } catch (IOException e) { - // ignored - } - return ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - } - - public static Map deserializeMigrationStatus(ByteBuffer buffer) { - Map migrationStatus = new HashMap<>(); - int size = buffer.getInt(); - while (size-- > 0) { - PartitionGroup partitionGroup = new PartitionGroup(); - partitionGroup.deserialize(buffer); - migrationStatus.put(partitionGroup, buffer.getInt()); - } - return migrationStatus; - } - - public static boolean nodeEqual(Node node1, Node node2) { - ClusterNode clusterNode1 = new ClusterNode(node1); - ClusterNode clusterNode2 = new ClusterNode(node2); - return clusterNode1.equals(clusterNode2); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/IOUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/IOUtils.java deleted file mode 100644 index c906924a17ff..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/IOUtils.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.utils; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.BufferedInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.nio.file.Files; - -@SuppressWarnings("java:S1135") -public class IOUtils { - - private static final Logger logger = LoggerFactory.getLogger(IOUtils.class); - - private IOUtils() { - // util class - } - - /** - * An interface that is used for a node to pull chunks of files like TsFiles. The file should be a - * temporary hard link, and once the file is totally read, it will be removed. 
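An illustrative sketch, not taken from the removed code: a possible caller-side loop for this chunked-read API. The fetchChunk stub below is hypothetical and stands in for whatever RPC ultimately invokes the readFile method shown next; the loop only relies on the documented contract that an empty buffer means the temporary hard link has been exhausted and removed.

```java
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;

// Illustrative caller-side loop: pull a remote file chunk by chunk until exhausted.
public final class FilePullSketch {
  private static final int CHUNK_SIZE = 64 * 1024; // assumed chunk size

  public static void pull(String remotePath, OutputStream localOut) throws IOException {
    long offset = 0;
    while (true) {
      ByteBuffer chunk = fetchChunk(remotePath, offset, CHUNK_SIZE);
      if (chunk.remaining() == 0) {
        break; // empty buffer: the file has been fully read (and deleted remotely)
      }
      localOut.write(chunk.array(), chunk.position(), chunk.remaining());
      offset += chunk.remaining();
    }
  }

  // Hypothetical transport stub standing in for an RPC that reads the remote file.
  private static ByteBuffer fetchChunk(String path, long offset, int length)
      throws IOException {
    return ByteBuffer.allocate(0);
  }
}
```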
- */ - public static ByteBuffer readFile(String filePath, long offset, int length) throws IOException { - // TODO-Cluster: hold if the file is an unclosed TsFile - File file = new File(filePath); - if (!file.exists()) { - return ByteBuffer.allocate(0); - } - - ByteBuffer result; - boolean fileExhausted; - try (BufferedInputStream bufferedInputStream = - new BufferedInputStream(new FileInputStream(file))) { - skipExactly(bufferedInputStream, offset); - byte[] bytes = new byte[length]; - result = ByteBuffer.wrap(bytes); - int len = bufferedInputStream.read(bytes); - result.limit(Math.max(len, 0)); - fileExhausted = bufferedInputStream.available() <= 0; - } - - if (fileExhausted) { - try { - Files.delete(file.toPath()); - } catch (IOException e) { - logger.warn("Cannot delete an exhausted file {}", filePath, e); - } - } - return result; - } - - private static void skipExactly(InputStream stream, long byteToSkip) throws IOException { - while (byteToSkip > 0) { - byteToSkip -= stream.skip(byteToSkip); - } - } - - public static Throwable getRootCause(Throwable e) { - Throwable curr = e; - while (curr.getCause() != null) { - curr = curr.getCause(); - } - return curr; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/NodeSerializeUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/NodeSerializeUtils.java deleted file mode 100644 index bf86b9f1a759..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/NodeSerializeUtils.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.utils; - -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; - -public class NodeSerializeUtils { - - private NodeSerializeUtils() {} - - public static void serialize(Node node, DataOutputStream dataOutputStream) { - try { - byte[] internalIpBytes = node.internalIp.getBytes(); - dataOutputStream.writeInt(internalIpBytes.length); - dataOutputStream.write(internalIpBytes); - dataOutputStream.writeInt(node.metaPort); - dataOutputStream.writeInt(node.nodeIdentifier); - dataOutputStream.writeInt(node.dataPort); - dataOutputStream.writeInt(node.clientPort); - byte[] clientIpBytes = node.clientIp.getBytes(); - dataOutputStream.writeInt(clientIpBytes.length); - dataOutputStream.write(clientIpBytes); - } catch (IOException e) { - // unreachable - } - } - - public static void deserialize(Node node, ByteBuffer buffer) { - int internalIpLength = buffer.getInt(); - byte[] internalIpBytes = new byte[internalIpLength]; - buffer.get(internalIpBytes); - node.setInternalIp(new String(internalIpBytes)); - node.setMetaPort(buffer.getInt()); - node.setNodeIdentifier(buffer.getInt()); - node.setDataPort(buffer.getInt()); - node.setClientPort(buffer.getInt()); - int clientIpLength = buffer.getInt(); - byte[] clientIpBytes = new byte[clientIpLength]; - buffer.get(clientIpBytes); - node.setClientIp(new String(clientIpBytes)); - } - - public static void deserialize(Node node, DataInputStream stream) throws IOException { - int ipLength = stream.readInt(); - byte[] ipBytes = new byte[ipLength]; - int readIpSize = stream.read(ipBytes); - if (readIpSize != ipLength) { - throw new IOException( - String.format( - "No sufficient bytes read when deserializing the ip of a node: %d/%d", - readIpSize, ipLength)); - } - node.setInternalIp(new String(ipBytes)); - node.setMetaPort(stream.readInt()); - node.setNodeIdentifier(stream.readInt()); - node.setDataPort(stream.readInt()); - node.setClientPort(stream.readInt()); - - int clientIpLength = stream.readInt(); - byte[] clientIpBytes = new byte[clientIpLength]; - int readClientIpSize = stream.read(clientIpBytes); - if (readClientIpSize != clientIpLength) { - throw new IOException( - String.format( - "No sufficient bytes read when deserializing the clientIp of a node: %d/%d", - readClientIpSize, clientIpLength)); - } - node.setClientIp(new String(clientIpBytes)); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/PartitionUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/PartitionUtils.java deleted file mode 100644 index 0b6cd6d5c8aa..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/PartitionUtils.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.utils; - -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.DeletePlan; -import org.apache.iotdb.db.qp.physical.crud.InsertTabletPlan; -import org.apache.iotdb.db.qp.physical.sys.AppendTemplatePlan; -import org.apache.iotdb.db.qp.physical.sys.AuthorPlan; -import org.apache.iotdb.db.qp.physical.sys.ClearCachePlan; -import org.apache.iotdb.db.qp.physical.sys.CreateFunctionPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateTemplatePlan; -import org.apache.iotdb.db.qp.physical.sys.DataAuthPlan; -import org.apache.iotdb.db.qp.physical.sys.DeleteStorageGroupPlan; -import org.apache.iotdb.db.qp.physical.sys.DeleteTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.DropFunctionPlan; -import org.apache.iotdb.db.qp.physical.sys.FlushPlan; -import org.apache.iotdb.db.qp.physical.sys.KillQueryPlan; -import org.apache.iotdb.db.qp.physical.sys.LoadConfigurationPlan; -import org.apache.iotdb.db.qp.physical.sys.LoadConfigurationPlan.LoadConfigurationPlanType; -import org.apache.iotdb.db.qp.physical.sys.LoadDataPlan; -import org.apache.iotdb.db.qp.physical.sys.MergePlan; -import org.apache.iotdb.db.qp.physical.sys.OperateFilePlan; -import org.apache.iotdb.db.qp.physical.sys.PruneTemplatePlan; -import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan; -import org.apache.iotdb.db.qp.physical.sys.SetSystemModePlan; -import org.apache.iotdb.db.qp.physical.sys.SetTTLPlan; -import org.apache.iotdb.db.qp.physical.sys.SetTemplatePlan; -import org.apache.iotdb.db.qp.physical.sys.ShowTTLPlan; -import org.apache.iotdb.tsfile.utils.BitMap; -import org.apache.iotdb.tsfile.utils.Murmur128Hash; - -import java.util.List; -import java.util.Set; - -import static org.apache.iotdb.cluster.config.ClusterConstant.HASH_SALT; - -public class PartitionUtils { - - private PartitionUtils() { - // util class - } - - /** - * Whether the plan should be directly executed without spreading it into the cluster. - * - * @param plan - * @return - */ - public static boolean isLocalNonQueryPlan(PhysicalPlan plan) { - return plan instanceof LoadDataPlan - || plan instanceof OperateFilePlan - || plan instanceof KillQueryPlan - || (plan instanceof LoadConfigurationPlan - && ((LoadConfigurationPlan) plan) - .getLoadConfigurationPlanType() - .equals(LoadConfigurationPlanType.LOCAL)); - } - - /** - * GlobalMetaPlan will be executed on all meta group nodes. 
- * - * @param plan - * @return - */ - public static boolean isGlobalMetaPlan(PhysicalPlan plan) { - return plan instanceof SetStorageGroupPlan - || plan instanceof SetTTLPlan - || plan instanceof ShowTTLPlan - || (plan instanceof LoadConfigurationPlan - && ((LoadConfigurationPlan) plan) - .getLoadConfigurationPlanType() - .equals(LoadConfigurationPlanType.GLOBAL)) - || plan instanceof AuthorPlan - || plan instanceof DeleteStorageGroupPlan - // DataAuthPlan is global because all nodes must have all user info - || plan instanceof DataAuthPlan - || plan instanceof CreateTemplatePlan - || plan instanceof AppendTemplatePlan - || plan instanceof PruneTemplatePlan - || plan instanceof CreateFunctionPlan - || plan instanceof DropFunctionPlan - || plan instanceof SetSystemModePlan; - } - - /** - * GlobalDataPlan will be executed on all data group nodes. - * - * @param plan the plan to check - * @return is globalDataPlan or not - */ - public static boolean isGlobalDataPlan(PhysicalPlan plan) { - return - // because deletePlan has an infinite time range. - plan instanceof DeletePlan - || plan instanceof DeleteTimeSeriesPlan - || plan instanceof MergePlan - || plan instanceof FlushPlan - || plan instanceof SetTemplatePlan - || plan instanceof ClearCachePlan; - } - - public static int calculateStorageGroupSlotByTime( - String storageGroupName, long timestamp, int slotNum) { - long partitionNum = StorageEngine.getTimePartition(timestamp); - return calculateStorageGroupSlotByPartition(storageGroupName, partitionNum, slotNum); - } - - private static int calculateStorageGroupSlotByPartition( - String storageGroupName, long partitionNum, int slotNum) { - int hash = Murmur128Hash.hash(storageGroupName, partitionNum, HASH_SALT); - return Math.abs(hash % slotNum); - } - - public static InsertTabletPlan copy( - InsertTabletPlan plan, long[] times, Object[] values, BitMap[] bitMaps) { - InsertTabletPlan newPlan = new InsertTabletPlan(plan.getDevicePath(), plan.getMeasurements()); - newPlan.setDataTypes(plan.getDataTypes()); - // according to TSServiceImpl.insertBatch(), only the deviceId, measurements, dataTypes, - // times, columns, and rowCount are need to be maintained. - newPlan.setColumns(values); - newPlan.setBitMaps(bitMaps); - newPlan.setTimes(times); - newPlan.setRowCount(times.length); - newPlan.setMeasurementMNodes(plan.getMeasurementMNodes()); - return newPlan; - } - - public static void reordering(InsertTabletPlan plan, TSStatus[] status, TSStatus[] subStatus) { - List range = plan.getRange(); - int destLoc = 0; - for (int i = 0; i < range.size(); i += 2) { - int start = range.get(i); - int end = range.get(i + 1); - System.arraycopy(subStatus, destLoc, status, start, end - start); - destLoc += end - start; - } - } - - /** - * Calculate the headers of the groups that possibly store the data of a timeseries over the given - * time range. 
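An illustrative sketch, not taken from the removed code: the method below walks time partitions between the two bounds by aligning the lower bound down to a partition boundary and stepping one interval at a time. The numbers here are made up; in the removed code the interval comes from StorageEngine.getTimePartitionInterval() and each start time is routed to a group header via the partition table.

```java
// Illustrative only: the time-partition walk used when collecting interval headers.
public final class IntervalWalkSketch {
  public static void main(String[] args) {
    long partitionInterval = 604_800_000L; // e.g. one week in ms (assumed value)
    long timeLowerBound = 1_000_000_123L;
    long timeUpperBound = 2_500_000_000L;

    // Align the lower bound down to the start of its partition, then step forward.
    long currPartitionStart = timeLowerBound / partitionInterval * partitionInterval;
    while (currPartitionStart <= timeUpperBound) {
      System.out.println("route partition starting at " + currPartitionStart);
      currPartitionStart += partitionInterval;
    }
    // Prints 604800000, 1209600000, 1814400000, 2419200000 for the values above.
  }
}
```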
- * - * @param storageGroupName - * @param timeLowerBound - * @param timeUpperBound - * @param partitionTable - * @param result - */ - public static void getIntervalHeaders( - String storageGroupName, - long timeLowerBound, - long timeUpperBound, - PartitionTable partitionTable, - Set result) { - long partitionInterval = StorageEngine.getTimePartitionInterval(); - long currPartitionStart = timeLowerBound / partitionInterval * partitionInterval; - while (currPartitionStart <= timeUpperBound) { - result.add(partitionTable.routeToHeaderByTime(storageGroupName, currPartitionStart)); - currPartitionStart += partitionInterval; - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/PlanSerializer.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/PlanSerializer.java deleted file mode 100644 index da5e7e58babf..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/PlanSerializer.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - */ - -package org.apache.iotdb.cluster.utils; - -import org.apache.iotdb.commons.utils.JVMCommonUtils; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.util.concurrent.BlockingDeque; -import java.util.concurrent.LinkedBlockingDeque; - -public class PlanSerializer { - - private static final Logger logger = LoggerFactory.getLogger(PlanSerializer.class); - private static final int DEFAULT_BAOS_SIZE = JVMCommonUtils.getCpuCores() * 4; - private BlockingDeque baosBlockingDeque = new LinkedBlockingDeque<>(); - - private static final PlanSerializer instance = new PlanSerializer(); - - private PlanSerializer() { - for (int i = 0; i < DEFAULT_BAOS_SIZE; i++) { - baosBlockingDeque.push(new ByteArrayOutputStream(4096)); - } - } - - public static PlanSerializer getInstance() { - return instance; - } - - public byte[] serialize(PhysicalPlan plan) throws IOException { - ByteArrayOutputStream poll; - try { - poll = baosBlockingDeque.take(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new IOException("take byte array output stream interrupted", e); - } - poll.reset(); - - try (DataOutputStream dataOutputStream = new DataOutputStream(poll)) { - plan.serialize(dataOutputStream); - return poll.toByteArray(); - } finally { - try { - baosBlockingDeque.put(poll); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.error("Putting byte array output stream back interrupted"); - } - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/StatusUtils.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/StatusUtils.java deleted file mode 100644 index 
96cb26e53200..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/StatusUtils.java +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.utils; - -import org.apache.iotdb.common.rpc.thrift.TEndPoint; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.rpc.TSStatusCode; - -public class StatusUtils { - - private StatusUtils() { - // util class - } - - public static final TSStatus PARTITION_TABLE_NOT_READY = - getStatus(TSStatusCode.PARTITION_NOT_READY); - public static final TSStatus OK = getStatus(TSStatusCode.SUCCESS_STATUS); - public static final TSStatus TIME_OUT = getStatus(TSStatusCode.TIME_OUT); - public static final TSStatus NO_LEADER = getStatus(TSStatusCode.NO_LEADER); - public static final TSStatus INTERNAL_ERROR = getStatus(TSStatusCode.INTERNAL_SERVER_ERROR); - public static final TSStatus UNSUPPORTED_OPERATION = - getStatus(TSStatusCode.UNSUPPORTED_OPERATION); - public static final TSStatus EXECUTE_STATEMENT_ERROR = - getStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR); - public static final TSStatus NO_STORAGE_GROUP = getStatus(TSStatusCode.STORAGE_GROUP_ERROR); - public static final TSStatus NODE_READ_ONLY = getStatus(TSStatusCode.NODE_READ_ONLY); - public static final TSStatus CONSISTENCY_FAILURE = getStatus(TSStatusCode.CONSISTENCY_FAILURE); - public static final TSStatus TIMESERIES_NOT_EXIST_ERROR = - getStatus(TSStatusCode.TIMESERIES_NOT_EXIST); - public static final TSStatus NO_CONNECTION = getStatus(TSStatusCode.NO_CONNECTION); - public static final TSStatus PARSE_LOG_ERROR = getStatus(TSStatusCode.PARSE_LOG_ERROR); - public static final TSStatus DUPLICATED_TEMPLATE = getStatus(TSStatusCode.DUPLICATED_TEMPLATE); - - public static TSStatus getStatus(TSStatusCode statusCode) { - TSStatus status = new TSStatus(); - status.setCode(statusCode.getStatusCode()); - switch (statusCode) { - case SUCCESS_STATUS: - status.setMessage("Executed successfully. "); - break; - case TIME_OUT: - status.setMessage("Request timed out. "); - break; - case NO_LEADER: - status.setMessage("Leader cannot be found. "); - break; - case PARTITION_NOT_READY: - status.setMessage("Partition table is not ready. "); - break; - case NODE_READ_ONLY: - status.setMessage( - "Current node is read-only, please retry to find another available node. "); - break; - case INCOMPATIBLE_VERSION: - status.setMessage("Incompatible version. "); - break; - case NODE_DELETE_FAILED_ERROR: - status.setMessage("Failed while deleting node. "); - break; - case ALIAS_ALREADY_EXIST_ERROR: - status.setMessage("Alias already exists. "); - break; - case PATH_ALREADY_EXIST_ERROR: - status.setMessage("Path already exist. 
"); - break; - case PATH_NOT_EXIST_ERROR: - case TIMESERIES_NOT_EXIST: - status.setMessage("Path does not exist. "); - break; - case UNSUPPORTED_FETCH_METADATA_OPERATION_ERROR: - status.setMessage("Unsupported fetch metadata operation. "); - break; - case METADATA_ERROR: - status.setMessage("Meet error when dealing with metadata. "); - break; - case OUT_OF_TTL_ERROR: - status.setMessage("Insertion time is less than TTL time bound. "); - break; - case CONFIG_ADJUSTER: - status.setMessage("IoTDB system load is too large. "); - break; - case MERGE_ERROR: - status.setMessage("Meet error while merging. "); - break; - case SYSTEM_CHECK_ERROR: - status.setMessage("Meet error while system checking. "); - break; - case SYNC_CONNECTION_EXCEPTION: - status.setMessage("Meet error while sync connecting. "); - break; - case DATA_REGION_ERROR: - status.setMessage("Storage group processor related error. "); - break; - case STORAGE_GROUP_ERROR: - status.setMessage("No associated storage group. "); - break; - case STORAGE_ENGINE_ERROR: - status.setMessage("Storage engine related error. "); - break; - case TSFILE_PROCESSOR_ERROR: - status.setMessage("TsFile processor related error. "); - break; - case PATH_ILLEGAL: - status.setMessage("Illegal path. "); - break; - case LOAD_FILE_ERROR: - status.setMessage("Meet error while loading file. "); - break; - case EXECUTE_STATEMENT_ERROR: - status.setMessage("Execute statement error. "); - break; - case SQL_PARSE_ERROR: - status.setMessage("Meet error while parsing SQL. "); - break; - case GENERATE_TIME_ZONE_ERROR: - status.setMessage("Meet error while generating time zone. "); - break; - case SET_TIME_ZONE_ERROR: - status.setMessage("Meet error while setting time zone. "); - break; - case NOT_STORAGE_GROUP_ERROR: - status.setMessage("Operating object is not a storage group. "); - break; - case QUERY_NOT_ALLOWED: - status.setMessage("Query statements are not allowed error. "); - break; - case AST_FORMAT_ERROR: - status.setMessage("AST format related error. "); - break; - case LOGICAL_OPERATOR_ERROR: - status.setMessage("Logical operator related error. "); - break; - case LOGICAL_OPTIMIZE_ERROR: - status.setMessage("Logical optimize related error. "); - break; - case UNSUPPORTED_FILL_TYPE_ERROR: - status.setMessage("Unsupported fill type related error. "); - break; - case PATH_ERROR: - status.setMessage("Path related error. "); - break; - case QUERY_PROCESS_ERROR: - status.setMessage("Query process related error. "); - break; - case WRITE_PROCESS_ERROR: - status.setMessage("Writing data related error. "); - break; - case INTERNAL_SERVER_ERROR: - status.setMessage("Internal server error. "); - break; - case CLOSE_OPERATION_ERROR: - status.setMessage("Meet error in close operation. "); - break; - case READ_ONLY_SYSTEM_ERROR: - status.setMessage("Database is read-only. "); - break; - case DISK_SPACE_INSUFFICIENT_ERROR: - status.setMessage("Disk space is insufficient. "); - break; - case START_UP_ERROR: - status.setMessage("Meet error while starting up. "); - break; - case WRONG_LOGIN_PASSWORD_ERROR: - status.setMessage("Username or password is wrong. "); - break; - case NOT_LOGIN_ERROR: - status.setMessage("Has not logged in. "); - break; - case NO_PERMISSION_ERROR: - status.setMessage("No permissions for this operation. "); - break; - case UNINITIALIZED_AUTH_ERROR: - status.setMessage("Uninitialized authorizer. "); - break; - case UNSUPPORTED_OPERATION: - status.setMessage("Unsupported operation. 
"); - break; - case NO_CONNECTION: - status.setMessage("Node cannot be reached."); - break; - case PARSE_LOG_ERROR: - status.setMessage("Parse log error."); - break; - case PIPESINK_ERROR: - status.setMessage("PipeSink error."); - break; - default: - status.setMessage(""); - break; - } - return status; - } - - public static TSStatus getStatus(TSStatusCode statusCode, TEndPoint redirectedNode) { - TSStatus status = getStatus(statusCode); - status.setRedirectNode(redirectedNode); - return status; - } - - public static TSStatus getStatus(TSStatus status, String message) { - TSStatus newStatus = status.deepCopy(); - newStatus.setMessage(message); - return newStatus; - } - - public static TSStatus getStatus(TSStatus status, TEndPoint redirectedNode) { - TSStatus newStatus = status.deepCopy(); - newStatus.setRedirectNode(redirectedNode); - return newStatus; - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/ClusterMonitor.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/ClusterMonitor.java deleted file mode 100644 index dda37f9f2585..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/ClusterMonitor.java +++ /dev/null @@ -1,323 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.cluster.utils.nodetool; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.sync.SyncMetaClient; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.server.monitor.Timer; -import org.apache.iotdb.cluster.utils.ClientUtils; -import org.apache.iotdb.cluster.utils.nodetool.function.NodeToolCmd; -import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; -import org.apache.iotdb.commons.concurrent.ThreadName; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.exception.StartupException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.service.IService; -import org.apache.iotdb.commons.service.JMXService; -import org.apache.iotdb.commons.service.ServiceType; -import org.apache.iotdb.db.service.metrics.Metric; -import org.apache.iotdb.db.service.metrics.MetricsService; -import org.apache.iotdb.db.service.metrics.Tag; -import org.apache.iotdb.metrics.config.MetricConfigDescriptor; -import org.apache.iotdb.metrics.utils.MetricLevel; -import org.apache.iotdb.tsfile.utils.Pair; - -import org.apache.commons.collections4.map.MultiKeyMap; -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import static org.apache.iotdb.cluster.utils.nodetool.function.NodeToolCmd.BUILDING_CLUSTER_INFO; -import static org.apache.iotdb.cluster.utils.nodetool.function.NodeToolCmd.META_LEADER_UNKNOWN_INFO; - -public class ClusterMonitor implements ClusterMonitorMBean, IService { - - private static final Logger LOGGER = LoggerFactory.getLogger(ClusterMonitor.class); - - public static final ClusterMonitor INSTANCE = new ClusterMonitor(); - - private final String mbeanName = - String.format( - "%s:%s=%s", IoTDBConstant.IOTDB_PACKAGE, IoTDBConstant.JMX_TYPE, getID().getJmxName()); - - private ClusterMonitor() {} - - @Override - public void start() throws StartupException { - try { - JMXService.registerMBean(INSTANCE, mbeanName); - if (MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) { - startCollectClusterStatus(); - } - } catch (Exception e) { - String errorMessage = - String.format("Failed to start %s because of %s", this.getID().getName(), e.getMessage()); - throw new StartupException(errorMessage); - } - } - - private void startCollectClusterStatus() { - // monitor all nodes' live status - LOGGER.info("start metric node status and leader distribution"); - IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor(ThreadName.CLUSTER_MONITOR.getName()) - .scheduleAtFixedRate( - () -> { - MetaGroupMember metaGroupMember = ClusterIoTDB.getInstance().getMetaGroupMember(); - if (metaGroupMember != null - && 
metaGroupMember.getLeader().equals(metaGroupMember.getThisNode())) { - metricNodeStatus(metaGroupMember); - metricLeaderDistribution(metaGroupMember); - } - }, - 10L, - 10L, - TimeUnit.SECONDS); - } - - private void metricLeaderDistribution(MetaGroupMember metaGroupMember) { - Map<Node, Integer> leaderCountMap = new HashMap<>(); - ClusterIoTDB.getInstance() - .getDataGroupEngine() - .getHeaderGroupMap() - .forEach( - (header, dataGroupMember) -> { - Node leader = dataGroupMember.getLeader(); - int delta = 1; - Integer count = leaderCountMap.getOrDefault(leader, 0); - leaderCountMap.put(leader, count + delta); - }); - List<Node> ring = getRing(); - for (Node node : ring) { - Integer count = leaderCountMap.getOrDefault(node, 0); - MetricsService.getInstance() - .getMetricManager() - .gauge( - count, - Metric.CLUSTER_NODE_LEADER_COUNT.toString(), - MetricLevel.IMPORTANT, - Tag.NAME.toString(), - node.internalIp); - } - } - - private void metricNodeStatus(MetaGroupMember metaGroupMember) { - List<Node> ring = getRing(); - for (Node node : ring) { - boolean isAlive = false; - if (node.equals(metaGroupMember.getThisNode())) { - isAlive = true; - } - SyncMetaClient client = (SyncMetaClient) metaGroupMember.getSyncClient(node); - if (client != null) { - try { - client.checkAlive(); - isAlive = true; - } catch (TException e) { - client.getInputProtocol().getTransport().close(); - } finally { - ClientUtils.putBackSyncClient(client); - } - } - MetricsService.getInstance() - .getMetricManager() - .gauge( - isAlive ? 1 : 0, - Metric.CLUSTER_NODE_STATUS.toString(), - MetricLevel.IMPORTANT, - Tag.NAME.toString(), - node.internalIp); - } - } - - @Override - public List<Pair<Node, NodeCharacter>> getMetaGroup() { - MetaGroupMember metaMember = ClusterIoTDB.getInstance().getMetaGroupMember(); - if (metaMember == null || metaMember.getPartitionTable() == null) { - return null; - } - List<Pair<Node, NodeCharacter>> res = new ArrayList<>(); - Node leader = metaMember.getLeader(); - List<Node> nodes = metaMember.getPartitionTable().getAllNodes(); - for (Node node : nodes) { - if (node.equals(leader)) { - res.add(new Pair<>(node, NodeCharacter.LEADER)); - } else { - res.add(new Pair<>(node, NodeCharacter.FOLLOWER)); - } - } - return res; - } - - public List<Node> getRing() { - MetaGroupMember metaMember = ClusterIoTDB.getInstance().getMetaGroupMember(); - if (metaMember == null || metaMember.getPartitionTable() == null) { - return null; - } - return metaMember.getPartitionTable().getAllNodes(); - } - - @Override - public List<Pair<Node, NodeCharacter>> getDataGroup(int raftId) throws Exception { - MetaGroupMember metaMember = ClusterIoTDB.getInstance().getMetaGroupMember(); - if (metaMember == null || metaMember.getPartitionTable() == null) { - return null; - } - RaftNode raftNode = new RaftNode(metaMember.getThisNode(), raftId); - DataGroupMember dataMember = - ClusterIoTDB.getInstance() - .getDataGroupEngine() - .getHeaderGroupMap() - .getOrDefault(raftNode, null); - if (dataMember == null) { - throw new Exception(String.format("Partition whose header is %s doesn't exist.", raftNode)); - } - List<Pair<Node, NodeCharacter>> res = new ArrayList<>(); - for (Node node : dataMember.getAllNodes()) { - if (node.equals(metaMember.getThisNode())) { - res.add(new Pair<>(node, NodeCharacter.LEADER)); - } else { - res.add(new Pair<>(node, NodeCharacter.FOLLOWER)); - } - } - return res; - } - - @Override - public Map<PartitionGroup, Integer> getSlotNumInDataMigration() throws Exception { - MetaGroupMember member = ClusterIoTDB.getInstance().getMetaGroupMember(); - if (member == null || member.getPartitionTable() == null) { - throw new Exception(BUILDING_CLUSTER_INFO); - } - if 
(member.getCharacter() != NodeCharacter.LEADER) { - if (member.getLeader() == null || member.getLeader().equals(ClusterConstant.EMPTY_NODE)) { - throw new Exception(META_LEADER_UNKNOWN_INFO); - } else { - throw new Exception(NodeToolCmd.redirectToQueryMetaLeader(member.getLeader())); - } - } - return member.collectAllPartitionMigrationStatus(); - } - - @Override - public MultiKeyMap getDataPartition( - String path, long startTime, long endTime) { - PartitionTable partitionTable = getPartitionTable(); - if (partitionTable == null) { - return null; - } - try { - return partitionTable.partitionByPathRangeTime(new PartialPath(path), startTime, endTime); - } catch (MetadataException e) { - return new MultiKeyMap<>(); - } - } - - @Override - public PartitionGroup getMetaPartition(String path) { - PartitionTable partitionTable = getPartitionTable(); - if (partitionTable == null) { - return null; - } - try { - return partitionTable.partitionByPathTime(new PartialPath(path), 0); - } catch (MetadataException e) { - return new PartitionGroup(); - } - } - - @Override - public Map getSlotNumOfAllNode() { - PartitionTable partitionTable = getPartitionTable(); - if (partitionTable == null) { - return null; - } - List allNodes = partitionTable.getAllNodes(); - Map> nodeSlotMap = - ((SlotPartitionTable) partitionTable).getAllNodeSlots(); - Map raftGroupMapSlotNum = new HashMap<>(); - for (Node header : allNodes) { - for (int raftId = 0; - raftId < ClusterDescriptor.getInstance().getConfig().getMultiRaftFactor(); - raftId++) { - RaftNode raftNode = new RaftNode(header, raftId); - raftGroupMapSlotNum.put( - partitionTable.getPartitionGroup(raftNode), nodeSlotMap.get(raftNode).size()); - } - } - return raftGroupMapSlotNum; - } - - @Override - public Map getAllNodeStatus() { - MetaGroupMember metaGroupMember = ClusterIoTDB.getInstance().getMetaGroupMember(); - if (metaGroupMember == null) { - return null; - } - return metaGroupMember.getAllNodeStatus(); - } - - private PartitionTable getPartitionTable() { - MetaGroupMember metaGroupMember = ClusterIoTDB.getInstance().getMetaGroupMember(); - if (metaGroupMember == null) { - return null; - } - return metaGroupMember.getPartitionTable(); - } - - @Override - public void stop() { - JMXService.deregisterMBean(mbeanName); - } - - @Override - public ServiceType getID() { - return ServiceType.CLUSTER_MONITOR_SERVICE; - } - - public String getMbeanName() { - return mbeanName; - } - - @Override - public String getInstrumentingInfo() { - return Timer.getReport(); - } - - @Override - public void resetInstrumenting() { - Timer.Statistic.resetAll(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/ClusterMonitorMBean.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/ClusterMonitorMBean.java deleted file mode 100644 index cf9ab4530cb6..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/ClusterMonitorMBean.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.utils.nodetool; - -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.tsfile.utils.Pair; - -import org.apache.commons.collections4.map.MultiKeyMap; - -import java.util.List; -import java.util.Map; - -public interface ClusterMonitorMBean { - - /** Show the character of meta raft group. */ - List<Pair<Node, NodeCharacter>> getMetaGroup(); - - /** Show the character of target data raft group whose header is this node. */ - List<Pair<Node, NodeCharacter>> getDataGroup(int raftId) throws Exception; - - /** - * Query how many slots are still PULLING or PULLING_WRITABLE, it means whether user can - * add/remove a node. - * - * @return key: group, value: slot num that still in the process of data migration - */ - Map<PartitionGroup, Integer> getSlotNumInDataMigration() throws Exception; - - /** - * Get data partition information of input path and time range. - * - * @param path input path - * @return data partition information: ((start time, end time), PartitionGroup) - */ - MultiKeyMap<Long, PartitionGroup> getDataPartition(String path, long startTime, long endTime); - - /** - * Get metadata partition information of input path - * - * @param path input path - * @return metadata partition information - */ - PartitionGroup getMetaPartition(String path); - - /** - * Get all data partition groups and the slot number in each partition group. - * - * @return key: the partition group, value: the slot number - */ - Map<PartitionGroup, Integer> getSlotNumOfAllNode(); - - /** - * Get status of all nodes - * - * @return key: node, value: 0(live), 1(offline), 2(joining), 3(leaving) - */ - Map<Node, Integer> getAllNodeStatus(); - - /** - * @return A multi-line string with each line representing the total time consumption, invocation - * number, and average time consumption. - */ - String getInstrumentingInfo(); - - /** Reset all instrumenting statistics in Timer. */ - void resetInstrumenting(); -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/NodeTool.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/NodeTool.java deleted file mode 100644 index 7a90f68a1568..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/NodeTool.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
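The ClusterMonitorMBean interface deleted above was the JMX surface that the nodetool commands later in this diff query. For context, a minimal sketch of a standalone reader is shown here; it is illustrative only: the service URL, credentials, and ObjectName string are placeholders (nodetool's defaults of 127.0.0.1:31999 and root/passw!d appear in NodeToolCmd further down, and the real ObjectName is whatever ClusterMonitor registers via getMbeanName()).

import org.apache.iotdb.cluster.rpc.thrift.Node;
import org.apache.iotdb.cluster.utils.nodetool.ClusterMonitorMBean;

import javax.management.JMX;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

import java.util.Collections;
import java.util.Map;

public class ClusterStatusReaderSketch {

  public static void main(String[] args) throws Exception {
    // Placeholder endpoint and credentials; see the NodeToolCmd defaults further down.
    JMXServiceURL url =
        new JMXServiceURL("service:jmx:rmi:///jndi/rmi://127.0.0.1:31999/jmxrmi");
    Map<String, Object> env =
        Collections.singletonMap(JMXConnector.CREDENTIALS, new String[] {"root", "passw!d"});
    JMXConnector connector = JMXConnectorFactory.connect(url, env);
    try {
      MBeanServerConnection mbsc = connector.getMBeanServerConnection();
      // Placeholder name: the real ObjectName is built by ClusterMonitor at registration time.
      ObjectName name = new ObjectName("org.apache.iotdb.service:type=Cluster Monitor");
      ClusterMonitorMBean proxy = JMX.newMBeanProxy(mbsc, name, ClusterMonitorMBean.class);
      // Per the Javadoc above: 0 = live, 1 = offline, 2 = joining, 3 = leaving.
      Map<Node, Integer> statusByNode = proxy.getAllNodeStatus();
      statusByNode.forEach((node, status) -> System.out.println(node + " -> " + status));
    } finally {
      connector.close();
    }
  }
}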
- */ -package org.apache.iotdb.cluster.utils.nodetool; - -import org.apache.iotdb.cluster.utils.nodetool.function.Header; -import org.apache.iotdb.cluster.utils.nodetool.function.LogView; -import org.apache.iotdb.cluster.utils.nodetool.function.Migration; -import org.apache.iotdb.cluster.utils.nodetool.function.Partition; -import org.apache.iotdb.cluster.utils.nodetool.function.Ring; -import org.apache.iotdb.cluster.utils.nodetool.function.Slot; -import org.apache.iotdb.cluster.utils.nodetool.function.Status; -import org.apache.iotdb.db.utils.CommonUtils; - -import com.google.common.collect.Lists; -import io.airlift.airline.Help; - -import java.util.List; - -public class NodeTool { - - public static void main(String... args) { - List> commands = - Lists.newArrayList( - Help.class, - Ring.class, - Partition.class, - Slot.class, - Status.class, - LogView.class, - Migration.class, - Header.class); - - int status = CommonUtils.runCli(commands, args, "nodetool", "Manage your IoTDB cluster"); - System.exit(status); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/Printer.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/Printer.java deleted file mode 100644 index 45ec2f875549..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/Printer.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.utils.nodetool; - -import java.io.PrintStream; - -@SuppressWarnings("java:S106") // for console outputs -public class Printer { - - private static final PrintStream SCREEN_PRINTER = new PrintStream(System.out); - private static final PrintStream ERR_PRINTER = new PrintStream(System.err); - - private Printer() {} - - public static void msgPrintln(String s) { - SCREEN_PRINTER.println(s); - } - - public static void errPrintln(String s) { - ERR_PRINTER.println(s); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Header.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Header.java deleted file mode 100644 index c3994c8e5f4f..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Header.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - *

http://www.apache.org/licenses/LICENSE-2.0 - * - *

Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.iotdb.cluster.utils.nodetool.function; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.utils.nodetool.ClusterMonitorMBean; -import org.apache.iotdb.tsfile.utils.Pair; - -import io.airlift.airline.Command; -import io.airlift.airline.Option; - -import java.util.List; - -import static org.apache.iotdb.cluster.utils.nodetool.Printer.msgPrintln; - -@Command( - name = "header", - description = "Print information about data group whose header is this node") -public class Header extends NodeToolCmd { - - @Option( - title = "raft id", - name = {"-id", "--raftid"}, - description = - "Specify the raft id of data group whose header is this node, default value is 0") - private int id = 0; - - @Override - public void execute(ClusterMonitorMBean proxy) { - try { - List> allNodes = proxy.getDataGroup(id); - if (allNodes == null) { - msgPrintln(BUILDING_CLUSTER_INFO); - return; - } - msgPrintln(String.format("%-20s %30s", "Node Identifier", "Node")); - for (Pair pair : allNodes) { - Node node = pair.left; - msgPrintln( - String.format( - "%-20d->%30s", node.nodeIdentifier, nodeCharacterToString(node, pair.right))); - } - } catch (Exception e) { - msgPrintln(e.getMessage()); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/LogView.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/LogView.java deleted file mode 100644 index a16e56921dea..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/LogView.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.cluster.utils.nodetool.function; - -import org.apache.iotdb.cluster.log.HardState; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.manage.serializable.LogManagerMeta; -import org.apache.iotdb.cluster.log.manage.serializable.SyncLogDequeSerializer; -import org.apache.iotdb.cluster.utils.nodetool.Printer; - -import io.airlift.airline.Command; -import io.airlift.airline.Option; - -import java.io.File; -import java.util.List; - -@Command(name = "log", description = "Print raft logs from a log file") -public class LogView implements Runnable { - - @Option( - title = "detailed information", - name = {"-d", "--detail"}, - description = "Show detail information of logs") - private boolean detail = false; - - @Option( - title = "path", - required = true, - name = {"-path", "--path"}, - description = "Specify a path for accurate hosts information") - private String path = null; - - @Override - public void run() { - SyncLogDequeSerializer logDequeSerializer = new SyncLogDequeSerializer(path); - - List logs = logDequeSerializer.getAllEntriesAfterAppliedIndex(); - HardState state = logDequeSerializer.getHardState(); - LogManagerMeta managerMeta = logDequeSerializer.getMeta(); - - Printer.msgPrintln("-------------------LOG META-------------------------"); - Printer.msgPrintln(managerMeta.toString()); - Printer.msgPrintln("-------------------LOG DATA-------------------------"); - Printer.msgPrintln("-------------------NODE STATE-------------------------"); - Printer.msgPrintln(state.toString()); - Printer.msgPrintln("-------------------NODE STATE-------------------------"); - - Printer.msgPrintln("-------------------LOG DATA FILES-------------------------"); - List dataFileList = logDequeSerializer.getLogDataFileList(); - List indexFileList = logDequeSerializer.getLogIndexFileList(); - for (int i = 0; i < dataFileList.size(); i++) { - Printer.msgPrintln( - "name=" + dataFileList.get(i).getName() + ",length=" + dataFileList.get(i).length()); - Printer.msgPrintln( - "name=" + indexFileList.get(i).getName() + ",length=" + indexFileList.get(i).length()); - } - - Printer.msgPrintln("-------------------LOG DATA FILES-------------------------"); - - int count = 0; - - for (Log log : logs) { - Printer.msgPrintln("Log NO " + count + ": "); - count++; - if (detail) { - Printer.msgPrintln(log.toString()); - } else { - Printer.msgPrintln(log.getClass().getSimpleName()); - } - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Migration.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Migration.java deleted file mode 100644 index a6b0f0c055b4..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Migration.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - *

http://www.apache.org/licenses/LICENSE-2.0 - * - *

Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.iotdb.cluster.utils.nodetool.function; - -import org.apache.iotdb.cluster.exception.LeaderUnknownException; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.utils.nodetool.ClusterMonitorMBean; - -import io.airlift.airline.Command; - -import java.util.Map; -import java.util.Map.Entry; - -import static org.apache.iotdb.cluster.utils.nodetool.Printer.msgPrintln; - -@Command( - name = "migration", - description = - "Print information about how many slots are in the state of data migration for each data group. ") -public class Migration extends NodeToolCmd { - - @Override - public void execute(ClusterMonitorMBean proxy) { - try { - Map groupSlotsMap = proxy.getSlotNumInDataMigration(); - if (groupSlotsMap == null) { - msgPrintln(FAIL_TO_GET_ALL_SLOT_STATUS_INFO); - return; - } - if (groupSlotsMap.isEmpty()) { - msgPrintln("No slots are in the state of data migration, users can change membership."); - } else { - msgPrintln( - "Some slots are in the state of data migration, users can not change membership until the end of data migration:"); - msgPrintln(String.format("%-20s %30s", "Slot num", "Data Group")); - for (Entry entry : groupSlotsMap.entrySet()) { - PartitionGroup group = entry.getKey(); - msgPrintln(String.format("%-20d->%30s", entry.getValue(), partitionGroupToString(group))); - } - } - } catch (LeaderUnknownException e) { - msgPrintln(META_LEADER_UNKNOWN_INFO); - } catch (Exception e) { - msgPrintln(e.getMessage()); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/NodeToolCmd.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/NodeToolCmd.java deleted file mode 100644 index 25e735097eee..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/NodeToolCmd.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
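The Migration command above performs this check interactively; a hedged sketch of how an operator script could apply the same rule programmatically against the MBean is shown below. The helper class and method name are illustrative; the rule itself (membership changes only when no slots are migrating) is the one Migration prints.

import org.apache.iotdb.cluster.partition.PartitionGroup;
import org.apache.iotdb.cluster.utils.nodetool.ClusterMonitorMBean;

import java.util.Map;

public class MembershipGateSketch {

  /** Adding or removing a node is only safe once no data group still has slots migrating. */
  public static boolean canChangeMembership(ClusterMonitorMBean proxy) throws Exception {
    // getSlotNumInDataMigration() throws while the cluster is still building or the
    // meta leader is unknown; an empty map means every slot has finished migrating.
    Map<PartitionGroup, Integer> migrating = proxy.getSlotNumInDataMigration();
    return migrating != null && migrating.isEmpty();
  }
}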
- */ -package org.apache.iotdb.cluster.utils.nodetool.function; - -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.utils.nodetool.ClusterMonitor; -import org.apache.iotdb.cluster.utils.nodetool.ClusterMonitorMBean; - -import com.google.common.base.Throwables; -import io.airlift.airline.Option; -import io.airlift.airline.OptionType; - -import javax.management.JMX; -import javax.management.MBeanServerConnection; -import javax.management.MalformedObjectNameException; -import javax.management.ObjectName; -import javax.management.remote.JMXConnector; -import javax.management.remote.JMXConnectorFactory; -import javax.management.remote.JMXServiceURL; - -import java.io.IOException; -import java.util.Collections; -import java.util.Map; - -import static java.lang.String.format; -import static org.apache.iotdb.cluster.utils.nodetool.Printer.errPrintln; - -@SuppressWarnings("squid:S2068") -public abstract class NodeToolCmd implements Runnable { - - @Option( - type = OptionType.GLOBAL, - name = {"-h", "--host"}, - description = "Node hostname or ip address") - private String host = "127.0.0.1"; - - @Option( - type = OptionType.GLOBAL, - name = {"-p", "--port"}, - description = "Remote jmx agent port number") - private String port = "31999"; - - @Option( - type = OptionType.GLOBAL, - name = {"-u", "--user"}, - description = "The username to access the remote jmx") - private String user = "root"; - - @Option( - type = OptionType.GLOBAL, - name = {"-pw", "--password"}, - description = "The password to access the remote jmx") - private String password = "passw!d"; - - private static final String JMX_URL_FORMAT = "service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi"; - - public static final String BUILDING_CLUSTER_INFO = "The cluster is being created."; - - public static final String META_LEADER_UNKNOWN_INFO = - "Meta group leader is unknown, please try again later."; - - static final String FAIL_TO_GET_ALL_SLOT_STATUS_INFO = - "Fail to get all slot status, please check node status and try again later."; - - @Override - public void run() { - try { - MBeanServerConnection mbsc = connect(); - ObjectName name = new ObjectName(ClusterMonitor.INSTANCE.getMbeanName()); - ClusterMonitorMBean clusterMonitorProxy = - JMX.newMBeanProxy(mbsc, name, ClusterMonitorMBean.class); - execute(clusterMonitorProxy); - } catch (MalformedObjectNameException e) { - errPrintln(e.getMessage()); - } - } - - protected abstract void execute(ClusterMonitorMBean probe); - - private MBeanServerConnection connect() { - MBeanServerConnection mbsc = null; - - try { - String jmxURL = String.format(JMX_URL_FORMAT, host, port); - JMXServiceURL serviceURL = new JMXServiceURL(jmxURL); - Map environment = - Collections.singletonMap(JMXConnector.CREDENTIALS, new String[] {user, password}); - JMXConnector connector = JMXConnectorFactory.connect(serviceURL, environment); - mbsc = connector.getMBeanServerConnection(); - } catch (IOException e) { - Throwable rootCause = Throwables.getRootCause(e); - errPrintln( - format( - "nodetool: Failed to connect to '%s:%s' - %s: '%s'.", - host, port, rootCause.getClass().getSimpleName(), rootCause.getMessage())); - System.exit(1); - } - - return mbsc; - } - - public static String nodeCharacterToString(Node node, NodeCharacter character) { - return String.format("%s (%s)", nodeToString(node), character); - } - - public static String nodeToString(Node node) { - return 
String.format( - "%s:%d:%d:%d", - node.getInternalIp(), node.getMetaPort(), node.getDataPort(), node.getClientPort()); - } - - public static String redirectToQueryMetaLeader(Node node) { - return String.format("Please redirect to query meta group leader %s", nodeToString(node)); - } - - public static String partitionGroupToString(PartitionGroup group) { - StringBuilder stringBuilder = new StringBuilder("["); - if (!group.isEmpty()) { - stringBuilder.append(nodeToString(group.get(0))); - } - for (int i = 1; i < group.size(); i++) { - stringBuilder.append(", ").append(nodeToString(group.get(i))); - } - stringBuilder.append("]"); - return stringBuilder.toString(); - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Partition.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Partition.java deleted file mode 100644 index 7a30292ead75..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Partition.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
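NodeTool.main earlier in this diff delegates to CommonUtils.runCli, which is not part of the removal shown here. The sketch below is an assumption about what such a helper typically does with io.airlift.airline, included only to make the wiring from a command line like "nodetool status -h 127.0.0.1" to the @Command classes above concrete; it is not the actual IoTDB implementation.

import com.google.common.collect.Lists;
import io.airlift.airline.Cli;
import io.airlift.airline.Help;

import java.util.List;

public class RunCliSketch {

  /** Build an airline CLI over the given @Command classes, parse args, and run the match. */
  public static int runCli(
      List<Class<? extends Runnable>> commands, String[] args, String name, String description) {
    Cli.CliBuilder<Runnable> builder =
        Cli.<Runnable>builder(name)
            .withDescription(description)
            .withDefaultCommand(Help.class)
            .withCommands(commands);
    try {
      // For "nodetool status", airline instantiates Status, injects the global
      // -h/-p/-u/-pw options declared on NodeToolCmd, and run() opens the JMX proxy.
      builder.build().parse(args).run();
      return 0;
    } catch (Exception e) {
      System.err.println(e.getMessage());
      return 1;
    }
  }

  public static void main(String[] args) {
    List<Class<? extends Runnable>> commands = Lists.newArrayList(Help.class);
    System.exit(runCli(commands, args, "nodetool", "Manage your IoTDB cluster"));
  }
}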
- */ -package org.apache.iotdb.cluster.utils.nodetool.function; - -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.utils.nodetool.ClusterMonitorMBean; - -import io.airlift.airline.Command; -import io.airlift.airline.Option; -import org.apache.commons.collections4.map.MultiKeyMap; - -import static org.apache.iotdb.cluster.utils.nodetool.Printer.msgPrintln; - -@Command( - name = "partition", - description = "Print the hosts information of specific storage group and time range") -public class Partition extends NodeToolCmd { - - @Option( - title = "path", - required = true, - name = {"-path", "--path"}, - description = "Specify a path for accurate hosts information") - private String path = null; - - @Option( - title = "start time", - name = {"-st", "--starttime"}, - description = "Specify a start time for partition") - private long startTime = System.currentTimeMillis(); - - @Option( - title = "end time", - name = {"-et", "--endtime"}, - description = "Specify a end time for partition") - private long endTime = System.currentTimeMillis(); - - @Option( - title = "metadata", - name = {"-m", "--metadata"}, - description = "Query metadata") - private boolean metadata = false; - - @Override - public void execute(ClusterMonitorMBean proxy) { - if (endTime < startTime) { - endTime = startTime; - } - if (metadata) { - queryMetaPartition(proxy); - } else { - queryDataPartition(proxy); - } - } - - private void queryMetaPartition(ClusterMonitorMBean proxy) { - PartitionGroup partitionGroup = proxy.getMetaPartition(path); - if (partitionGroup == null) { - msgPrintln(BUILDING_CLUSTER_INFO); - } else if (partitionGroup.isEmpty()) { - msgPrintln(String.format("The storage group of path <%s> doesn't exist.", path)); - } else { - msgPrintln(String.format("META<%s>\t->\t%s", path, partitionGroupToString(partitionGroup))); - } - } - - private void queryDataPartition(ClusterMonitorMBean proxy) { - MultiKeyMap timeRangeMapRaftGroup = - proxy.getDataPartition(path, startTime, endTime); - if (timeRangeMapRaftGroup == null) { - msgPrintln(BUILDING_CLUSTER_INFO); - } else if (timeRangeMapRaftGroup.isEmpty()) { - msgPrintln(String.format("The storage group of path <%s> doesn't exist.", path)); - } else { - timeRangeMapRaftGroup.forEach( - (timeRange, raftGroup) -> - msgPrintln( - String.format( - "DATA<%s, %s, %s>\t->\t%s", - path, - timeRange.getKey(0), - timeRange.getKey(1), - partitionGroupToString(raftGroup)))); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Ring.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Ring.java deleted file mode 100644 index 23a62731b162..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Ring.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.utils.nodetool.function; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.utils.nodetool.ClusterMonitorMBean; -import org.apache.iotdb.tsfile.utils.Pair; - -import io.airlift.airline.Command; - -import java.util.List; - -import static org.apache.iotdb.cluster.utils.nodetool.Printer.msgPrintln; - -@Command(name = "ring", description = "Print information about the meta raft group") -public class Ring extends NodeToolCmd { - - @Override - public void execute(ClusterMonitorMBean proxy) { - List> allNodes = proxy.getMetaGroup(); - if (allNodes == null) { - msgPrintln(BUILDING_CLUSTER_INFO); - } else { - msgPrintln(String.format("%-20s %30s", "Node Identifier", "Node")); - for (Pair pair : allNodes) { - Node node = pair.left; - msgPrintln( - String.format( - "%-20d->%30s", node.nodeIdentifier, nodeCharacterToString(node, pair.right))); - } - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Slot.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Slot.java deleted file mode 100644 index c319b01323b1..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Slot.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.cluster.utils.nodetool.function; - -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.utils.nodetool.ClusterMonitorMBean; - -import io.airlift.airline.Command; - -import java.util.Map; -import java.util.Map.Entry; - -import static org.apache.iotdb.cluster.utils.nodetool.Printer.msgPrintln; - -@Command(name = "slot", description = "Print slot information of all data groups") -public class Slot extends NodeToolCmd { - - @Override - public void execute(ClusterMonitorMBean proxy) { - Map raftGroupMapSlotNum = proxy.getSlotNumOfAllNode(); - if (raftGroupMapSlotNum == null) { - msgPrintln(BUILDING_CLUSTER_INFO); - return; - } - showInfo(raftGroupMapSlotNum); - } - - private void showInfo(Map raftGroupMapSlotNum) { - StringBuilder builder = new StringBuilder(); - builder.append(String.format("%-50s %20s", "Raft group", "Slot Number")); - msgPrintln(builder.toString()); - for (Entry entry : raftGroupMapSlotNum.entrySet()) { - builder = new StringBuilder(); - PartitionGroup raftGroup = entry.getKey(); - Integer slotNum = entry.getValue(); - builder.append('('); - if (!raftGroup.isEmpty()) { - builder.append(nodeToString(raftGroup.get(0))); - } - for (int i = 1; i < raftGroup.size(); i++) { - builder.append(", ").append(nodeToString(raftGroup.get(i))); - } - builder.append("),id=").append(raftGroup.getRaftId()); - msgPrintln(String.format("%-50s->%20s", builder.toString(), slotNum)); - } - } -} diff --git a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Status.java b/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Status.java deleted file mode 100644 index 8c6d4828f581..000000000000 --- a/cluster/src/main/java/org/apache/iotdb/cluster/utils/nodetool/function/Status.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.cluster.utils.nodetool.function; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.nodetool.ClusterMonitorMBean; - -import io.airlift.airline.Command; - -import java.util.Map; -import java.util.Map.Entry; - -import static org.apache.iotdb.cluster.utils.nodetool.Printer.msgPrintln; - -@Command(name = "status", description = "Print status of all nodes") -public class Status extends NodeToolCmd { - - public static final int LIVE = 0; - public static final int OFFLINE = 1; - public static final int JOINING = 2; - public static final int LEAVING = 3; - - @Override - public void execute(ClusterMonitorMBean proxy) { - Map statusMap = proxy.getAllNodeStatus(); - if (statusMap == null) { - msgPrintln(BUILDING_CLUSTER_INFO); - return; - } - msgPrintln(String.format("%-30s %10s", "Node", "Status")); - for (Entry entry : statusMap.entrySet()) { - Node node = entry.getKey(); - Integer statusNum = entry.getValue(); - String status; - if (statusNum.equals(LIVE)) { - status = "on"; - } else if (statusNum.equals(OFFLINE)) { - status = "off"; - } else if (statusNum.equals(JOINING)) { - status = "joining"; - } else { - status = "leaving"; - } - msgPrintln(String.format("%-30s->%10s", nodeToString(node), status)); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/client/BaseClientTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/client/BaseClientTest.java deleted file mode 100644 index efa4c70d8da0..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/client/BaseClientTest.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.client; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.ClientUtils; - -import java.io.IOException; -import java.net.ServerSocket; - -public class BaseClientTest { - - protected Node defaultNode = constructDefaultNode(); - - private ServerSocket metaServer; - private Thread metaServerListeningThread; - - private ServerSocket dataServer; - private Thread dataServerListeningThread; - - private ServerSocket metaHeartbeatServer; - private Thread metaHeartbeatServerListeningThread; - - private ServerSocket dataHeartbeatServer; - private Thread dataHeartbeatServerListeningThread; - - public void startMetaServer() throws IOException { - metaServer = new ServerSocket(ClientUtils.getPort(defaultNode, ClientCategory.META)); - metaServerListeningThread = - new Thread( - () -> { - while (!Thread.interrupted()) { - try { - metaServer.accept(); - } catch (IOException e) { - return; - } - } - }); - metaServerListeningThread.start(); - } - - public void startDataServer() throws IOException { - dataServer = new ServerSocket(ClientUtils.getPort(defaultNode, ClientCategory.DATA)); - dataServerListeningThread = - new Thread( - () -> { - while (!Thread.interrupted()) { - try { - dataServer.accept(); - } catch (IOException e) { - return; - } - } - }); - dataServerListeningThread.start(); - } - - public void startMetaHeartbeatServer() throws IOException { - metaHeartbeatServer = - new ServerSocket(ClientUtils.getPort(defaultNode, ClientCategory.META_HEARTBEAT)); - metaHeartbeatServerListeningThread = - new Thread( - () -> { - while (!Thread.interrupted()) { - try { - metaHeartbeatServer.accept(); - } catch (IOException e) { - return; - } - } - }); - metaHeartbeatServerListeningThread.start(); - } - - public void startDataHeartbeatServer() throws IOException { - dataHeartbeatServer = - new ServerSocket(ClientUtils.getPort(defaultNode, ClientCategory.DATA_HEARTBEAT)); - dataHeartbeatServerListeningThread = - new Thread( - () -> { - while (!Thread.interrupted()) { - try { - dataHeartbeatServer.accept(); - } catch (IOException e) { - return; - } - } - }); - dataHeartbeatServerListeningThread.start(); - } - - public void stopMetaServer() throws InterruptedException, IOException { - if (metaServer != null) { - metaServer.close(); - } - if (metaServerListeningThread != null) { - metaServerListeningThread.interrupt(); - metaServerListeningThread.join(); - } - } - - public void stopDataServer() throws IOException, InterruptedException { - if (dataServer != null) { - dataServer.close(); - } - if (dataServerListeningThread != null) { - dataServerListeningThread.interrupt(); - dataServerListeningThread.join(); - } - } - - public void stopMetaHeartbeatServer() throws IOException, InterruptedException { - if (metaHeartbeatServer != null) { - metaHeartbeatServer.close(); - } - if (metaHeartbeatServerListeningThread != null) { - metaHeartbeatServerListeningThread.interrupt(); - metaHeartbeatServerListeningThread.join(); - } - } - - public void stopDataHeartbeatServer() throws IOException, InterruptedException { - if (dataHeartbeatServer != null) { - dataHeartbeatServer.close(); - } - if (dataHeartbeatServerListeningThread != null) { - dataHeartbeatServerListeningThread.interrupt(); - dataHeartbeatServerListeningThread.join(); - } - } - - public Node constructDefaultNode() { - Node node = new Node(); - node.setMetaPort(9003).setInternalIp("localhost").setClientIp("localhost"); - node.setDataPort(40010); - return node; - } -} diff --git 
a/cluster/src/test/java/org/apache/iotdb/cluster/client/ClientManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/client/ClientManagerTest.java deleted file mode 100644 index 19794ab04253..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/client/ClientManagerTest.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.client; - -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.async.AsyncMetaClient; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncMetaClient; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; - -public class ClientManagerTest extends BaseClientTest { - - @Before - public void setUp() throws IOException { - startDataServer(); - startMetaServer(); - startDataHeartbeatServer(); - startMetaHeartbeatServer(); - } - - @After - public void tearDown() throws IOException, InterruptedException { - stopDataServer(); - stopMetaServer(); - stopDataHeartbeatServer(); - stopMetaHeartbeatServer(); - } - - @Test - public void syncClientManagersTest() throws Exception { - // ---------Sync cluster clients manager test------------ - ClientManager clusterManager = - new ClientManager(false, ClientManager.Type.RequestForwardClient); - RaftService.Client syncClusterClient = - clusterManager.borrowSyncClient(defaultNode, ClientCategory.DATA); - - Assert.assertNotNull(syncClusterClient); - Assert.assertTrue(syncClusterClient instanceof SyncDataClient); - Assert.assertEquals(((SyncDataClient) syncClusterClient).getNode(), defaultNode); - Assert.assertTrue(syncClusterClient.getInputProtocol().getTransport().isOpen()); - ((SyncDataClient) syncClusterClient).returnSelf(); - - // cluster test - Assert.assertNull(clusterManager.borrowSyncClient(defaultNode, ClientCategory.DATA_HEARTBEAT)); - Assert.assertNull(clusterManager.borrowSyncClient(defaultNode, ClientCategory.META)); - Assert.assertNull(clusterManager.borrowSyncClient(defaultNode, ClientCategory.META_HEARTBEAT)); - - Assert.assertNull(clusterManager.borrowAsyncClient(defaultNode, ClientCategory.DATA)); - Assert.assertNull(clusterManager.borrowAsyncClient(defaultNode, ClientCategory.DATA_HEARTBEAT)); - Assert.assertNull(clusterManager.borrowAsyncClient(defaultNode, ClientCategory.META)); - Assert.assertNull(clusterManager.borrowAsyncClient(defaultNode, ClientCategory.META_HEARTBEAT)); - - // ---------Sync meta(meta heartbeat) clients manager test------------ - ClientManager metaManager = new ClientManager(false, 
ClientManager.Type.MetaGroupClient); - RaftService.Client metaClient = metaManager.borrowSyncClient(defaultNode, ClientCategory.META); - Assert.assertNotNull(metaClient); - Assert.assertTrue(metaClient instanceof SyncMetaClient); - Assert.assertEquals(((SyncMetaClient) metaClient).getNode(), defaultNode); - Assert.assertTrue(metaClient.getInputProtocol().getTransport().isOpen()); - ((SyncMetaClient) metaClient).returnSelf(); - - RaftService.Client metaHeartClient = - metaManager.borrowSyncClient(defaultNode, ClientCategory.META_HEARTBEAT); - Assert.assertNotNull(metaHeartClient); - Assert.assertTrue(metaHeartClient instanceof SyncMetaClient); - Assert.assertEquals(((SyncMetaClient) metaHeartClient).getNode(), defaultNode); - Assert.assertTrue(metaHeartClient.getInputProtocol().getTransport().isOpen()); - ((SyncMetaClient) metaHeartClient).returnSelf(); - - // cluster test - Assert.assertNull(metaManager.borrowSyncClient(defaultNode, ClientCategory.DATA)); - Assert.assertNull(metaManager.borrowSyncClient(defaultNode, ClientCategory.DATA_HEARTBEAT)); - - Assert.assertNull(metaManager.borrowAsyncClient(defaultNode, ClientCategory.DATA)); - Assert.assertNull(metaManager.borrowAsyncClient(defaultNode, ClientCategory.DATA_HEARTBEAT)); - Assert.assertNull(metaManager.borrowAsyncClient(defaultNode, ClientCategory.META)); - Assert.assertNull(metaManager.borrowAsyncClient(defaultNode, ClientCategory.META_HEARTBEAT)); - - // ---------Sync data(data heartbeat) clients manager test------------ - ClientManager dataManager = new ClientManager(false, ClientManager.Type.DataGroupClient); - - RaftService.Client dataClient = dataManager.borrowSyncClient(defaultNode, ClientCategory.DATA); - Assert.assertNotNull(dataClient); - Assert.assertTrue(dataClient instanceof SyncDataClient); - Assert.assertEquals(((SyncDataClient) dataClient).getNode(), defaultNode); - Assert.assertTrue(dataClient.getInputProtocol().getTransport().isOpen()); - ((SyncDataClient) dataClient).returnSelf(); - - RaftService.Client dataHeartClient = - dataManager.borrowSyncClient(defaultNode, ClientCategory.DATA_HEARTBEAT); - Assert.assertNotNull(dataHeartClient); - Assert.assertTrue(dataHeartClient instanceof SyncDataClient); - Assert.assertEquals(((SyncDataClient) dataHeartClient).getNode(), defaultNode); - Assert.assertTrue(dataHeartClient.getInputProtocol().getTransport().isOpen()); - ((SyncDataClient) dataHeartClient).returnSelf(); - - // cluster test - Assert.assertNull(dataManager.borrowSyncClient(defaultNode, ClientCategory.META)); - Assert.assertNull(dataManager.borrowSyncClient(defaultNode, ClientCategory.META_HEARTBEAT)); - - Assert.assertNull(dataManager.borrowAsyncClient(defaultNode, ClientCategory.DATA)); - Assert.assertNull(dataManager.borrowAsyncClient(defaultNode, ClientCategory.DATA_HEARTBEAT)); - Assert.assertNull(dataManager.borrowAsyncClient(defaultNode, ClientCategory.META)); - Assert.assertNull(dataManager.borrowAsyncClient(defaultNode, ClientCategory.META_HEARTBEAT)); - } - - @Test - public void asyncClientManagersTest() throws Exception { - // ---------async cluster clients manager test------------ - ClientManager clusterManager = new ClientManager(true, ClientManager.Type.RequestForwardClient); - RaftService.AsyncClient clusterClient = - clusterManager.borrowAsyncClient(defaultNode, ClientCategory.DATA); - - Assert.assertNotNull(clusterClient); - Assert.assertTrue(clusterClient instanceof AsyncDataClient); - Assert.assertEquals(((AsyncDataClient) clusterClient).getNode(), defaultNode); - 
Assert.assertTrue(((AsyncDataClient) clusterClient).isValid()); - Assert.assertTrue(((AsyncDataClient) clusterClient).isReady()); - - Assert.assertNotSame( - clusterClient, clusterManager.borrowAsyncClient(defaultNode, ClientCategory.DATA)); - - // cluster test - Assert.assertNull(clusterManager.borrowAsyncClient(defaultNode, ClientCategory.DATA_HEARTBEAT)); - Assert.assertNull(clusterManager.borrowAsyncClient(defaultNode, ClientCategory.META)); - Assert.assertNull(clusterManager.borrowAsyncClient(defaultNode, ClientCategory.META_HEARTBEAT)); - - Assert.assertNull(clusterManager.borrowSyncClient(defaultNode, ClientCategory.DATA)); - Assert.assertNull(clusterManager.borrowSyncClient(defaultNode, ClientCategory.DATA_HEARTBEAT)); - Assert.assertNull(clusterManager.borrowSyncClient(defaultNode, ClientCategory.META)); - Assert.assertNull(clusterManager.borrowSyncClient(defaultNode, ClientCategory.META_HEARTBEAT)); - - // ---------async meta(meta heartbeat) clients manager test------------ - ClientManager metaManager = new ClientManager(true, ClientManager.Type.MetaGroupClient); - RaftService.AsyncClient metaClient = - metaManager.borrowAsyncClient(defaultNode, ClientCategory.META); - Assert.assertNotNull(metaClient); - Assert.assertTrue(metaClient instanceof AsyncMetaClient); - Assert.assertEquals(((AsyncMetaClient) metaClient).getNode(), defaultNode); - Assert.assertTrue(((AsyncMetaClient) metaClient).isValid()); - Assert.assertTrue(((AsyncMetaClient) metaClient).isReady()); - - RaftService.AsyncClient metaHeartClient = - metaManager.borrowAsyncClient(defaultNode, ClientCategory.META_HEARTBEAT); - Assert.assertNotNull(metaHeartClient); - Assert.assertTrue(metaHeartClient instanceof AsyncMetaClient); - Assert.assertEquals(((AsyncMetaClient) metaHeartClient).getNode(), defaultNode); - Assert.assertTrue(((AsyncMetaClient) metaHeartClient).isValid()); - Assert.assertTrue(((AsyncMetaClient) metaHeartClient).isReady()); - - // cluster test - Assert.assertNull(metaManager.borrowAsyncClient(defaultNode, ClientCategory.DATA)); - Assert.assertNull(metaManager.borrowAsyncClient(defaultNode, ClientCategory.DATA_HEARTBEAT)); - - Assert.assertNull(metaManager.borrowSyncClient(defaultNode, ClientCategory.DATA)); - Assert.assertNull(metaManager.borrowSyncClient(defaultNode, ClientCategory.DATA_HEARTBEAT)); - Assert.assertNull(metaManager.borrowSyncClient(defaultNode, ClientCategory.META)); - Assert.assertNull(metaManager.borrowSyncClient(defaultNode, ClientCategory.META_HEARTBEAT)); - - // ---------async data(data heartbeat) clients manager test------------ - ClientManager dataManager = new ClientManager(true, ClientManager.Type.DataGroupClient); - - RaftService.AsyncClient dataClient = - dataManager.borrowAsyncClient(defaultNode, ClientCategory.DATA); - Assert.assertNotNull(dataClient); - Assert.assertTrue(dataClient instanceof AsyncDataClient); - Assert.assertEquals(((AsyncDataClient) dataClient).getNode(), defaultNode); - Assert.assertTrue(((AsyncDataClient) dataClient).isValid()); - Assert.assertTrue(((AsyncDataClient) dataClient).isReady()); - - RaftService.AsyncClient dataHeartClient = - dataManager.borrowAsyncClient(defaultNode, ClientCategory.DATA_HEARTBEAT); - Assert.assertNotNull(dataHeartClient); - Assert.assertTrue(dataHeartClient instanceof AsyncDataClient); - Assert.assertEquals(((AsyncDataClient) dataHeartClient).getNode(), defaultNode); - Assert.assertTrue(((AsyncDataClient) dataHeartClient).isValid()); - Assert.assertTrue(((AsyncDataClient) dataHeartClient).isReady()); - - // cluster test - 
Assert.assertNull(dataManager.borrowAsyncClient(defaultNode, ClientCategory.META)); - Assert.assertNull(dataManager.borrowAsyncClient(defaultNode, ClientCategory.META_HEARTBEAT)); - - Assert.assertNull(dataManager.borrowSyncClient(defaultNode, ClientCategory.DATA)); - Assert.assertNull(dataManager.borrowSyncClient(defaultNode, ClientCategory.DATA_HEARTBEAT)); - Assert.assertNull(dataManager.borrowSyncClient(defaultNode, ClientCategory.META)); - Assert.assertNull(dataManager.borrowSyncClient(defaultNode, ClientCategory.META_HEARTBEAT)); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/client/ClientPoolFactoryTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/client/ClientPoolFactoryTest.java deleted file mode 100644 index 838876250056..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/client/ClientPoolFactoryTest.java +++ /dev/null @@ -1,293 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.client; - -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.async.AsyncMetaClient; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncMetaClient; -import org.apache.iotdb.cluster.config.ClusterConfig; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; -import org.apache.iotdb.cluster.utils.ClientUtils; - -import org.apache.commons.pool2.impl.GenericKeyedObjectPool; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.net.ServerSocket; -import java.time.Duration; -import java.util.ArrayList; -import java.util.List; -import java.util.NoSuchElementException; - -// TODO: add case: invalidate client and verify it can not be return by pool -public class ClientPoolFactoryTest { - private ClusterConfig clusterConfig = ClusterDescriptor.getInstance().getConfig(); - - private long mockMaxWaitTimeoutMs = 10 * 1000L; - private int mockMaxClientPerMember = 10; - private int mockMaxIdleClientPerMember = 5; - - private int maxClientPerNodePerMember = clusterConfig.getMaxClientPerNodePerMember(); - private int maxIdleClientPerNodePerMember = clusterConfig.getMaxIdleClientPerNodePerMember(); - private long waitClientTimeoutMS = clusterConfig.getWaitClientTimeoutMS(); - - private ClientPoolFactory clientPoolFactory; - private MockClientManager mockClientManager; - - @Before - public void setUp() { - clusterConfig.setMaxClientPerNodePerMember(mockMaxClientPerMember); - clusterConfig.setMaxIdleClientPerNodePerMember(mockMaxIdleClientPerMember); - 
clusterConfig.setWaitClientTimeoutMS(mockMaxWaitTimeoutMs); - clientPoolFactory = new ClientPoolFactory(); - mockClientManager = - new MockClientManager() { - @Override - public void returnAsyncClient( - RaftService.AsyncClient client, Node node, ClientCategory category) { - Assert.assertSame(client, asyncClient); - } - - @Override - public void returnSyncClient( - RaftService.Client client, Node node, ClientCategory category) { - Assert.assertSame(client, syncClient); - } - }; - clientPoolFactory.setClientManager(mockClientManager); - } - - @After - public void tearDown() { - clusterConfig.setMaxClientPerNodePerMember(maxClientPerNodePerMember); - clusterConfig.setMaxIdleClientPerNodePerMember(maxIdleClientPerNodePerMember); - clusterConfig.setWaitClientTimeoutMS(waitClientTimeoutMS); - } - - @Test - public void poolConfigTest() throws Exception { - GenericKeyedObjectPool pool = - clientPoolFactory.createAsyncDataPool(ClientCategory.DATA); - Node node = constructDefaultNode(); - - for (int i = 0; i < mockMaxClientPerMember; i++) { - RaftService.AsyncClient client = pool.borrowObject(node); - Assert.assertNotNull(client); - } - - long timeStart = System.currentTimeMillis(); - try { - RaftService.AsyncClient client = pool.borrowObject(node); - Assert.assertNull(client); - } catch (Exception e) { - Assert.assertTrue(e instanceof NoSuchElementException); - } finally { - Assert.assertTrue(System.currentTimeMillis() - timeStart + 10 > mockMaxWaitTimeoutMs); - } - } - - @Test - public void poolRecycleTest() throws Exception { - GenericKeyedObjectPool pool = - clientPoolFactory.createAsyncDataPool(ClientCategory.DATA); - - Node node = constructDefaultNode(); - List clientList = new ArrayList<>(); - for (int i = 0; i < pool.getMaxIdlePerKey(); i++) { - RaftService.AsyncClient client = pool.borrowObject(node); - Assert.assertNotNull(client); - clientList.add(client); - } - - for (RaftService.AsyncClient client : clientList) { - pool.returnObject(node, client); - } - - for (int i = 0; i < pool.getMaxIdlePerKey(); i++) { - RaftService.AsyncClient client = pool.borrowObject(node); - Assert.assertNotNull(client); - Assert.assertTrue(clientList.contains(client)); - } - } - - @Test - public void poolIdleObjectEvictionTest() throws Exception { - GenericKeyedObjectPool pool = - clientPoolFactory.createAsyncDataPool(ClientCategory.DATA); - - Node node = constructDefaultNode(); - List clientList = new ArrayList<>(); - for (int i = 0; i < pool.getMaxTotalPerKey(); i++) { - RaftService.AsyncClient client = pool.borrowObject(node); - Assert.assertNotNull(client); - clientList.add(client); - } - - for (RaftService.AsyncClient client : clientList) { - pool.returnObject(node, client); - } - - Assert.assertEquals(0, pool.getNumActive(node)); - Assert.assertEquals(pool.getMaxIdlePerKey(), pool.getNumIdle(node)); - - for (int i = 0; i < pool.getMaxIdlePerKey(); i++) { - RaftService.AsyncClient client = pool.borrowObject(node); - Assert.assertNotNull(client); - Assert.assertTrue(clientList.contains(client)); - } - } - - @Test - public void createAsyncDataClientTest() throws Exception { - GenericKeyedObjectPool pool = - clientPoolFactory.createAsyncDataPool(ClientCategory.DATA); - - Assert.assertEquals(pool.getMaxTotalPerKey(), mockMaxClientPerMember); - Assert.assertEquals(pool.getMaxWaitDuration(), Duration.ofMillis(mockMaxWaitTimeoutMs)); - - RaftService.AsyncClient asyncClient = null; - - Node node = constructDefaultNode(); - - asyncClient = pool.borrowObject(node); - Assert.assertNotNull(asyncClient); - 
Assert.assertTrue(asyncClient instanceof AsyncDataClient); - } - - @Test - public void createAsyncMetaClientTest() throws Exception { - GenericKeyedObjectPool pool = - clientPoolFactory.createAsyncMetaPool(ClientCategory.META); - - Assert.assertEquals(pool.getMaxTotalPerKey(), mockMaxClientPerMember); - Assert.assertEquals(pool.getMaxWaitDuration(), Duration.ofMillis(mockMaxWaitTimeoutMs)); - - Node node = constructDefaultNode(); - - RaftService.AsyncClient asyncClient = null; - asyncClient = pool.borrowObject(node); - mockClientManager.setAsyncClient(asyncClient); - Assert.assertNotNull(asyncClient); - Assert.assertTrue(asyncClient instanceof AsyncMetaClient); - } - - @Test - public void createSyncDataClientTest() throws Exception { - GenericKeyedObjectPool pool = - clientPoolFactory.createSyncDataPool(ClientCategory.DATA_HEARTBEAT); - - Assert.assertEquals(pool.getMaxTotalPerKey(), mockMaxClientPerMember); - Assert.assertEquals(pool.getMaxWaitDuration(), Duration.ofMillis(mockMaxWaitTimeoutMs)); - - Node node = constructDefaultNode(); - - RaftService.Client client = null; - ServerSocket serverSocket = - new ServerSocket(ClientUtils.getPort(node, ClientCategory.DATA_HEARTBEAT)); - Thread listenThread = null; - try { - listenThread = - new Thread( - () -> { - while (!Thread.interrupted()) { - try { - serverSocket.accept(); - } catch (IOException e) { - return; - } - } - }); - listenThread.start(); - - client = pool.borrowObject(node); - mockClientManager.setSyncClient(client); - Assert.assertNotNull(client); - Assert.assertTrue(client instanceof SyncDataClient); - - } catch (Exception e) { - e.printStackTrace(); - } finally { - ((SyncDataClient) client).returnSelf(); - if (serverSocket != null) { - serverSocket.close(); - listenThread.interrupt(); - listenThread.join(); - } - } - } - - @Test - public void createSyncMetaClientTest() throws Exception { - GenericKeyedObjectPool pool = - clientPoolFactory.createSyncMetaPool(ClientCategory.META_HEARTBEAT); - - Assert.assertEquals(pool.getMaxTotalPerKey(), mockMaxClientPerMember); - Assert.assertEquals(pool.getMaxWaitDuration(), Duration.ofMillis(mockMaxWaitTimeoutMs)); - - Node node = constructDefaultNode(); - - RaftService.Client client = null; - ServerSocket serverSocket = - new ServerSocket(ClientUtils.getPort(node, ClientCategory.META_HEARTBEAT)); - Thread listenThread = null; - try { - listenThread = - new Thread( - () -> { - while (!Thread.interrupted()) { - try { - serverSocket.accept(); - } catch (IOException e) { - return; - } - } - }); - listenThread.start(); - - client = pool.borrowObject(node); - mockClientManager.setSyncClient(client); - Assert.assertNotNull(client); - Assert.assertTrue(client instanceof SyncMetaClient); - - } catch (Exception e) { - e.printStackTrace(); - } finally { - ((SyncMetaClient) client).returnSelf(); - if (serverSocket != null) { - serverSocket.close(); - listenThread.interrupt(); - listenThread.join(); - } - } - } - - private Node constructDefaultNode() { - Node node = new Node(); - node.setMetaPort(9003).setInternalIp("localhost").setClientIp("localhost"); - node.setDataPort(40010); - return node; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/client/MockClientManager.java b/cluster/src/test/java/org/apache/iotdb/cluster/client/MockClientManager.java deleted file mode 100644 index c3153a1820ba..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/client/MockClientManager.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one 
- * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.client; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; - -public abstract class MockClientManager implements IClientManager { - - RaftService.AsyncClient asyncClient; - RaftService.Client syncClient; - - public void setAsyncClient(RaftService.AsyncClient asyncClient) { - this.asyncClient = asyncClient; - } - - public void setSyncClient(RaftService.Client client) { - this.syncClient = client; - } - - @Override - public RaftService.AsyncClient borrowAsyncClient(Node node, ClientCategory category) { - return null; - } - - @Override - public RaftService.Client borrowSyncClient(Node node, ClientCategory category) { - return null; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncDataClientTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncDataClientTest.java deleted file mode 100644 index e9e1b1a3e6b4..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncDataClientTest.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. 
- */ - -package org.apache.iotdb.cluster.client.async; - -import org.apache.iotdb.cluster.client.BaseClientTest; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.config.ClusterConfig; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; - -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TCompactProtocol; -import org.apache.thrift.protocol.TProtocolFactory; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -public class AsyncDataClientTest extends BaseClientTest { - - private final ClusterConfig config = ClusterDescriptor.getInstance().getConfig(); - private TProtocolFactory protocolFactory; - - @Before - public void setUp() { - config.setUseAsyncServer(true); - protocolFactory = - config.isRpcThriftCompressionEnabled() - ? new TCompactProtocol.Factory() - : new TBinaryProtocol.Factory(); - } - - @Test - public void testDataClient() throws Exception { - - AsyncDataClient.AsyncDataClientFactory factory = - new AsyncDataClient.AsyncDataClientFactory(protocolFactory, ClientCategory.DATA); - - AsyncDataClient dataClient = factory.makeObject(defaultNode).getObject(); - - assertEquals( - "AsyncDataClient{node=Node(internalIp:localhost, metaPort:9003, nodeIdentifier:0, " - + "dataPort:40010, clientPort:0, clientIp:localhost),port=40010}", - dataClient.toString()); - assertCheck(dataClient); - } - - @Test - public void testDataHeartbeatClient() throws Exception { - - AsyncDataClient.AsyncDataClientFactory factory = - new AsyncDataClient.AsyncDataClientFactory(protocolFactory, ClientCategory.DATA_HEARTBEAT); - - AsyncDataClient dataClient = factory.makeObject(defaultNode).getObject(); - - assertEquals( - "AsyncDataHeartbeatClient{node=Node(internalIp:localhost, metaPort:9003, nodeIdentifier:0, " - + "dataPort:40010, clientPort:0, clientIp:localhost),port=40011}", - dataClient.toString()); - assertCheck(dataClient); - } - - private void assertCheck(AsyncDataClient dataClient) { - Assert.assertNotNull(dataClient); - assertTrue(dataClient.isReady()); - assertTrue(dataClient.isValid()); - Assert.assertEquals(dataClient.getNode(), defaultNode); - - dataClient.setTimeout(ClusterConstant.getConnectionTimeoutInMS()); - Assert.assertEquals(dataClient.getTimeout(), ClusterConstant.getConnectionTimeoutInMS()); - - dataClient.close(); - Assert.assertNull(dataClient.getCurrMethod()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncMetaClientTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncMetaClientTest.java deleted file mode 100644 index af588696b56a..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/client/async/AsyncMetaClientTest.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. - */ - -package org.apache.iotdb.cluster.client.async; - -import org.apache.iotdb.cluster.client.BaseClientTest; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.config.ClusterConfig; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; - -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TCompactProtocol; -import org.apache.thrift.protocol.TProtocolFactory; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -public class AsyncMetaClientTest extends BaseClientTest { - - private final ClusterConfig config = ClusterDescriptor.getInstance().getConfig(); - private TProtocolFactory protocolFactory; - - @Before - public void setUp() { - config.setUseAsyncServer(true); - protocolFactory = - config.isRpcThriftCompressionEnabled() - ? new TCompactProtocol.Factory() - : new TBinaryProtocol.Factory(); - } - - @Test - public void testMetaClient() throws Exception { - - AsyncMetaClient.AsyncMetaClientFactory factory = - new AsyncMetaClient.AsyncMetaClientFactory(protocolFactory, ClientCategory.META); - - AsyncMetaClient metaClient = factory.makeObject(defaultNode).getObject(); - - assertEquals( - "AsyncMetaClient{node=Node(internalIp:localhost, metaPort:9003, nodeIdentifier:0, " - + "dataPort:40010, clientPort:0, clientIp:localhost),port=9003}", - metaClient.toString()); - assertCheck(metaClient); - } - - @Test - public void testMetaHeartbeatClient() throws Exception { - AsyncMetaClient.AsyncMetaClientFactory factory = - new AsyncMetaClient.AsyncMetaClientFactory(protocolFactory, ClientCategory.META_HEARTBEAT); - - AsyncMetaClient metaClient = factory.makeObject(defaultNode).getObject(); - - assertEquals( - "AsyncMetaHeartbeatClient{node=Node(internalIp:localhost, metaPort:9003, nodeIdentifier:0, " - + "dataPort:40010, clientPort:0, clientIp:localhost),port=9004}", - metaClient.toString()); - assertCheck(metaClient); - } - - private void assertCheck(AsyncMetaClient dataClient) { - Assert.assertNotNull(dataClient); - assertTrue(dataClient.isReady()); - assertTrue(dataClient.isValid()); - Assert.assertEquals(dataClient.getNode(), defaultNode); - - dataClient.setTimeout(ClusterConstant.getConnectionTimeoutInMS()); - Assert.assertEquals(dataClient.getTimeout(), ClusterConstant.getConnectionTimeoutInMS()); - - dataClient.close(); - Assert.assertNull(dataClient.getCurrMethod()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/client/sync/SyncClientAdaptorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/client/sync/SyncClientAdaptorTest.java deleted file mode 100644 index e136a6bf607b..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/client/sync/SyncClientAdaptorTest.java +++ /dev/null @@ -1,464 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.client.sync; - -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.async.AsyncMetaClient; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.snapshot.SimpleSnapshot; -import org.apache.iotdb.cluster.log.snapshot.SnapshotFactory; -import org.apache.iotdb.cluster.rpc.thrift.AddNodeResponse; -import org.apache.iotdb.cluster.rpc.thrift.CheckStatusResponse; -import org.apache.iotdb.cluster.rpc.thrift.ExecutNonQueryReq; -import org.apache.iotdb.cluster.rpc.thrift.GetAggrResultRequest; -import org.apache.iotdb.cluster.rpc.thrift.GetAllPathsResult; -import org.apache.iotdb.cluster.rpc.thrift.GroupByRequest; -import org.apache.iotdb.cluster.rpc.thrift.LastQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.MeasurementSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PreviousFillRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaResp; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotResp; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.SingleSeriesQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.StartUpStatus; -import org.apache.iotdb.cluster.rpc.thrift.TNodeStatus; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.utils.StatusUtils; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.qp.physical.sys.FlushPlan; -import org.apache.iotdb.db.qp.physical.sys.ShowTimeSeriesPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.MeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.Before; -import org.junit.Test; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -public class 
SyncClientAdaptorTest { - - private AsyncMetaClient metaClient; - private AsyncDataClient dataClient; - - private TNodeStatus nodeStatus; - private CheckStatusResponse checkStatusResponse; - private AddNodeResponse addNodeResponse; - private List aggregateResults; - private ByteBuffer getAllMeasurementSchemaResult; - private ByteBuffer fillResult; - private ByteBuffer readFileResult; - private ByteBuffer peekNextNotNullValueResult; - private Map snapshotMap; - private ByteBuffer lastResult; - private List measurementSchemas; - private List timeseriesSchemas; - private List paths; - - @Before - public void setUp() { - nodeStatus = new TNodeStatus(); - checkStatusResponse = new CheckStatusResponse(true, false, true, false, true, true); - addNodeResponse = new AddNodeResponse((int) Response.RESPONSE_AGREE); - aggregateResults = - Arrays.asList( - ByteBuffer.wrap("1".getBytes()), - ByteBuffer.wrap("2".getBytes()), - ByteBuffer.wrap("2".getBytes())); - getAllMeasurementSchemaResult = ByteBuffer.wrap("get all measurement schema".getBytes()); - fillResult = ByteBuffer.wrap("fill result".getBytes()); - readFileResult = ByteBuffer.wrap("read file".getBytes()); - peekNextNotNullValueResult = ByteBuffer.wrap("peek next not null value".getBytes()); - measurementSchemas = new ArrayList<>(); - timeseriesSchemas = new ArrayList<>(); - snapshotMap = new HashMap<>(); - for (int i = 0; i < 3; i++) { - snapshotMap.put(i, new SimpleSnapshot(i, i)); - measurementSchemas.add(new MeasurementSchema(String.valueOf(i), TSDataType.INT64)); - timeseriesSchemas.add(new TimeseriesSchema(String.valueOf(i), TSDataType.INT64)); - } - lastResult = ByteBuffer.wrap("last".getBytes()); - paths = Arrays.asList("1", "2", "3", "4"); - - metaClient = - new AsyncMetaClient(null, null, null) { - @Override - public void removeNode(Node node, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(Response.RESPONSE_AGREE); - } - - @Override - public void matchTerm( - long index, long term, RaftNode header, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(true); - } - - @Override - public void queryNodeStatus(AsyncMethodCallback resultHandler) { - resultHandler.onComplete(nodeStatus); - } - - @Override - public void checkStatus( - StartUpStatus startUpStatus, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(checkStatusResponse); - } - - @Override - public void addNode( - Node node, - StartUpStatus startUpStatus, - AsyncMethodCallback resultHandler) { - resultHandler.onComplete(addNodeResponse); - } - - @Override - public void executeNonQueryPlan( - ExecutNonQueryReq request, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(StatusUtils.OK); - } - }; - - dataClient = - new AsyncDataClient(null, null, null) { - @Override - public void querySingleSeriesByTimestamp( - SingleSeriesQueryRequest request, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(1L); - } - - @Override - public void querySingleSeries( - SingleSeriesQueryRequest request, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(1L); - } - - @Override - public void getNodeList( - RaftNode header, - String path, - int nodeLevel, - AsyncMethodCallback> resultHandler) { - resultHandler.onComplete(Arrays.asList("1", "2", "3")); - } - - @Override - public void getChildNodeInNextLevel( - RaftNode header, String path, AsyncMethodCallback> resultHandler) { - resultHandler.onComplete(new HashSet<>(Arrays.asList("1", "2", "3"))); - } - - @Override - public void getChildNodePathInNextLevel( - RaftNode header, 
String path, AsyncMethodCallback> resultHandler) { - resultHandler.onComplete(new HashSet<>(Arrays.asList("1", "2", "3"))); - } - - @Override - public void getAllMeasurementSchema( - MeasurementSchemaRequest request, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(getAllMeasurementSchemaResult); - } - - @Override - public void pullMeasurementSchema( - PullSchemaRequest request, AsyncMethodCallback resultHandler) { - ByteBuffer byteBuffer = ByteBuffer.allocate(4096); - byteBuffer.putInt(measurementSchemas.size()); - for (IMeasurementSchema schema : measurementSchemas) { - schema.partialSerializeTo(byteBuffer); - } - byteBuffer.flip(); - resultHandler.onComplete(new PullSchemaResp(byteBuffer)); - } - - @Override - public void pullTimeSeriesSchema( - PullSchemaRequest request, AsyncMethodCallback resultHandler) { - ByteBuffer byteBuffer = ByteBuffer.allocate(4096); - byteBuffer.putInt(timeseriesSchemas.size()); - for (TimeseriesSchema schema : timeseriesSchemas) { - schema.serializeTo(byteBuffer); - } - byteBuffer.flip(); - resultHandler.onComplete(new PullSchemaResp(byteBuffer)); - } - - @Override - public void getAggrResult( - GetAggrResultRequest request, AsyncMethodCallback> resultHandler) { - resultHandler.onComplete(aggregateResults); - } - - @Override - public void getUnregisteredTimeseries( - RaftNode header, - List timeseriesList, - AsyncMethodCallback> resultHandler) { - resultHandler.onComplete(timeseriesList.subList(0, timeseriesList.size() / 2)); - } - - @Override - public void getAllPaths( - RaftNode header, - List path, - boolean withAlias, - AsyncMethodCallback resultHandler) { - List dataTypes = new ArrayList<>(); - List underAlignedEntity = new ArrayList<>(); - for (int i = 0; i < path.size(); i++) { - dataTypes.add(TSDataType.DOUBLE.serialize()); - underAlignedEntity.add(false); - } - resultHandler.onComplete(new GetAllPathsResult(path, dataTypes, underAlignedEntity)); - } - - @Override - public void getPathCount( - RaftNode header, - List pathsToQuery, - int level, - AsyncMethodCallback resultHandler) { - resultHandler.onComplete(pathsToQuery.size()); - } - - @Override - public void getAllDevices( - RaftNode header, - List path, - boolean isPrefixMatch, - AsyncMethodCallback> resultHandler) { - resultHandler.onComplete(new HashSet<>(path)); - } - - @Override - public void getGroupByExecutor( - GroupByRequest request, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(1L); - } - - @Override - public void previousFill( - PreviousFillRequest request, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(fillResult); - } - - @Override - public void readFile( - String filePath, - long offset, - int length, - AsyncMethodCallback resultHandler) { - resultHandler.onComplete(readFileResult); - } - - @Override - public void getGroupByResult( - RaftNode header, - long executorId, - long startTime, - long endTime, - AsyncMethodCallback> resultHandler) { - resultHandler.onComplete(aggregateResults); - } - - @Override - public void peekNextNotNullValue( - RaftNode header, - long executorId, - long startTime, - long endTime, - AsyncMethodCallback resultHandler) { - resultHandler.onComplete(peekNextNotNullValueResult); - } - - @Override - public void pullSnapshot( - PullSnapshotRequest request, AsyncMethodCallback resultHandler) { - Map snapshotBytes = new HashMap<>(); - for (Entry integerSimpleSnapshotEntry : - snapshotMap.entrySet()) { - snapshotBytes.put( - integerSimpleSnapshotEntry.getKey(), - integerSimpleSnapshotEntry.getValue().serialize()); 
- } - PullSnapshotResp pullSnapshotResp = new PullSnapshotResp(); - pullSnapshotResp.snapshotBytes = snapshotBytes; - resultHandler.onComplete(pullSnapshotResp); - } - - @Override - public void last( - LastQueryRequest request, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(lastResult); - } - - @Override - public void onSnapshotApplied( - RaftNode header, List slots, AsyncMethodCallback resultHandler) { - resultHandler.onComplete(true); - } - }; - } - - @Test - public void testMetaClient() throws TException, InterruptedException, IOException { - assertEquals( - Response.RESPONSE_AGREE, - (long) SyncClientAdaptor.removeNode(metaClient, TestUtils.getNode(0))); - assertTrue( - SyncClientAdaptor.matchTerm( - metaClient, TestUtils.getNode(0), 1, 1, TestUtils.getRaftNode(0, 0))); - assertEquals(nodeStatus, SyncClientAdaptor.queryNodeStatus(metaClient)); - assertEquals( - checkStatusResponse, SyncClientAdaptor.checkStatus(metaClient, new StartUpStatus())); - assertEquals( - addNodeResponse, - SyncClientAdaptor.addNode(metaClient, TestUtils.getNode(0), new StartUpStatus())); - assertEquals( - StatusUtils.OK, - SyncClientAdaptor.executeNonQuery( - metaClient, new FlushPlan(), TestUtils.getRaftNode(0, 0), TestUtils.getNode(1))); - } - - @Test - public void testDataClient() - throws TException, InterruptedException, IOException, IllegalPathException { - assertEquals( - 1L, - (long) - SyncClientAdaptor.querySingleSeriesByTimestamp( - dataClient, new SingleSeriesQueryRequest())); - assertEquals( - 1L, - (long) SyncClientAdaptor.querySingleSeries(dataClient, new SingleSeriesQueryRequest(), 0)); - assertEquals( - Arrays.asList("1", "2", "3"), - SyncClientAdaptor.getNodeList(dataClient, TestUtils.getRaftNode(0, 0), "root", 0)); - assertEquals( - new HashSet<>(Arrays.asList("1", "2", "3")), - SyncClientAdaptor.getChildNodeInNextLevel(dataClient, TestUtils.getRaftNode(0, 0), "root")); - assertEquals( - new HashSet<>(Arrays.asList("1", "2", "3")), - SyncClientAdaptor.getNextChildren(dataClient, TestUtils.getRaftNode(0, 0), "root")); - - MeasurementSchemaRequest request; - try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream)) { - new ShowTimeSeriesPlan(new PartialPath("root")).serialize(dataOutputStream); - request = - new MeasurementSchemaRequest( - 0, - TestUtils.getRaftNode(0, 0), - TestUtils.getNode(1), - ByteBuffer.wrap(byteArrayOutputStream.toByteArray())); - } - assertEquals( - getAllMeasurementSchemaResult, - SyncClientAdaptor.getAllMeasurementSchema(dataClient, request)); - assertEquals( - measurementSchemas, - SyncClientAdaptor.pullMeasurementSchema(dataClient, new PullSchemaRequest())); - assertEquals( - timeseriesSchemas, - SyncClientAdaptor.pullTimeseriesSchema(dataClient, new PullSchemaRequest())); - assertEquals( - aggregateResults, SyncClientAdaptor.getAggrResult(dataClient, new GetAggrResultRequest())); - assertEquals( - paths.subList(0, paths.size() / 2), - SyncClientAdaptor.getUnregisteredMeasurements( - dataClient, TestUtils.getRaftNode(0, 0), paths)); - List result = - new ArrayList<>( - SyncClientAdaptor.getAllPaths(dataClient, TestUtils.getRaftNode(0, 0), paths, false) - .paths); - assertEquals(paths, result); - assertEquals( - paths.size(), - (int) SyncClientAdaptor.getPathCount(dataClient, TestUtils.getRaftNode(0, 0), paths, 0)); - assertEquals( - new HashSet<>(paths), - SyncClientAdaptor.getAllDevices(dataClient, TestUtils.getRaftNode(0, 0), paths, false)); - 
assertEquals(1L, (long) SyncClientAdaptor.getGroupByExecutor(dataClient, new GroupByRequest())); - assertEquals(fillResult, SyncClientAdaptor.previousFill(dataClient, new PreviousFillRequest())); - assertEquals(readFileResult, SyncClientAdaptor.readFile(dataClient, "a file", 0, 1000)); - assertEquals( - aggregateResults, - SyncClientAdaptor.getGroupByResult(dataClient, TestUtils.getRaftNode(0, 0), 1, 1, 2)); - assertEquals( - peekNextNotNullValueResult, - SyncClientAdaptor.peekNextNotNullValue(dataClient, TestUtils.getRaftNode(0, 0), 1, 1, 1)); - assertEquals( - snapshotMap, - SyncClientAdaptor.pullSnapshot( - dataClient, - new PullSnapshotRequest(), - Arrays.asList(0, 1, 2), - new SnapshotFactory() { - @Override - public Snapshot create() { - return new SimpleSnapshot(0, 0); - } - - @Override - public Snapshot copy(Snapshot origin) { - return new SimpleSnapshot(0, 0); - } - })); - assertEquals( - lastResult, - SyncClientAdaptor.last( - dataClient, - Collections.singletonList(new PartialPath("1")), - Collections.singletonList(TSDataType.INT64.ordinal()), - null, - new QueryContext(), - Collections.emptyMap(), - TestUtils.getRaftNode(0, 0))); - assertTrue( - SyncClientAdaptor.onSnapshotApplied( - dataClient, TestUtils.getRaftNode(0, 0), Arrays.asList(0, 1, 2))); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/client/sync/SyncDataClientTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/client/sync/SyncDataClientTest.java deleted file mode 100644 index c95f883f3c3e..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/client/sync/SyncDataClientTest.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. - */ - -package org.apache.iotdb.cluster.client.sync; - -import org.apache.iotdb.cluster.client.BaseClientTest; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; - -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TCompactProtocol; -import org.apache.thrift.protocol.TProtocolFactory; -import org.apache.thrift.transport.TTransportException; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.net.SocketException; - -import static org.junit.Assert.assertEquals; - -public class SyncDataClientTest extends BaseClientTest { - - private TProtocolFactory protocolFactory; - - @Before - public void setUp() { - protocolFactory = - ClusterDescriptor.getInstance().getConfig().isRpcThriftCompressionEnabled() - ? 
new TCompactProtocol.Factory() - : new TBinaryProtocol.Factory(); - } - - @Test - public void testDataClient() throws IOException, InterruptedException, TTransportException { - try { - startDataServer(); - SyncDataClient dataClient = - new SyncDataClient(protocolFactory, defaultNode, ClientCategory.DATA); - - assertEquals( - "SyncDataClient{node=Node(internalIp:localhost, metaPort:9003, nodeIdentifier:0, " - + "dataPort:40010, clientPort:0, clientIp:localhost),port=40010}", - dataClient.toString()); - - assertCheck(dataClient); - - dataClient = - new SyncDataClient.SyncDataClientFactory(protocolFactory, ClientCategory.DATA) - .makeObject(defaultNode) - .getObject(); - - assertEquals( - "SyncDataClient{node=Node(internalIp:localhost, metaPort:9003, nodeIdentifier:0, " - + "dataPort:40010, clientPort:0, clientIp:localhost),port=40010}", - dataClient.toString()); - - assertCheck(dataClient); - } catch (Exception e) { - Assert.fail(e.getMessage()); - } finally { - stopDataServer(); - } - } - - @Test - public void testDataHeartbeatClient() - throws IOException, InterruptedException, TTransportException { - try { - startDataHeartbeatServer(); - SyncDataClient dataHeartbeatClient = - new SyncDataClient(protocolFactory, defaultNode, ClientCategory.DATA_HEARTBEAT); - - assertCheck(dataHeartbeatClient); - assertEquals( - "SyncDataHeartbeatClient{node=Node(internalIp:localhost, metaPort:9003, nodeIdentifier:0, " - + "dataPort:40010, clientPort:0, clientIp:localhost),port=40011}", - dataHeartbeatClient.toString()); - - dataHeartbeatClient = - new SyncDataClient.SyncDataClientFactory(protocolFactory, ClientCategory.DATA_HEARTBEAT) - .makeObject(defaultNode) - .getObject(); - assertCheck(dataHeartbeatClient); - assertEquals( - "SyncDataHeartbeatClient{node=Node(internalIp:localhost, metaPort:9003, nodeIdentifier:0, " - + "dataPort:40010, clientPort:0, clientIp:localhost),port=40011}", - dataHeartbeatClient.toString()); - } catch (Exception e) { - Assert.fail(e.getMessage()); - } finally { - stopDataHeartbeatServer(); - } - } - - private void assertCheck(SyncDataClient dataClient) throws SocketException { - Assert.assertNotNull(dataClient); - Assert.assertTrue(dataClient.getInputProtocol().getTransport().isOpen()); - Assert.assertEquals(dataClient.getNode(), defaultNode); - - dataClient.setTimeout(ClusterConstant.getConnectionTimeoutInMS()); - Assert.assertEquals(dataClient.getTimeout(), ClusterConstant.getConnectionTimeoutInMS()); - - dataClient.close(); - Assert.assertFalse(dataClient.getInputProtocol().getTransport().isOpen()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/client/sync/SyncMetaClientTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/client/sync/SyncMetaClientTest.java deleted file mode 100644 index 088d8a93377d..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/client/sync/SyncMetaClientTest.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. - */ - -package org.apache.iotdb.cluster.client.sync; - -import org.apache.iotdb.cluster.client.BaseClientTest; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; - -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TCompactProtocol; -import org.apache.thrift.protocol.TProtocolFactory; -import org.apache.thrift.transport.TTransportException; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.net.SocketException; - -import static org.junit.Assert.assertEquals; - -public class SyncMetaClientTest extends BaseClientTest { - - private TProtocolFactory protocolFactory; - - @Before - public void setUp() { - protocolFactory = - ClusterDescriptor.getInstance().getConfig().isRpcThriftCompressionEnabled() - ? new TCompactProtocol.Factory() - : new TBinaryProtocol.Factory(); - } - - @Test - public void testMetaClient() throws IOException, InterruptedException, TTransportException { - try { - startMetaServer(); - SyncMetaClient metaClient = - new SyncMetaClient(protocolFactory, defaultNode, ClientCategory.META); - - assertEquals( - "SyncMetaClient{node=Node(internalIp:localhost, metaPort:9003, nodeIdentifier:0, " - + "dataPort:40010, clientPort:0, clientIp:localhost),port=9003}", - metaClient.toString()); - - assertCheck(metaClient); - - metaClient = - new SyncMetaClient.SyncMetaClientFactory(protocolFactory, ClientCategory.META) - .makeObject(defaultNode) - .getObject(); - - assertEquals( - "SyncMetaClient{node=Node(internalIp:localhost, metaPort:9003, nodeIdentifier:0, " - + "dataPort:40010, clientPort:0, clientIp:localhost),port=9003}", - metaClient.toString()); - - assertCheck(metaClient); - } catch (Exception e) { - e.printStackTrace(); - } finally { - stopMetaServer(); - } - } - - @Test - public void testDataHeartbeatClient() - throws IOException, InterruptedException, TTransportException { - try { - startMetaHeartbeatServer(); - SyncMetaClient metaHeartbeatClient = - new SyncMetaClient(protocolFactory, defaultNode, ClientCategory.META_HEARTBEAT); - - assertCheck(metaHeartbeatClient); - assertEquals( - "SyncMetaHeartbeatClient{node=Node(internalIp:localhost, metaPort:9003, nodeIdentifier:0, " - + "dataPort:40010, clientPort:0, clientIp:localhost),port=9004}", - metaHeartbeatClient.toString()); - - metaHeartbeatClient = - new SyncMetaClient.SyncMetaClientFactory(protocolFactory, ClientCategory.META_HEARTBEAT) - .makeObject(defaultNode) - .getObject(); - assertCheck(metaHeartbeatClient); - assertEquals( - "SyncMetaHeartbeatClient{node=Node(internalIp:localhost, metaPort:9003, nodeIdentifier:0, " - + "dataPort:40010, clientPort:0, clientIp:localhost),port=9004}", - metaHeartbeatClient.toString()); - } catch (Exception e) { - e.printStackTrace(); - } finally { - stopMetaHeartbeatServer(); - } - } - - private void assertCheck(SyncMetaClient metaClient) throws SocketException { - Assert.assertNotNull(metaClient); - 
Assert.assertTrue(metaClient.getInputProtocol().getTransport().isOpen()); - Assert.assertEquals(metaClient.getNode(), defaultNode); - - metaClient.setTimeout(ClusterConstant.getConnectionTimeoutInMS()); - Assert.assertEquals(metaClient.getTimeout(), ClusterConstant.getConnectionTimeoutInMS()); - - metaClient.close(); - Assert.assertFalse(metaClient.getInputProtocol().getTransport().isOpen()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/common/IoTDBTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/IoTDBTest.java deleted file mode 100644 index 577f6fd8a9bd..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/IoTDBTest.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.common; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.exception.StartupException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.mnode.IMeasurementMNode; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.qp.executor.PlanExecutor; -import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan; -import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.db.utils.EnvironmentUtils; -import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.expression.IExpression; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; -import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.MeasurementSchema; - -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -/** IoTDBTests are tests that need a IoTDB daemon to support the tests. 
*/ -public abstract class IoTDBTest { - - private PlanExecutor planExecutor; - private boolean prevEnableAutoSchema; - private boolean prevUseAsyncServer; - - @Before - public void setUp() throws StartupException, QueryProcessException, IllegalPathException { - prevUseAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(true); - prevEnableAutoSchema = IoTDBDescriptor.getInstance().getConfig().isAutoCreateSchemaEnabled(); - IoTDBDescriptor.getInstance().getConfig().setAutoCreateSchemaEnabled(false); - EnvironmentUtils.envSetUp(); - planExecutor = new PlanExecutor(); - prepareSchema(); - prepareData(0, 0, 100); - } - - @After - public void tearDown() throws IOException, StorageEngineException { - EnvironmentUtils.cleanEnv(); - IoTDBDescriptor.getInstance().getConfig().setAutoCreateSchemaEnabled(prevEnableAutoSchema); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(prevUseAsyncServer); - } - - private void prepareSchema() { - for (int i = 0; i < 4; i++) { - // storage groups that has timeseries schema locally - setStorageGroup(TestUtils.getTestSg(i)); - for (int j = 0; j < 10; j++) { - createTimeSeries(i, j); - } - } - // storage groups that has timeseries schema remotely - setStorageGroup(TestUtils.getTestSg(4)); - // storage groups that does not have timeseries schema remotely or locally - for (int i = 5; i < 10; i++) { - setStorageGroup(TestUtils.getTestSg(i)); - } - } - - protected void prepareData(int sgNum, int timeOffset, int size) - throws QueryProcessException, IllegalPathException { - InsertRowPlan insertPlan = new InsertRowPlan(); - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(sgNum))); - String[] measurements = new String[10]; - for (int i = 0; i < measurements.length; i++) { - measurements[i] = TestUtils.getTestMeasurement(i); - } - IMeasurementMNode[] schemas = new IMeasurementMNode[10]; - for (int i = 0; i < measurements.length; i++) { - schemas[i] = TestUtils.getTestMeasurementMNode(i); - } - insertPlan.setMeasurements(measurements); - insertPlan.setNeedInferType(true); - insertPlan.setDataTypes(new TSDataType[insertPlan.getMeasurements().length]); - - Object[] values = new Object[10]; - for (int i = timeOffset; i < timeOffset + size; i++) { - insertPlan.setTime(i); - Arrays.fill(values, String.valueOf(i * 1.0)); - insertPlan.setValues(values); - insertPlan.setMeasurementMNodes(schemas); - planExecutor.insert(insertPlan); - } - } - - protected void setStorageGroup(String storageGroupName) { - try { - planExecutor.setStorageGroup(new SetStorageGroupPlan(new PartialPath(storageGroupName))); - } catch (QueryProcessException | IllegalPathException e) { - // ignore - } - } - - private void createTimeSeries(int sgNum, int seriesNum) { - try { - IMeasurementSchema schema = TestUtils.getTestMeasurementSchema(seriesNum); - planExecutor.processNonQuery( - new CreateTimeSeriesPlan( - new PartialPath( - TestUtils.getTestSg(sgNum) - + IoTDBConstant.PATH_SEPARATOR - + schema.getMeasurementId()), - schema.getType(), - schema.getEncodingType(), - schema.getCompressor(), - schema.getProps(), - Collections.emptyMap(), - Collections.emptyMap(), - null)); - } catch (QueryProcessException - | StorageGroupNotSetException - | StorageEngineException - | IllegalPathException e) { - // ignore - } - } - - protected QueryDataSet query(List pathStrs, IExpression expression) - throws QueryProcessException, QueryFilterOptimizationException, StorageEngineException, - IOException, 
MetadataException, InterruptedException { - QueryContext context = new QueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - RawDataQueryPlan queryPlan = new RawDataQueryPlan(); - queryPlan.setExpression(expression); - List paths = new ArrayList<>(); - for (String pathStr : pathStrs) { - MeasurementPath path = new MeasurementPath(pathStr); - path.setMeasurementSchema(new MeasurementSchema(path.getMeasurement(), TSDataType.DOUBLE)); - paths.add(path); - } - queryPlan.setDeduplicatedPathsAndUpdate(paths); - queryPlan.setPaths(paths); - queryPlan.setExpression(expression); - - return planExecutor.processQuery(queryPlan, context); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestAsyncClient.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/TestAsyncClient.java deleted file mode 100644 index ee4da17fb3cb..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestAsyncClient.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.common; - -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; - -import org.apache.thrift.async.TAsyncClientManager; -import org.apache.thrift.protocol.TProtocolFactory; -import org.apache.thrift.transport.TNonblockingTransport; - -public class TestAsyncClient extends AsyncClient { - - private int serialNum; - - public TestAsyncClient() { - super(null, null, null); - } - - public TestAsyncClient(int serialNum) { - this(); - this.serialNum = serialNum; - } - - TestAsyncClient( - TProtocolFactory protocolFactory, - TAsyncClientManager clientManager, - TNonblockingTransport transport, - int serialNum) { - super(protocolFactory, clientManager, transport); - this.serialNum = serialNum; - } - - public int getSerialNum() { - return serialNum; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestAsyncDataClient.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/TestAsyncDataClient.java deleted file mode 100644 index 65e3011f80b1..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestAsyncDataClient.java +++ /dev/null @@ -1,304 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.common; - -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.ElectionRequest; -import org.apache.iotdb.cluster.rpc.thrift.ExecutNonQueryReq; -import org.apache.iotdb.cluster.rpc.thrift.GetAggrResultRequest; -import org.apache.iotdb.cluster.rpc.thrift.GetAllPathsResult; -import org.apache.iotdb.cluster.rpc.thrift.GroupByRequest; -import org.apache.iotdb.cluster.rpc.thrift.LastQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.MeasurementSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.MultSeriesQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PreviousFillRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaResp; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.SingleSeriesQueryRequest; -import org.apache.iotdb.cluster.server.member.BaseMember; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.service.DataAsyncService; -import org.apache.iotdb.cluster.utils.IOUtils; -import org.apache.iotdb.cluster.utils.StatusUtils; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.executor.PlanExecutor; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.sys.LogPlan; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; - -public class TestAsyncDataClient extends AsyncDataClient { - - private PlanExecutor planExecutor; - private Map dataGroupMemberMap; - - public TestAsyncDataClient(Node node, Map dataGroupMemberMap) - throws IOException { - super(null, null, node, ClientCategory.DATA); - this.dataGroupMemberMap = dataGroupMemberMap; - try { - this.planExecutor = new PlanExecutor(); - } catch (QueryProcessException e) { - throw new IOException(e); - } - } - - @Override - public void fetchSingleSeries( - RaftNode header, long readerId, AsyncMethodCallback resultHandler) { - new Thread( - () -> - new DataAsyncService(dataGroupMemberMap.get(header)) - .fetchSingleSeries(header, readerId, resultHandler)) - .start(); - } - - @Override - public void fetchMultSeries( - RaftNode header, - long readerId, - List paths, - AsyncMethodCallback> resultHandler) { - new Thread( - () -> { - try { - new DataAsyncService(dataGroupMemberMap.get(header)) - .fetchMultSeries(header, readerId, paths, resultHandler); - } catch (TException e) { - 
e.printStackTrace(); - } - }) - .start(); - } - - @Override - public void getAggrResult( - GetAggrResultRequest request, AsyncMethodCallback> resultHandler) { - new Thread( - () -> - new DataAsyncService(dataGroupMemberMap.get(request.getHeader())) - .getAggrResult(request, resultHandler)) - .start(); - } - - @Override - public void querySingleSeries( - SingleSeriesQueryRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> - new DataAsyncService(dataGroupMemberMap.get(request.getHeader())) - .querySingleSeries(request, resultHandler)) - .start(); - } - - @Override - public void queryMultSeries( - MultSeriesQueryRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - try { - new DataAsyncService(dataGroupMemberMap.get(request.getHeader())) - .queryMultSeries(request, resultHandler); - } catch (TException e) { - e.printStackTrace(); - } - }) - .start(); - } - - @Override - public void fetchSingleSeriesByTimestamps( - RaftNode header, - long readerId, - List timestamps, - AsyncMethodCallback resultHandler) { - new Thread( - () -> - new DataAsyncService(dataGroupMemberMap.get(header)) - .fetchSingleSeriesByTimestamps(header, readerId, timestamps, resultHandler)) - .start(); - } - - @Override - public void getAllPaths( - RaftNode header, - List paths, - boolean withAlias, - AsyncMethodCallback resultHandler) { - new Thread( - () -> - new DataAsyncService(dataGroupMemberMap.get(header)) - .getAllPaths(header, paths, withAlias, resultHandler)) - .start(); - } - - @Override - public void executeNonQueryPlan( - ExecutNonQueryReq request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - try { - PhysicalPlan plan = PhysicalPlan.Factory.create(request.planBytes); - if (!(plan instanceof LogPlan)) { - planExecutor.processNonQuery(plan); - } - resultHandler.onComplete(StatusUtils.OK); - } catch (IOException - | QueryProcessException - | StorageGroupNotSetException - | StorageEngineException - | IllegalPathException e) { - resultHandler.onError(e); - } - }) - .start(); - } - - @Override - public void readFile( - String filePath, long offset, int length, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - File file = new File(filePath); - if (file.exists()) { - try { - resultHandler.onComplete(IOUtils.readFile(filePath, offset, length)); - } catch (IOException e) { - resultHandler.onError(e); - } - } else { - if (offset == 0) { - resultHandler.onComplete( - ByteBuffer.wrap((filePath + "@" + offset + "#" + length).getBytes())); - } else { - resultHandler.onComplete(ByteBuffer.allocate(0)); - } - } - }) - .start(); - } - - @Override - public void startElection(ElectionRequest request, AsyncMethodCallback resultHandler) {} - - @Override - public void appendEntry(AppendEntryRequest request, AsyncMethodCallback resultHandler) { - new Thread(() -> resultHandler.onComplete(BaseMember.dummyResponse.get())).start(); - } - - @Override - public void pullTimeSeriesSchema( - PullSchemaRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> - new DataAsyncService(dataGroupMemberMap.get(request.getHeader())) - .pullTimeSeriesSchema(request, resultHandler)) - .start(); - } - - @Override - public void pullMeasurementSchema( - PullSchemaRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> - new DataAsyncService(dataGroupMemberMap.get(request.getHeader())) - .pullMeasurementSchema(request, resultHandler)) - .start(); - } - - @Override - public void querySingleSeriesByTimestamp( - SingleSeriesQueryRequest 
request, AsyncMethodCallback resultHandler) { - new Thread( - () -> - new DataAsyncService(dataGroupMemberMap.get(request.getHeader())) - .querySingleSeriesByTimestamp(request, resultHandler)) - .start(); - } - - @Override - public void getGroupByExecutor(GroupByRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> - new DataAsyncService(dataGroupMemberMap.get(request.getHeader())) - .getGroupByExecutor(request, resultHandler)) - .start(); - } - - @Override - public void getGroupByResult( - RaftNode header, - long executorId, - long startTime, - long endTime, - AsyncMethodCallback> resultHandler) { - new Thread( - () -> - new DataAsyncService(dataGroupMemberMap.get(header)) - .getGroupByResult(header, executorId, startTime, endTime, resultHandler)) - .start(); - } - - @Override - public void previousFill( - PreviousFillRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> - new DataAsyncService(dataGroupMemberMap.get(request.getHeader())) - .previousFill(request, resultHandler)) - .start(); - } - - @Override - public void getAllMeasurementSchema( - MeasurementSchemaRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - new DataAsyncService(dataGroupMemberMap.get(request.getHeader())) - .getAllMeasurementSchema(request, resultHandler); - }) - .start(); - } - - @Override - public void last(LastQueryRequest request, AsyncMethodCallback resultHandler) - throws TException { - new Thread( - () -> - new DataAsyncService(dataGroupMemberMap.get(request.getHeader())) - .last(request, resultHandler)) - .start(); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestAsyncMetaClient.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/TestAsyncMetaClient.java deleted file mode 100644 index 529c5e4f1f7e..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestAsyncMetaClient.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.common; - -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.async.AsyncMetaClient; -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import org.apache.thrift.async.TAsyncClientManager; -import org.apache.thrift.protocol.TProtocolFactory; - -import java.io.IOException; - -public class TestAsyncMetaClient extends AsyncMetaClient { - - private Node node; - - public TestAsyncMetaClient( - TProtocolFactory protocolFactory, TAsyncClientManager clientManager, Node node) - throws IOException { - super(protocolFactory, clientManager, node, ClientCategory.META); - this.node = node; - } - - @Override - public Node getNode() { - return node; - } - - public void setNode(Node node) { - this.node = node; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestDataGroupMember.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/TestDataGroupMember.java deleted file mode 100644 index 6056fff27507..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestDataGroupMember.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.common; - -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.slot.SlotManager; -import org.apache.iotdb.cluster.query.manage.ClusterQueryManager; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.member.DataGroupMember; - -import java.util.Collections; - -public class TestDataGroupMember extends DataGroupMember { - - public TestDataGroupMember() { - super(new PartitionGroup(Collections.singletonList(TestUtils.getNode(0)))); - setQueryManager(new ClusterQueryManager()); - this.slotManager = new SlotManager(ClusterConstant.SLOT_NUM, null, ""); - } - - public TestDataGroupMember(Node thisNode, PartitionGroup allNodes) { - super(allNodes); - this.thisNode = thisNode; - this.slotManager = new SlotManager(ClusterConstant.SLOT_NUM, null, ""); - setQueryManager(new ClusterQueryManager()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestException.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/TestException.java deleted file mode 100644 index 3f636f5ffd17..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestException.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.common; - -public class TestException extends Exception { - - public TestException() { - super("Don't worry, this exception is faked"); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestLog.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/TestLog.java deleted file mode 100644 index 8e00382a5c27..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestLog.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.common; - -import org.apache.iotdb.cluster.log.Log; - -import java.nio.ByteBuffer; -import java.util.Objects; - -public class TestLog extends Log { - - @Override - public ByteBuffer serialize() { - int totalSize = Long.BYTES * 2; - byte[] buffer = new byte[totalSize]; - - ByteBuffer byteBuffer = ByteBuffer.wrap(buffer); - - byteBuffer.putLong(getCurrLogIndex()); - byteBuffer.putLong(getCurrLogTerm()); - - byteBuffer.flip(); - return byteBuffer; - } - - @Override - public void deserialize(ByteBuffer buffer) { - setCurrLogIndex(buffer.getLong()); - setCurrLogTerm(buffer.getLong()); - } - - @Override - public boolean equals(Object obj) { - if (!(obj instanceof TestLog)) { - return false; - } - TestLog obj1 = (TestLog) obj; - return getCurrLogIndex() == obj1.getCurrLogIndex() && getCurrLogTerm() == obj1.getCurrLogTerm(); - } - - @Override - public int hashCode() { - return Objects.hash(getCurrLogIndex(), getCurrLogTerm()); - } - - @Override - public String toString() { - return "TestLog{" + getCurrLogIndex() + "-" + getCurrLogTerm() + "}"; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestLogApplier.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/TestLogApplier.java deleted file mode 100644 index 56ca05c2efa7..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestLogApplier.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.common; - -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogApplier; -import org.apache.iotdb.cluster.log.logtypes.CloseFileLog; -import org.apache.iotdb.cluster.log.logtypes.PhysicalPlanLog; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.executor.PlanExecutor; - -public class TestLogApplier implements LogApplier { - - private PlanExecutor planExecutor; - - @Override - public void apply(Log log) { - try { - if (log instanceof PhysicalPlanLog) { - PhysicalPlanLog physicalPlanLog = (PhysicalPlanLog) log; - getPlanExecutor().processNonQuery(physicalPlanLog.getPlan()); - } else if (log instanceof CloseFileLog) { - CloseFileLog closeFileLog = ((CloseFileLog) log); - try { - StorageEngine.getInstance() - .closeStorageGroupProcessor( - new PartialPath(closeFileLog.getStorageGroupName()), - closeFileLog.getPartitionId(), - closeFileLog.isSeq(), - false); - } catch (StorageGroupNotSetException | IllegalPathException e) { - throw new QueryProcessException(e); - } - } - } catch (Exception e) { - log.setException(e); - } finally { - log.setApplied(true); - } - } - - public PlanExecutor getPlanExecutor() throws QueryProcessException { - return planExecutor == null ? planExecutor = new PlanExecutor() : planExecutor; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestLogManager.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/TestLogManager.java deleted file mode 100644 index 9471f64771a9..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestLogManager.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.common; - -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.manage.RaftLogManager; -import org.apache.iotdb.cluster.log.manage.serializable.SyncLogDequeSerializer; - -public class TestLogManager extends RaftLogManager { - - public TestLogManager(int nodeIdentifier) { - super(new SyncLogDequeSerializer(nodeIdentifier), new TestLogApplier(), "Test"); - } - - @Override - public Snapshot getSnapshot(long minIndex) { - return null; - } - - @Override - public void takeSnapshot() {} -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestManagedSeriesReader.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/TestManagedSeriesReader.java deleted file mode 100644 index cbd9bb6f22fa..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestManagedSeriesReader.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.common; - -import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp; -import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader; -import org.apache.iotdb.tsfile.read.common.BatchData; - -import java.util.NoSuchElementException; - -public class TestManagedSeriesReader implements ManagedSeriesReader, IReaderByTimestamp { - - private BatchData batchData; - private boolean batchUsed = false; - private boolean managedByQueryManager = false; - private boolean hasRemaining = false; - - public TestManagedSeriesReader(BatchData batchData) { - this.batchData = batchData; - } - - @Override - public boolean isManagedByQueryManager() { - return managedByQueryManager; - } - - @Override - public void setManagedByQueryManager(boolean managedByQueryManager) { - this.managedByQueryManager = managedByQueryManager; - } - - @Override - public boolean hasRemaining() { - return hasRemaining; - } - - @Override - public void setHasRemaining(boolean hasRemaining) { - this.hasRemaining = hasRemaining; - } - - @Override - public Object[] getValuesInTimestamps(long[] timestamps, int length) { - Object[] results = new Object[length]; - for (int i = 0; i < length; i++) { - while (batchData.hasCurrent()) { - long currTime = batchData.currentTime(); - if (currTime == timestamps[i]) { - results[i] = batchData.currentValue(); - break; - } else if (currTime > timestamps[i]) { - results[i] = null; - break; - } - batchData.next(); - } - } - return results; - } - - @Override - public boolean hasNextBatch() { - return !batchUsed; - } - - @Override - public BatchData nextBatch() { - if (batchUsed) { - throw new NoSuchElementException(); - } - batchUsed = true; - return batchData; - } - - @Override - public void close() { - // nothing to be done - } -} diff --git 
a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestMetaGroupMember.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/TestMetaGroupMember.java deleted file mode 100644 index 761f5e530d98..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestMetaGroupMember.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.common; - -import org.apache.iotdb.cluster.log.manage.MetaSingleSnapshotLogManager; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; - -public class TestMetaGroupMember extends MetaGroupMember { - - public TestMetaGroupMember() { - super(); - allNodes = new PartitionGroup(); - thisNode = TestUtils.getNode(0); - for (int i = 0; i < 10; i++) { - allNodes.add(TestUtils.getNode(i)); - } - MetaSingleSnapshotLogManager manager = - new MetaSingleSnapshotLogManager(new TestLogApplier(), this); - setLogManager(manager); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestPartitionedLogManager.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/TestPartitionedLogManager.java deleted file mode 100644 index 52681d28c24f..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestPartitionedLogManager.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.common; - -import org.apache.iotdb.cluster.log.LogApplier; -import org.apache.iotdb.cluster.log.manage.PartitionedSnapshotLogManager; -import org.apache.iotdb.cluster.log.snapshot.SnapshotFactory; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.Constants; - -public class TestPartitionedLogManager extends PartitionedSnapshotLogManager { - - public TestPartitionedLogManager() { - super( - new TestLogApplier(), - null, - new Node("localhost", 30001, 1, Constants.RPC_PORT, 6667, "localhost"), - null, - null, - null); - } - - public TestPartitionedLogManager( - LogApplier logApplier, PartitionTable partitionTable, Node header, SnapshotFactory factory) { - super( - logApplier, - partitionTable, - header, - new Node("localhost", 30001, 1, 40001, Constants.RPC_PORT, "localhost"), - factory, - null); - } - - @Override - public void takeSnapshot() {} -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestSnapshot.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/TestSnapshot.java deleted file mode 100644 index 495e233f78c5..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestSnapshot.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.common; - -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.snapshot.SnapshotFactory; -import org.apache.iotdb.cluster.log.snapshot.SnapshotInstaller; -import org.apache.iotdb.cluster.server.member.RaftMember; - -import java.nio.ByteBuffer; -import java.util.Map; -import java.util.Objects; - -public class TestSnapshot extends Snapshot { - - private int id; - private ByteBuffer data; - - public TestSnapshot() { - data = ByteBuffer.wrap(new byte[1024 * 2048]); - } - - public TestSnapshot(int id) { - this.id = id; - data = ByteBuffer.wrap(new byte[1024 * 2048]); - } - - @Override - public ByteBuffer serialize() { - ByteBuffer byteBuffer = ByteBuffer.allocate(Integer.BYTES + 1024 * 2048); - byteBuffer.putInt(id); - byteBuffer.put(data); - byteBuffer.flip(); - return byteBuffer; - } - - @Override - public void deserialize(ByteBuffer buffer) { - id = buffer.getInt(); - data.put(buffer); - } - - @Override - public SnapshotInstaller getDefaultInstaller(RaftMember member) { - return new SnapshotInstaller() { - @Override - public void install(Snapshot snapshot, int slot, boolean isDataMigration) { - // do nothing - } - - @Override - public void install(Map snapshotMap, boolean isDataMigration) { - // do nothing - } - }; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - TestSnapshot that = (TestSnapshot) o; - return id == that.id; - } - - @Override - public int hashCode() { - return Objects.hash(id); - } - - public static class Factory implements SnapshotFactory { - - public static final Factory INSTANCE = new Factory(); - - @Override - public TestSnapshot create() { - return new TestSnapshot(); - } - - @Override - public TestSnapshot copy(TestSnapshot origin) { - TestSnapshot testSnapshot = create(); - testSnapshot.id = origin.id; - testSnapshot.lastLogIndex = origin.lastLogIndex; - testSnapshot.lastLogTerm = origin.lastLogTerm; - return testSnapshot; - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestSyncClient.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/TestSyncClient.java deleted file mode 100644 index f0281b54e1ae..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestSyncClient.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.common; - -import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client; - -import org.apache.thrift.protocol.TProtocol; - -public class TestSyncClient extends Client { - - private int serialNum; - - public TestSyncClient() { - super(null, null); - } - - public TestSyncClient(int serialNum) { - this(); - this.serialNum = serialNum; - } - - TestSyncClient(TProtocol in, TProtocol out, int serialNum) { - super(in, out); - this.serialNum = serialNum; - } - - public int getSerialNum() { - return serialNum; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestUtils.java b/cluster/src/test/java/org/apache/iotdb/cluster/common/TestUtils.java deleted file mode 100644 index 6dcaafb6b9a4..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/common/TestUtils.java +++ /dev/null @@ -1,439 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.common; - -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.logtypes.AddNodeLog; -import org.apache.iotdb.cluster.log.logtypes.EmptyContentLog; -import org.apache.iotdb.cluster.log.logtypes.LargeTestLog; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.StartUpStatus; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.engine.storagegroup.TsFileResource; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.mnode.IMeasurementMNode; -import org.apache.iotdb.db.metadata.mnode.MeasurementMNode; -import org.apache.iotdb.db.qp.executor.PlanExecutor; -import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan; -import org.apache.iotdb.tsfile.common.constant.TsFileConstant; -import org.apache.iotdb.tsfile.exception.write.WriteProcessException; -import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.common.BatchData; -import 
org.apache.iotdb.tsfile.read.common.Path; -import org.apache.iotdb.tsfile.utils.Binary; -import org.apache.iotdb.tsfile.utils.TsPrimitiveType; -import org.apache.iotdb.tsfile.write.TsFileWriter; -import org.apache.iotdb.tsfile.write.record.TSRecord; -import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint; -import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.MeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -public class TestUtils { - - public static long TEST_TIME_OUT_MS = 200; - - private static ByteBuffer seralizePartitionTable = getPartitionTable(3).serialize(); - - // we need to reset the bytebuffer's position because it may be changed. e.g., in - // MetaLogApplierTest.testApplyAddNode() - public static ByteBuffer getSeralizePartitionTable() { - seralizePartitionTable.rewind(); - return seralizePartitionTable; - } - - private TestUtils() { - // util class - } - - public static Node getNode(int nodeNum) { - Node node = new Node(); - node.setInternalIp("192.168.0." + nodeNum); - node.setMetaPort(ClusterDescriptor.getInstance().getConfig().getInternalMetaPort()); - node.setDataPort(ClusterDescriptor.getInstance().getConfig().getInternalDataPort()); - node.setNodeIdentifier(nodeNum); - node.setClientPort(IoTDBDescriptor.getInstance().getConfig().getRpcPort()); - node.setClientIp(IoTDBDescriptor.getInstance().getConfig().getRpcAddress()); - return node; - } - - public static RaftNode getRaftNode(int nodeNum, int raftId) { - return new RaftNode(getNode(nodeNum), raftId); - } - - public static List prepareNodeLogs(int logNum) { - List logList = new ArrayList<>(); - for (int i = 0; i < logNum; i++) { - AddNodeLog log = new AddNodeLog(); - log.setNewNode(getNode(i)); - log.setPartitionTable(getSeralizePartitionTable()); - log.setCurrLogIndex(i); - log.setCurrLogTerm(i); - logList.add(log); - } - return logList; - } - - public static StartUpStatus getStartUpStatus() { - StartUpStatus startUpStatus = new StartUpStatus(); - startUpStatus.setPartitionInterval( - IoTDBDescriptor.getInstance().getConfig().getPartitionInterval()); - startUpStatus.setHashSalt(ClusterConstant.HASH_SALT); - startUpStatus.setReplicationNumber( - ClusterDescriptor.getInstance().getConfig().getReplicationNum()); - startUpStatus.setClusterName(ClusterDescriptor.getInstance().getConfig().getClusterName()); - startUpStatus.setMultiRaftFactor( - ClusterDescriptor.getInstance().getConfig().getMultiRaftFactor()); - List seedNodeList = new ArrayList<>(); - for (int i = 0; i < 100; i += 10) { - seedNodeList.add(getNode(i)); - } - startUpStatus.setSeedNodeList(seedNodeList); - return startUpStatus; - } - - public static List prepareTestLogs(int logNum) { - List logList = new ArrayList<>(); - for (int i = 0; i < logNum; i++) { - Log log = new EmptyContentLog(); - log.setCurrLogIndex(i); - log.setCurrLogTerm(i); - logList.add(log); - } - return logList; - } - - public static List prepareLargeTestLogs(int logNum) { - List logList = new ArrayList<>(); - for (int i = 0; i < logNum; i++) { - Log log = new LargeTestLog(); - log.setCurrLogIndex(i); - log.setCurrLogTerm(i); - log.setByteSize(8192); - logList.add(log); - } - return logList; - } - - public static List getTestTimeValuePairs( - int offset, int size, int step, TSDataType dataType) { - List ret = new ArrayList<>(size); 
- long currTime = offset; - for (int i = 0; i < size; i++) { - TsPrimitiveType value = TsPrimitiveType.getByType(dataType, currTime); - TimeValuePair pair = new TimeValuePair(currTime, value); - currTime += step; - ret.add(pair); - } - return ret; - } - - public static List getTestBatches( - int offset, int size, int batchSize, int step, TSDataType dataType) { - List ret = new ArrayList<>(size); - long currTime = offset; - BatchData currBatch = null; - for (int i = 0; i < size; i++) { - if (i % batchSize == 0) { - if (currBatch != null) { - ret.add(currBatch); - } - currBatch = new BatchData(dataType); - } - TsPrimitiveType value = TsPrimitiveType.getByType(dataType, currTime); - currBatch.putAnObject(currTime, value.getValue()); - currTime += step; - } - if (currBatch != null) { - ret.add(currBatch); - } - return ret; - } - - public static PartitionTable getPartitionTable(int nodeNum) { - List nodes = new ArrayList<>(); - for (int i = 0; i < nodeNum; i++) { - nodes.add(getNode(i)); - } - return new SlotPartitionTable(nodes, getNode(0)); - } - - public static String getTestSg(int i) { - return "root.test" + i; - } - - public static String getTestSeries(int sgNum, int seriesNum) { - return getTestSg(sgNum) + "." + getTestMeasurement(seriesNum); - } - - public static String getTestMeasurement(int seriesNum) { - return "s" + seriesNum; - } - - public static IMeasurementSchema getTestMeasurementSchema(int seriesNum) { - TSDataType dataType = TSDataType.DOUBLE; - TSEncoding encoding = IoTDBDescriptor.getInstance().getConfig().getDefaultDoubleEncoding(); - return new MeasurementSchema( - TestUtils.getTestMeasurement(seriesNum), - dataType, - encoding, - CompressionType.UNCOMPRESSED, - Collections.emptyMap()); - } - - public static IMeasurementMNode getTestMeasurementMNode(int seriesNum) { - TSDataType dataType = TSDataType.DOUBLE; - TSEncoding encoding = IoTDBDescriptor.getInstance().getConfig().getDefaultDoubleEncoding(); - IMeasurementSchema measurementSchema = - new MeasurementSchema( - TestUtils.getTestMeasurement(seriesNum), - dataType, - encoding, - CompressionType.UNCOMPRESSED, - Collections.emptyMap()); - return MeasurementMNode.getMeasurementMNode( - null, measurementSchema.getMeasurementId(), measurementSchema, null); - } - - public static TimeseriesSchema getTestTimeSeriesSchema(int sgNum, int seriesNum) { - TSDataType dataType = TSDataType.DOUBLE; - TSEncoding encoding = IoTDBDescriptor.getInstance().getConfig().getDefaultDoubleEncoding(); - return new TimeseriesSchema( - TestUtils.getTestSeries(sgNum, seriesNum), - dataType, - encoding, - CompressionType.UNCOMPRESSED, - Collections.emptyMap()); - } - - public static BatchData genBatchData(TSDataType dataType, int offset, int size) { - BatchData batchData = new BatchData(dataType); - for (long i = offset; i < offset + size; i++) { - switch (dataType) { - case DOUBLE: - batchData.putDouble(i, i * 1.0); - break; - case TEXT: - batchData.putBinary(i, new Binary(String.valueOf(i))); - break; - case INT64: - batchData.putLong(i, i); - break; - case INT32: - batchData.putInt(i, (int) i); - break; - case FLOAT: - batchData.putFloat(i, i * 1.0f); - break; - case BOOLEAN: - batchData.putBoolean(i, (i % 2) == 1); - break; - } - } - return batchData; - } - - public static boolean batchEquals(BatchData batchA, BatchData batchB) { - if (batchA == batchB) { - return true; - } - if (batchA == null || batchB == null) { - return false; - } - if (!batchA.getDataType().equals(batchB.getDataType())) { - return false; - } - if (batchA.length() != 
batchB.length()) { - return false; - } - while (batchA.hasCurrent()) { - if (!batchB.hasCurrent()) { - return false; - } - long timeA = batchA.currentTime(); - Object valueA = batchA.currentValue(); - long timeB = batchB.currentTime(); - Object valueB = batchB.currentValue(); - if (timeA != timeB || !valueA.equals(valueB)) { - return false; - } - batchA.next(); - batchB.next(); - } - return true; - } - - public static void prepareData() - throws QueryProcessException, StorageGroupNotSetException, StorageEngineException, - IllegalPathException { - InsertRowPlan insertPlan = new InsertRowPlan(); - // data for raw data query and aggregation - // 10 devices (storage groups) - for (int j = 0; j < 10; j++) { - insertPlan.setDevicePath(new PartialPath(getTestSg(j))); - String[] measurements = new String[10]; - IMeasurementMNode[] mNodes = new IMeasurementMNode[10]; - // 10 series each device, all double - for (int i = 0; i < 10; i++) { - measurements[i] = getTestMeasurement(i); - mNodes[i] = TestUtils.getTestMeasurementMNode(i); - } - insertPlan.setMeasurements(measurements); - insertPlan.setNeedInferType(true); - insertPlan.setDataTypes(new TSDataType[insertPlan.getMeasurements().length]); - // the first sequential file - for (int i = 10; i < 20; i++) { - insertPlan.setTime(i); - Object[] values = new Object[10]; - for (int k = 0; k < 10; k++) { - values[k] = String.valueOf(i); - } - insertPlan.setValues(values); - insertPlan.setMeasurementMNodes(mNodes); - PlanExecutor planExecutor = new PlanExecutor(); - planExecutor.processNonQuery(insertPlan); - } - StorageEngine.getInstance().syncCloseAllProcessor(); - // the first unsequential file, not overlapped with the sequential file - for (int i = 0; i < 10; i++) { - insertPlan.setTime(i); - Object[] values = new Object[10]; - for (int k = 0; k < 10; k++) { - values[k] = String.valueOf(i); - } - insertPlan.setValues(values); - insertPlan.setMeasurementMNodes(mNodes); - PlanExecutor planExecutor = new PlanExecutor(); - planExecutor.processNonQuery(insertPlan); - } - StorageEngine.getInstance().syncCloseAllProcessor(); - // the second unsequential file, overlapped with the sequential file - for (int i = 10; i < 20; i++) { - insertPlan.setTime(i); - Object[] values = new Object[10]; - for (int k = 0; k < 10; k++) { - values[k] = String.valueOf(i); - } - insertPlan.setValues(values); - insertPlan.setMeasurementMNodes(mNodes); - PlanExecutor planExecutor = new PlanExecutor(); - planExecutor.processNonQuery(insertPlan); - } - StorageEngine.getInstance().syncCloseAllProcessor(); - } - - // data for fill - insertPlan.setDevicePath(new PartialPath(getTestSg(0))); - String[] measurements = new String[] {getTestMeasurement(10)}; - IMeasurementMNode[] schemas = new IMeasurementMNode[] {TestUtils.getTestMeasurementMNode(10)}; - insertPlan.setMeasurements(measurements); - insertPlan.setNeedInferType(true); - insertPlan.setDataTypes(new TSDataType[insertPlan.getMeasurements().length]); - for (int i : new int[] {0, 10}) { - insertPlan.setTime(i); - Object[] values = new Object[] {String.valueOf(i)}; - insertPlan.setValues(values); - insertPlan.setMeasurementMNodes(schemas); - PlanExecutor planExecutor = new PlanExecutor(); - planExecutor.processNonQuery(insertPlan); - } - } - - /** - * The TsFileResource's path should be consist with the {@link - * org.apache.iotdb.tsfile.utils.FilePathUtils#splitTsFilePath(String)} - */ - public static List prepareTsFileResources( - int sgNum, int fileNum, int seriesNum, int ptNum, boolean asHardLink) - throws IOException, 
WriteProcessException { - List ret = new ArrayList<>(); - for (int i = 0; i < fileNum; i++) { - String fileName = - "target" - + File.separator - + "data" - + File.separator - + String.format( - TestUtils.getTestSg(sgNum) - + File.separator - + 0 - + File.separator - + 0 - + File.separator - + "0-%d-0-0" - + TsFileConstant.TSFILE_SUFFIX, - i); - if (asHardLink) { - fileName = fileName + ".0_0"; - } - File file = new File(fileName); - file.getParentFile().mkdirs(); - try (TsFileWriter writer = new TsFileWriter(file)) { - for (int k = 0; k < seriesNum; k++) { - MeasurementSchema schema = (MeasurementSchema) getTestMeasurementSchema(k); - writer.registerTimeseries(new Path(getTestSg(sgNum)), schema); - } - - for (int j = 0; j < ptNum; j++) { - long timestamp = i * ptNum + j; - TSRecord record = new TSRecord(timestamp, getTestSg(sgNum)); - for (int k = 0; k < seriesNum; k++) { - IMeasurementSchema schema = getTestMeasurementSchema(k); - DataPoint dataPoint = - DataPoint.getDataPoint( - schema.getType(), schema.getMeasurementId(), String.valueOf(k)); - record.addTuple(dataPoint); - } - writer.write(record); - } - } - - TsFileResource resource = new TsFileResource(file); - resource.updateStartTime(TestUtils.getTestSg(sgNum), i * ptNum); - resource.updateEndTime(TestUtils.getTestSg(sgNum), (i + 1) * ptNum - 1); - resource.setMaxPlanIndex(i); - resource.setMinPlanIndex(i); - - resource.serialize(); - ret.add(resource); - } - return ret; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/BaseSingleNodeTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/BaseSingleNodeTest.java deleted file mode 100644 index 0cc8fa3629c0..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/BaseSingleNodeTest.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.integration; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.server.service.DataGroupEngine; -import org.apache.iotdb.cluster.utils.Constants; -import org.apache.iotdb.db.utils.EnvironmentUtils; -import org.apache.iotdb.rpc.IoTDBConnectionException; -import org.apache.iotdb.session.Session; - -import org.junit.After; -import org.junit.Before; - -import java.util.Collections; -import java.util.List; - -public abstract class BaseSingleNodeTest { - - private ClusterIoTDB daemon; - - private boolean useAsyncServer; - private List seedNodeUrls; - private int replicaNum; - private boolean autoCreateSchema; - - @Before - public void setUp() throws Exception { - initConfigs(); - daemon = ClusterIoTDB.getInstance(); - daemon.initLocalEngines(); - DataGroupEngine.getInstance().resetFactory(); - daemon.activeStartNodeMode(); - } - - @After - public void tearDown() throws Exception { - daemon.stop(); - recoverConfigs(); - EnvironmentUtils.cleanEnv(); - } - - private void initConfigs() { - // remember the original values - useAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - seedNodeUrls = ClusterDescriptor.getInstance().getConfig().getSeedNodeUrls(); - replicaNum = ClusterDescriptor.getInstance().getConfig().getReplicationNum(); - autoCreateSchema = ClusterDescriptor.getInstance().getConfig().isEnableAutoCreateSchema(); - // set the cluster as a single node cluster. - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(true); - ClusterDescriptor.getInstance() - .getConfig() - .setSeedNodeUrls( - Collections.singletonList( - String.format("127.0.0.1:9003:40011:%d", Constants.RPC_PORT))); - ClusterDescriptor.getInstance().getConfig().setReplicationNum(1); - ClusterDescriptor.getInstance().getConfig().setEnableAutoCreateSchema(true); - } - - private void recoverConfigs() { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(useAsyncServer); - ClusterDescriptor.getInstance().getConfig().setSeedNodeUrls(seedNodeUrls); - ClusterDescriptor.getInstance().getConfig().setReplicationNum(replicaNum); - ClusterDescriptor.getInstance().getConfig().setEnableAutoCreateSchema(autoCreateSchema); - } - - public Session openSession() throws IoTDBConnectionException { - Session session = new Session("127.0.0.1", Constants.RPC_PORT); - session.open(); - return session; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/integration/SingleNodeTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/integration/SingleNodeTest.java deleted file mode 100644 index 8e16d46e3ddf..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/integration/SingleNodeTest.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.integration; - -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.jdbc.Config; -import org.apache.iotdb.rpc.IoTDBConnectionException; -import org.apache.iotdb.rpc.StatementExecutionException; -import org.apache.iotdb.session.Session; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.Statement; -import java.util.Arrays; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -public class SingleNodeTest extends BaseSingleNodeTest { - - private Session session; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - session = openSession(); - } - - @Override - @After - public void tearDown() throws Exception { - if (session != null) { - session.close(); - } - super.tearDown(); - } - - @Test - public void testInsertRecordsWithIllegalPath() - throws StatementExecutionException, IoTDBConnectionException { - List deviceIds = Arrays.asList("root..ln1", "root.sg.ln1", "root..ln1", "root.sg3.ln1"); - List timestamps = Arrays.asList(3L, 3L, 3L, 3L); - List measurements = Arrays.asList("dev1", "dev2", "dev3"); - List> allMeasurements = - Arrays.asList(measurements, measurements, measurements, measurements); - List values = Arrays.asList("123", "333", "444"); - List> allValues = Arrays.asList(values, values, values, values); - try { - session.insertRecords(deviceIds, timestamps, allMeasurements, allValues); - fail("Exception expected"); - } catch (StatementExecutionException e) { - assertTrue(e.getMessage().contains("root..ln1 is not a legal path")); - } - - List legalDevices = Arrays.asList("root.sg.ln1", "root.sg3.ln1"); - for (String legalDevice : legalDevices) { - for (String measurement : measurements) { - assertTrue( - session.checkTimeseriesExists( - legalDevice + IoTDBConstant.PATH_SEPARATOR + measurement)); - } - } - } - - @Test - public void testDeleteNonExistTimeSeries() - throws StatementExecutionException, IoTDBConnectionException { - session.insertRecord( - "root.sg1.d1", 0, Arrays.asList("t1", "t2", "t3"), Arrays.asList("123", "333", "444")); - session.deleteTimeseries(Arrays.asList("root.sg1.d1.t6", "root.sg1.d1.t2", "root.sg1.d1.t3")); - - assertTrue(session.checkTimeseriesExists("root.sg1.d1.t1")); - assertFalse(session.checkTimeseriesExists("root.sg1.d1.t2")); - assertFalse(session.checkTimeseriesExists("root.sg1.d1.t3")); - } - - @Test - public void testUserPrivilege() throws ClassNotFoundException { - Class.forName(Config.JDBC_DRIVER_NAME); - try (Connection connection = - DriverManager.getConnection( - Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root"); - Statement statement = connection.createStatement()) { - statement.execute("create user user1 '1234'"); - try (Connection connection1 = - DriverManager.getConnection( - Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "user1", "1234"); - Statement userStatement = connection1.createStatement()) { - userStatement.addBatch("create timeseries root.sg1.d1.s1 with datatype=int32"); - userStatement.addBatch("create timeseries root.sg2.d1.s1 with datatype=int32"); - userStatement.executeBatch(); - } catch (Exception e) { - assertEquals( - 
System.lineSeparator() - + "No permissions for this operation CREATE_TIMESERIES for SQL: \"create timeseries root.sg1.d1.s1 with datatype=int32\"" - + System.lineSeparator() - + "No permissions for this operation CREATE_TIMESERIES for SQL: \"create timeseries root.sg2.d1.s1 with datatype=int32\"" - + System.lineSeparator(), - e.getMessage()); - } - } catch (Exception e) { - Assert.fail(e.getMessage()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/CommitLogCallbackTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/CommitLogCallbackTest.java deleted file mode 100644 index 07eff47ba22f..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/CommitLogCallbackTest.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log; - -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.junit.After; -import org.junit.Test; - -import static org.junit.Assert.assertTrue; - -public class CommitLogCallbackTest { - - @Test - public void test() throws InterruptedException { - RaftMember raftMember = new TestMetaGroupMember(); - CommitLogCallback commitLogCallback = new CommitLogCallback(raftMember); - synchronized (raftMember.getSyncLock()) { - new Thread( - () -> { - commitLogCallback.onComplete(null); - }) - .start(); - raftMember.getSyncLock().wait(); - } - assertTrue(true); - raftMember.stop(); - } - - @After - public void tearDown() throws Exception { - EnvironmentUtils.cleanAllDir(); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/CommitLogTaskTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/CommitLogTaskTest.java deleted file mode 100644 index 8cf42add6f58..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/CommitLogTaskTest.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log; - -import org.apache.iotdb.cluster.common.TestLogManager; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.log.manage.RaftLogManager; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.After; -import org.junit.Test; - -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -public class CommitLogTaskTest { - - @After - public void tearDown() throws Exception { - EnvironmentUtils.cleanAllDir(); - } - - @Test - public void test() { - RaftLogManager manager = new TestLogManager(0); - try { - manager.append(TestUtils.prepareTestLogs(10)); - - AtomicBoolean complete = new AtomicBoolean(false); - CommitLogTask task = new CommitLogTask(manager, 9, 9); - AsyncMethodCallback callback = - new AsyncMethodCallback() { - @Override - public void onComplete(Void unused) { - complete.set(true); - } - - @Override - public void onError(Exception e) { - fail(e.getMessage()); - } - }; - task.registerCallback(callback); - - task.run(); - assertEquals(9, manager.getCommitLogIndex()); - assertTrue(complete.get()); - - complete.set(false); - task.run(); - assertFalse(complete.get()); - } finally { - manager.close(); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/HardStateTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/HardStateTest.java deleted file mode 100644 index 5ee3f89be8a1..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/HardStateTest.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log; - -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.Constants; - -import org.junit.Test; - -import java.nio.ByteBuffer; - -import static org.junit.Assert.assertEquals; - -public class HardStateTest { - - @Test - public void testHardState() { - // Not NULL - HardState state = new HardState(); - state.setCurrentTerm(2); - state.setVoteFor(new Node("127.0.0.1", 30000, 0, 40000, Constants.RPC_PORT, "127.0.0.1")); - ByteBuffer buffer = state.serialize(); - HardState newState = HardState.deserialize(buffer); - assertEquals(state, newState); - - // NULL - state.setVoteFor(null); - buffer = state.serialize(); - newState = HardState.deserialize(buffer); - assertEquals(state, newState); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/LogDispatcherTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/LogDispatcherTest.java deleted file mode 100644 index 098d4364b931..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/LogDispatcherTest.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log; - -import org.apache.iotdb.cluster.common.TestAsyncClient; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestSyncClient; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.UnknownLogTypeException; -import org.apache.iotdb.cluster.log.LogDispatcher.SendLogRequest; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntriesRequest; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.atomic.AtomicInteger; - -public class LogDispatcherTest { - - private RaftMember raftMember; - private Map appendedEntries; - private Set downNode; - - @Before - public void setUp() { - appendedEntries = new ConcurrentSkipListMap<>(); - downNode = new HashSet<>(); - raftMember = - new TestMetaGroupMember() { - @Override - public AsyncClient getSendLogAsyncClient(Node node) { - return new TestAsyncClient() { - @Override - public void appendEntry( - AppendEntryRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - if (!downNode.contains(node)) { - try { - resultHandler.onComplete(mockedAppendEntry(request)); - } catch (UnknownLogTypeException e) { - resultHandler.onError(e); - } - } - }) - .start(); - } - - @Override - public void appendEntries( - AppendEntriesRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - if (!downNode.contains(node)) { - try { - resultHandler.onComplete(mockedAppendEntries(request)); - } catch (UnknownLogTypeException e) { - resultHandler.onError(e); - } - } - }) - .start(); - } - }; - } - - @Override - public Client getSyncClient(Node node) { - return new TestSyncClient() { - @Override - public long appendEntry(AppendEntryRequest request) throws TException { - try { - if (!downNode.contains(node)) { - return mockedAppendEntry(request); - } - return -1; - } catch (UnknownLogTypeException e) { - throw new TException(e); - } - } - - @Override - public long appendEntries(AppendEntriesRequest request) throws TException { - try { - if (!downNode.contains(node)) { - return mockedAppendEntries(request); - } - return -1; - } catch (UnknownLogTypeException e) { - throw new TException(e); - } - } - }; - } - }; - PartitionGroup allNodes = new PartitionGroup(); - for (int i = 0; i < 10; i++) { - allNodes.add(TestUtils.getNode(i)); - } - raftMember.setAllNodes(allNodes); - raftMember.setCharacter(NodeCharacter.LEADER); - } - - private long mockedAppendEntry(AppendEntryRequest request) throws 
UnknownLogTypeException { - LogParser logParser = LogParser.getINSTANCE(); - Log parse = logParser.parse(request.entry.duplicate()); - appendedEntries.computeIfAbsent(parse, p -> new AtomicInteger()).incrementAndGet(); - return Response.RESPONSE_AGREE; - } - - private long mockedAppendEntries(AppendEntriesRequest request) throws UnknownLogTypeException { - List entries = request.getEntries(); - List logs = new ArrayList<>(); - for (ByteBuffer entry : entries) { - LogParser logParser = LogParser.getINSTANCE(); - Log parse = logParser.parse(entry.duplicate()); - logs.add(parse); - } - for (Log log : logs) { - appendedEntries.computeIfAbsent(log, p -> new AtomicInteger()).incrementAndGet(); - } - return Response.RESPONSE_AGREE; - } - - @Test - public void testAsync() throws InterruptedException { - boolean useAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(true); - LogDispatcher dispatcher = new LogDispatcher(raftMember); - try { - List logs = TestUtils.prepareTestLogs(10); - for (Log log : logs) { - SendLogRequest request = raftMember.buildSendLogRequest(log); - dispatcher.offer(request); - } - while (!checkResult(logs, 9)) { - // wait - } - } finally { - dispatcher.close(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(useAsyncServer); - } - } - - @Test - public void testSync() throws InterruptedException { - boolean useAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(false); - LogDispatcher dispatcher = new LogDispatcher(raftMember); - try { - List logs = TestUtils.prepareTestLogs(10); - for (Log log : logs) { - SendLogRequest request = raftMember.buildSendLogRequest(log); - dispatcher.offer(request); - } - while (!checkResult(logs, 9)) { - // wait - } - } finally { - dispatcher.close(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(useAsyncServer); - } - } - - @Test - public void testWithFailure() throws InterruptedException { - for (int i = 1; i < 4; i++) { - downNode.add(TestUtils.getNode(i)); - } - LogDispatcher dispatcher = new LogDispatcher(raftMember); - try { - List logs = TestUtils.prepareTestLogs(10); - for (Log log : logs) { - SendLogRequest request = raftMember.buildSendLogRequest(log); - dispatcher.offer(request); - } - while (!checkResult(logs, 6)) { - // wait - } - } finally { - dispatcher.close(); - } - } - - @Test - public void testWithLargeLog() throws InterruptedException { - IoTDBDescriptor.getInstance() - .getConfig() - .setThriftMaxFrameSize(64 * 1024 + IoTDBConstant.LEFT_SIZE_IN_REQUEST); - for (int i = 1; i < 4; i++) { - downNode.add(TestUtils.getNode(i)); - } - LogDispatcher dispatcher = new LogDispatcher(raftMember); - try { - List logs = TestUtils.prepareLargeTestLogs(20); - for (Log log : logs) { - SendLogRequest request = raftMember.buildSendLogRequest(log); - dispatcher.offer(request); - } - while (!checkResult(logs, 6)) { - // wait - } - } finally { - dispatcher.close(); - } - } - - @SuppressWarnings("java:S2925") - public boolean checkResult(List logs, int requestedSuccess) throws InterruptedException { - for (Log log : logs) { - AtomicInteger atomicInteger = appendedEntries.get(log); - if (atomicInteger == null) { - Thread.sleep(10); - return false; - } - if (atomicInteger.get() != requestedSuccess) { - Thread.sleep(10); - return false; - } - } - return true; - } - - @After - public void tearDown() throws Exception { - raftMember.stop(); 
- raftMember.closeLogManager(); - EnvironmentUtils.cleanAllDir(); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/LogParserTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/LogParserTest.java deleted file mode 100644 index 69694480909d..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/LogParserTest.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.exception.UnknownLogTypeException; -import org.apache.iotdb.cluster.log.logtypes.AddNodeLog; -import org.apache.iotdb.cluster.log.logtypes.CloseFileLog; -import org.apache.iotdb.cluster.log.logtypes.EmptyContentLog; -import org.apache.iotdb.cluster.log.logtypes.PhysicalPlanLog; -import org.apache.iotdb.cluster.log.logtypes.RemoveNodeLog; -import org.apache.iotdb.cluster.utils.PlanSerializer; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.sys.LogPlan; -import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan; - -import org.junit.Test; - -import java.io.IOException; -import java.nio.ByteBuffer; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - -public class LogParserTest { - - private LogParser logParser = LogParser.getINSTANCE(); - - @Test - public void testAddNodeLog() throws UnknownLogTypeException { - AddNodeLog log = new AddNodeLog(); - log.setNewNode(TestUtils.getNode(5)); - log.setPartitionTable(TestUtils.getSeralizePartitionTable()); - log.setCurrLogIndex(8); - log.setCurrLogTerm(8); - - ByteBuffer buffer = log.serialize(); - Log serialized = logParser.parse(buffer); - assertEquals(log, serialized); - } - - @Test - public void testPhysicalPlanLog() throws UnknownLogTypeException, IllegalPathException { - PhysicalPlanLog log = new PhysicalPlanLog(); - SetStorageGroupPlan setStorageGroupPlan = - new SetStorageGroupPlan(new PartialPath(TestUtils.getTestSg(5))); - log.setPlan(setStorageGroupPlan); - log.setCurrLogIndex(8); - log.setCurrLogTerm(8); - - ByteBuffer buffer = log.serialize(); - Log serialized = logParser.parse(buffer); - assertEquals(log, serialized); - } - - @Test - public void testCloseFileLog() throws UnknownLogTypeException { - CloseFileLog log = new CloseFileLog(TestUtils.getTestSg(5), 0, false); - log.setCurrLogIndex(8); - log.setCurrLogTerm(8); - - ByteBuffer buffer = log.serialize(); - Log serialized = logParser.parse(buffer); - assertEquals(log, serialized); - } - - @Test - public void testRemoveNodeLog() throws UnknownLogTypeException { - 
RemoveNodeLog log = new RemoveNodeLog(); - log.setPartitionTable(TestUtils.getSeralizePartitionTable()); - log.setRemovedNode(TestUtils.getNode(0)); - log.setCurrLogIndex(8); - log.setCurrLogTerm(8); - - ByteBuffer buffer = log.serialize(); - Log serialized = logParser.parse(buffer); - assertEquals(log, serialized); - } - - @Test - public void testEmptyContentLog() throws UnknownLogTypeException { - EmptyContentLog log = new EmptyContentLog(); - log.setCurrLogIndex(8); - log.setCurrLogTerm(8); - ByteBuffer byteBuffer = log.serialize(); - Log serialized = logParser.parse(byteBuffer); - assertEquals(log, serialized); - } - - @Test - public void testLogPlan() { - AddNodeLog log = new AddNodeLog(TestUtils.getSeralizePartitionTable(), TestUtils.getNode(0)); - log.setMetaLogIndex(1); - try { - LogPlan logPlan = new LogPlan(log.serialize()); - ByteBuffer buffer = ByteBuffer.wrap(PlanSerializer.getInstance().serialize(logPlan)); - PhysicalPlan plan = PhysicalPlan.Factory.create(buffer); - LogParser.getINSTANCE().parse(((LogPlan) plan).getLog()); - } catch (IllegalPathException | IOException | UnknownLogTypeException e) { - fail(); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/applier/AsyncDataLogApplierTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/applier/AsyncDataLogApplierTest.java deleted file mode 100644 index af86c48b57af..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/applier/AsyncDataLogApplierTest.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log.applier; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogApplier; -import org.apache.iotdb.cluster.log.logtypes.EmptyContentLog; -import org.apache.iotdb.cluster.log.logtypes.PhysicalPlanLog; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import java.util.concurrent.ConcurrentSkipListSet; - -import static org.junit.Assert.assertEquals; - -public class AsyncDataLogApplierTest { - - private List logsToApply; - private Set appliedLogs; - - @Before - public void setUp() throws Exception { - logsToApply = new ArrayList<>(); - appliedLogs = new ConcurrentSkipListSet<>(); - IoTDB.configManager.init(); - for (int i = 0; i < 10; i++) { - IoTDB.schemaProcessor.setStorageGroup(new PartialPath(TestUtils.getTestSg(i))); - } - } - - @After - public void tearDown() throws IOException { - IoTDB.configManager.clear(); - EnvironmentUtils.cleanAllDir(); - } - - @Test - public void test() throws IllegalPathException, InterruptedException { - LogApplier dummyApplier = - log -> { - if (log instanceof PhysicalPlanLog) { - PhysicalPlanLog physicalPlanLog = (PhysicalPlanLog) log; - PhysicalPlan plan = physicalPlanLog.getPlan(); - if (plan instanceof InsertRowPlan) { - appliedLogs.add(log); - log.setApplied(true); - } - } else { - // make sure all previous insertions are applied before applying the last log - if (appliedLogs.size() == 10) { - appliedLogs.add(log); - log.setApplied(true); - } - } - }; - AsyncDataLogApplier asyncDataLogApplier = new AsyncDataLogApplier(dummyApplier, "test"); - try { - for (int i = 0; i < 10; i++) { - PhysicalPlan plan = - new InsertRowPlan( - new PartialPath(TestUtils.getTestSg(i)), i, new String[0], new String[0]); - PhysicalPlanLog log = new PhysicalPlanLog(plan); - log.setCurrLogIndex(i); - logsToApply.add(log); - } - Log finalLog = new EmptyContentLog(); - finalLog.setCurrLogIndex(10); - logsToApply.add(finalLog); - - for (Log log : logsToApply) { - asyncDataLogApplier.apply(log); - } - - synchronized (finalLog) { - while (!finalLog.isApplied()) { - finalLog.wait(); - } - } - assertEquals(11, appliedLogs.size()); - } finally { - asyncDataLogApplier.close(); - } - } - - @Test - public void testParallel() { - LogApplier dummyApplier = - log -> { - if (log instanceof PhysicalPlanLog) { - PhysicalPlanLog physicalPlanLog = (PhysicalPlanLog) log; - PhysicalPlan plan = physicalPlanLog.getPlan(); - if (plan instanceof InsertRowPlan) { - appliedLogs.add(log); - log.setApplied(true); - } - } else { - appliedLogs.add(log); - log.setApplied(true); - } - }; - - AsyncDataLogApplier asyncDataLogApplier = new AsyncDataLogApplier(dummyApplier, "test"); - - try { - for (int i = 0; i < 10; i++) { - int finalI = i; - new Thread( - () -> { - List threadLogsToApply = new ArrayList<>(); - for (int j = 0; j < 10; j++) { - PhysicalPlan plan = null; - try { - plan = - new InsertRowPlan( - new PartialPath(TestUtils.getTestSg(finalI)), - j, - new String[0], - new String[0]); - } catch (IllegalPathException e) { - // ignore 
- } - PhysicalPlanLog log = new PhysicalPlanLog(plan); - log.setCurrLogIndex(finalI * 11 + j); - threadLogsToApply.add(log); - } - Log finalLog = new EmptyContentLog(); - finalLog.setCurrLogIndex(finalI * 11 + 10); - threadLogsToApply.add(finalLog); - - for (Log log : threadLogsToApply) { - asyncDataLogApplier.apply(log); - } - }) - .start(); - } - - while (appliedLogs.size() < 11 * 10) {} - - assertEquals(110, appliedLogs.size()); - } finally { - asyncDataLogApplier.close(); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/applier/DataLogApplierTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/applier/DataLogApplierTest.java deleted file mode 100644 index 110401264e53..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/applier/DataLogApplierTest.java +++ /dev/null @@ -1,499 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.applier; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.IClientManager; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.common.IoTDBTest; -import org.apache.iotdb.cluster.common.TestAsyncMetaClient; -import org.apache.iotdb.cluster.common.TestDataGroupMember; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.coordinator.Coordinator; -import org.apache.iotdb.cluster.log.logtypes.CloseFileLog; -import org.apache.iotdb.cluster.log.logtypes.PhysicalPlanLog; -import org.apache.iotdb.cluster.metadata.CSchemaProcessor; -import org.apache.iotdb.cluster.metadata.MetaPuller; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.GetAllPathsResult; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaResp; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.TNodeStatus; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.cluster.server.monitor.NodeStatusManager; -import org.apache.iotdb.cluster.server.service.DataAsyncService; -import org.apache.iotdb.cluster.server.service.MetaAsyncService; -import 
org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.IoTDBException; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.exception.StartupException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.engine.storagegroup.DataRegion; -import org.apache.iotdb.db.engine.storagegroup.DataRegion.TimePartitionFilter; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.metadata.StorageGroupAlreadySetException; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.mnode.IMeasurementMNode; -import org.apache.iotdb.db.qp.executor.PlanExecutor; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.DeletePlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowsPlan; -import org.apache.iotdb.db.qp.physical.sys.ClearCachePlan; -import org.apache.iotdb.db.qp.physical.sys.CreateMultiTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.FlushPlan; -import org.apache.iotdb.db.qp.physical.sys.MergePlan; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException; -import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding; -import org.apache.iotdb.tsfile.read.common.RowRecord; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import junit.framework.TestCase; -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -public class DataLogApplierTest extends IoTDBTest { - - private static final Logger logger = LoggerFactory.getLogger(DataLogApplierTest.class); - private boolean partialWriteEnabled; - private boolean isPartitionEnabled; - private IClientManager clientManager; - - private TestMetaGroupMember testMetaGroupMember = - new TestMetaGroupMember() { - @Override - public boolean syncLeader(RaftMember.CheckConsistency checkConsistency) { - try { - // for testApplyCreateMultiTimeseiresWithPulling() - IoTDB.schemaProcessor.setStorageGroup(new PartialPath("root.sg2")); - } catch (StorageGroupAlreadySetException e) { - logger.warn("[may ignore me in tests] {}", e.getMessage(), e); - } catch (MetadataException e) { - logger.error("Cannot set sg for test", e); - } - return true; - } - - @Override - public DataGroupMember getLocalDataMember(RaftNode header, Object request) { - return testDataGroupMember; - } - - @Override - public AsyncClient getAsyncClient(Node node) { - try { - return new TestAsyncMetaClient(null, null, 
node) { - @Override - public void queryNodeStatus(AsyncMethodCallback resultHandler) { - new Thread( - () -> - new MetaAsyncService(testMetaGroupMember) - .queryNodeStatus(resultHandler)) - .start(); - } - }; - } catch (IOException e) { - return null; - } - } - }; - - private TestDataGroupMember testDataGroupMember = - new TestDataGroupMember( - TestUtils.getNode(10), - new PartitionGroup(Collections.singletonList(TestUtils.getNode(10)))); - - private DataLogApplier applier; - - @Override - @Before - public void setUp() throws StartupException, QueryProcessException, IllegalPathException { - IoTDB.setSchemaProcessor(CSchemaProcessor.getInstance()); - testMetaGroupMember.setCoordinator(new Coordinator()); - MetaPuller.getInstance().init(testMetaGroupMember); - super.setUp(); - MetaPuller.getInstance().init(testMetaGroupMember); - PartitionGroup allNodes = new PartitionGroup(); - for (int i = 0; i < 100; i += 10) { - allNodes.add(TestUtils.getNode(i)); - } - - testMetaGroupMember.setAllNodes(allNodes); - testMetaGroupMember.setPartitionTable(new SlotPartitionTable(allNodes, TestUtils.getNode(0))); - testMetaGroupMember.setThisNode(TestUtils.getNode(0)); - - testMetaGroupMember.setLeader(testMetaGroupMember.getThisNode()); - testDataGroupMember.setLeader(TestUtils.getNode(10)); - testDataGroupMember.setCharacter(NodeCharacter.LEADER); - testMetaGroupMember.setCharacter(NodeCharacter.LEADER); - NodeStatusManager.getINSTANCE().setMetaGroupMember(testMetaGroupMember); - partialWriteEnabled = IoTDBDescriptor.getInstance().getConfig().isEnablePartialInsert(); - IoTDBDescriptor.getInstance().getConfig().setEnablePartialInsert(false); - isPartitionEnabled = IoTDBDescriptor.getInstance().getConfig().isEnablePartition(); - IoTDBDescriptor.getInstance().getConfig().setEnablePartition(true); - clientManager = ClusterIoTDB.getInstance().getClientManager(); - ClusterIoTDB.getInstance() - .setClientManager( - new IClientManager() { - @Override - public AsyncClient borrowAsyncClient(Node node, ClientCategory category) { - try { - AsyncDataClient dataClient = - new AsyncDataClient(null, null, node, ClientCategory.DATA) { - @Override - public void getAllPaths( - RaftNode header, - List path, - boolean withAlias, - AsyncMethodCallback resultHandler) { - new Thread( - () -> - new DataAsyncService(testDataGroupMember) - .getAllPaths(header, path, withAlias, resultHandler)) - .start(); - } - - @Override - public void pullTimeSeriesSchema( - PullSchemaRequest request, - AsyncMethodCallback resultHandler) { - new Thread( - () -> { - List timeseriesSchemas = new ArrayList<>(); - for (String path : request.prefixPaths) { - if (path.startsWith(TestUtils.getTestSg(4))) { - for (int i = 0; i < 10; i++) { - timeseriesSchemas.add( - TestUtils.getTestTimeSeriesSchema(4, i)); - } - } else if (path.startsWith(TestUtils.getTestSg(1)) - || path.startsWith(TestUtils.getTestSg(2)) - || path.startsWith(TestUtils.getTestSg(3))) { - // do nothing - } else if (!path.startsWith(TestUtils.getTestSg(5))) { - resultHandler.onError( - new StorageGroupNotSetException(path)); - return; - } - } - PullSchemaResp resp = new PullSchemaResp(); - // serialize the schemas - ByteArrayOutputStream byteArrayOutputStream = - new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = - new DataOutputStream(byteArrayOutputStream); - try { - dataOutputStream.writeInt(timeseriesSchemas.size()); - for (TimeseriesSchema timeseriesSchema : timeseriesSchemas) { - timeseriesSchema.serializeTo(dataOutputStream); - } - } catch (IOException ignored) { - 
// unreachable for we are using a ByteArrayOutputStream - } - resp.setSchemaBytes(byteArrayOutputStream.toByteArray()); - resultHandler.onComplete(resp); - }) - .start(); - } - - @Override - public void pullMeasurementSchema( - PullSchemaRequest request, - AsyncMethodCallback resultHandler) { - new Thread( - () -> - new DataAsyncService(testDataGroupMember) - .pullMeasurementSchema(request, resultHandler)) - .start(); - } - }; - return dataClient; - } catch (Exception e) { - return null; - } - } - - @Override - public RaftService.Client borrowSyncClient(Node node, ClientCategory category) { - return null; - } - - @Override - public void returnAsyncClient( - AsyncClient client, Node node, ClientCategory category) {} - - @Override - public void returnSyncClient( - RaftService.Client client, Node node, ClientCategory category) {} - }); - ((CSchemaProcessor) IoTDB.schemaProcessor).setMetaGroupMember(testMetaGroupMember); - testDataGroupMember.setMetaGroupMember(testMetaGroupMember); - applier = new DataLogApplier(testMetaGroupMember, testDataGroupMember); - } - - @Override - @After - public void tearDown() throws IOException, StorageEngineException { - testDataGroupMember.stop(); - testDataGroupMember.closeLogManager(); - testMetaGroupMember.stop(); - testMetaGroupMember.closeLogManager(); - super.tearDown(); - ClusterIoTDB.getInstance().setClientManager(clientManager); - NodeStatusManager.getINSTANCE().setMetaGroupMember(null); - IoTDBDescriptor.getInstance().getConfig().setEnablePartialInsert(partialWriteEnabled); - IoTDBDescriptor.getInstance().getConfig().setEnablePartition(isPartitionEnabled); - } - - @Test - public void testApplyInsert() - throws QueryProcessException, IOException, QueryFilterOptimizationException, - StorageEngineException, MetadataException, InterruptedException { - InsertRowPlan insertPlan = new InsertRowPlan(); - PhysicalPlanLog log = new PhysicalPlanLog(); - log.setPlan(insertPlan); - - // this series is already created - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(1))); - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(1))); - insertPlan.setTime(1); - insertPlan.setMeasurements(new String[] {TestUtils.getTestMeasurement(0)}); - insertPlan.setDataTypes(new TSDataType[insertPlan.getMeasurements().length]); - insertPlan.setValues(new Object[] {"1.0"}); - insertPlan.setNeedInferType(true); - insertPlan.setMeasurementMNodes(new IMeasurementMNode[] {TestUtils.getTestMeasurementMNode(0)}); - - applier.apply(log); - QueryDataSet dataSet = query(Collections.singletonList(TestUtils.getTestSeries(1, 0)), null); - assertTrue(dataSet.hasNext()); - RowRecord record = dataSet.next(); - assertEquals(1, record.getTimestamp()); - assertEquals(1, record.getFields().size()); - assertEquals(1.0, record.getFields().get(0).getDoubleV(), 0.00001); - assertFalse(dataSet.hasNext()); - - // this series is not created but can be fetched - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(4))); - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(4))); - applier.apply(log); - dataSet = query(Collections.singletonList(TestUtils.getTestSeries(4, 0)), null); - assertTrue(dataSet.hasNext()); - record = dataSet.next(); - assertEquals(1, record.getTimestamp()); - assertEquals(1, record.getFields().size()); - assertEquals(1.0, record.getFields().get(0).getDoubleV(), 0.00001); - assertFalse(dataSet.hasNext()); - - // this series does not exists any where - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(5))); - insertPlan.setDevicePath(new 
PartialPath(TestUtils.getTestSg(5))); - applier.apply(log); - assertEquals( - "org.apache.iotdb.db.exception.metadata.PathNotExistException: Path [root.test5.s0] does not exist", - log.getException().getMessage()); - - // this storage group is not even set - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(16))); - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(16))); - applier.apply(log); - assertEquals( - "org.apache.iotdb.db.exception.StorageEngineException: org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException: Storage group is not set for current seriesPath: [root.test16]", - log.getException().getMessage()); - } - - @Test - public void testApplyBatchInsert() - throws MetadataException, QueryProcessException, StorageEngineException, IOException, - InterruptedException, QueryFilterOptimizationException { - InsertRowsPlan insertRowsPlan = new InsertRowsPlan(); - PhysicalPlanLog log = new PhysicalPlanLog(); - log.setPlan(insertRowsPlan); - - for (int i = 1; i <= 4; i++) { - InsertRowPlan insertPlan = new InsertRowPlan(); - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(i))); - insertPlan.setTime(1); - insertPlan.setNeedInferType(true); - insertPlan.setMeasurements(new String[] {TestUtils.getTestMeasurement(0)}); - insertPlan.setDataTypes(new TSDataType[insertPlan.getMeasurements().length]); - insertPlan.setValues(new Object[] {"1.0"}); - insertPlan.setNeedInferType(true); - insertPlan.setMeasurementMNodes( - new IMeasurementMNode[] {TestUtils.getTestMeasurementMNode(0)}); - insertRowsPlan.addOneInsertRowPlan(insertPlan, i - 1); - } - - applier.apply(log); - - for (int i = 1; i <= 4; i++) { - QueryDataSet dataSet = query(Collections.singletonList(TestUtils.getTestSeries(i, 0)), null); - assertTrue(dataSet.hasNext()); - RowRecord record = dataSet.next(); - assertEquals(1, record.getTimestamp()); - assertEquals(1, record.getFields().size()); - assertEquals(1.0, record.getFields().get(0).getDoubleV(), 0.00001); - assertFalse(dataSet.hasNext()); - } - } - - @Test - public void testApplyDeletion() - throws QueryProcessException, MetadataException, QueryFilterOptimizationException, - StorageEngineException, IOException, InterruptedException { - DeletePlan deletePlan = new DeletePlan(); - deletePlan.setPaths(Collections.singletonList(new PartialPath(TestUtils.getTestSeries(0, 0)))); - deletePlan.setDeleteEndTime(50); - applier.apply(new PhysicalPlanLog(deletePlan)); - QueryDataSet dataSet = query(Collections.singletonList(TestUtils.getTestSeries(0, 0)), null); - int cnt = 0; - while (dataSet.hasNext()) { - RowRecord record = dataSet.next(); - assertEquals(cnt + 51L, record.getTimestamp()); - assertEquals((cnt + 51) * 1.0, record.getFields().get(0).getDoubleV(), 0.00001); - cnt++; - } - assertEquals(49, cnt); - } - - @Test - public void testApplyCloseFile() throws IoTDBException { - DataRegion dataRegion = - StorageEngine.getInstance().getProcessor(new PartialPath(TestUtils.getTestSg(0))); - TestCase.assertFalse(dataRegion.getWorkSequenceTsFileProcessors().isEmpty()); - - CloseFileLog closeFileLog = new CloseFileLog(TestUtils.getTestSg(0), 0, true); - applier.apply(closeFileLog); - TestCase.assertTrue(dataRegion.getWorkSequenceTsFileProcessors().isEmpty()); - } - - @Test - public void testApplyFlush() throws IllegalPathException { - // existing sg - FlushPlan flushPlan = - new FlushPlan(null, Collections.singletonList(new PartialPath(TestUtils.getTestSg(0)))); - PhysicalPlanLog log = new PhysicalPlanLog(flushPlan); - - applier.apply(log); - 
assertNull(log.getException()); - - // non-existing sg - flushPlan = - new FlushPlan(null, Collections.singletonList(new PartialPath(TestUtils.getTestSg(20)))); - log = new PhysicalPlanLog(flushPlan); - - applier.apply(log); - assertEquals( - "Storage group is not set for current seriesPath: [root.test20]", - log.getException().getMessage()); - } - - @Test - public void testApplyCreateMultiTimeseiresWithPulling() throws MetadataException { - IoTDB.schemaProcessor.setStorageGroup(new PartialPath("root.sg1")); - CreateMultiTimeSeriesPlan multiTimeSeriesPlan = new CreateMultiTimeSeriesPlan(); - multiTimeSeriesPlan.setIndexes(Collections.emptyList()); - multiTimeSeriesPlan.setPaths( - Arrays.asList( - new PartialPath("root.sg1.s1"), - // root.sg2 should be pulled - new PartialPath("root.sg2.s1"))); - multiTimeSeriesPlan.setCompressors( - Arrays.asList(CompressionType.UNCOMPRESSED, CompressionType.UNCOMPRESSED)); - multiTimeSeriesPlan.setDataTypes(Arrays.asList(TSDataType.DOUBLE, TSDataType.DOUBLE)); - multiTimeSeriesPlan.setEncodings(Arrays.asList(TSEncoding.GORILLA, TSEncoding.GORILLA)); - - PhysicalPlanLog log = new PhysicalPlanLog(multiTimeSeriesPlan); - // the applier should sync meta leader to get root.sg2 and report no error - applier.apply(log); - assertTrue( - IoTDB.schemaProcessor.getAllStorageGroupPaths().contains(new PartialPath("root.sg2"))); - assertNull(log.getException()); - } - - @Test - public void testApplyDeletePartitionFilter() throws QueryProcessException { - applier.setQueryExecutor( - new PlanExecutor() { - @Override - public boolean processNonQuery(PhysicalPlan plan) { - assertTrue(plan instanceof DeletePlan); - DeletePlan deletePlan = (DeletePlan) plan; - TimePartitionFilter planFilter = deletePlan.getPartitionFilter(); - TimePartitionFilter memberFilter = testDataGroupMember.getTimePartitionFilter(); - assertEquals(planFilter, memberFilter); - return true; - } - }); - - DeletePlan deletePlan = new DeletePlan(); - PhysicalPlanLog log = new PhysicalPlanLog(deletePlan); - applier.apply(log); - assertNull(log.getException()); - } - - @Test - public void testApplyClearCache() { - ClearCachePlan clearCachePlan = new ClearCachePlan(); - PhysicalPlanLog physicalPlanLog = new PhysicalPlanLog(clearCachePlan); - applier.apply(physicalPlanLog); - assertNull(physicalPlanLog.getException()); - } - - @Test - public void testApplyMerge() { - MergePlan mergePlan = new MergePlan(); - PhysicalPlanLog physicalPlanLog = new PhysicalPlanLog(mergePlan); - applier.apply(physicalPlanLog); - assertNull(physicalPlanLog.getException()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/applier/MetaLogApplierTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/applier/MetaLogApplierTest.java deleted file mode 100644 index f236a823d919..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/applier/MetaLogApplierTest.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.applier; - -import org.apache.iotdb.cluster.common.IoTDBTest; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.coordinator.Coordinator; -import org.apache.iotdb.cluster.log.LogApplier; -import org.apache.iotdb.cluster.log.logtypes.AddNodeLog; -import org.apache.iotdb.cluster.log.logtypes.PhysicalPlanLog; -import org.apache.iotdb.cluster.log.logtypes.RemoveNodeLog; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.Constants; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding; - -import org.junit.After; -import org.junit.Test; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; - -import static junit.framework.TestCase.assertEquals; -import static junit.framework.TestCase.assertFalse; -import static junit.framework.TestCase.assertTrue; - -public class MetaLogApplierTest extends IoTDBTest { - - private Set nodes = new HashSet<>(); - - private TestMetaGroupMember testMetaGroupMember = - new TestMetaGroupMember() { - @Override - public void applyAddNode(AddNodeLog addNodeLog) { - nodes.add(addNodeLog.getNewNode()); - } - - @Override - public void applyRemoveNode(RemoveNodeLog removeNodeLog) { - nodes.remove(removeNodeLog.getRemovedNode()); - } - }; - - private LogApplier applier = new MetaLogApplier(testMetaGroupMember); - - @Override - @After - public void tearDown() throws IOException, StorageEngineException { - testMetaGroupMember.stop(); - testMetaGroupMember.closeLogManager(); - super.tearDown(); - } - - @Test - public void testApplyAddNode() { - nodes.clear(); - testMetaGroupMember.setCoordinator(new Coordinator()); - testMetaGroupMember.setPartitionTable(TestUtils.getPartitionTable(3)); - Node node = new Node("localhost", 1111, 0, 2222, Constants.RPC_PORT, "localhost"); - AddNodeLog log = new AddNodeLog(); - log.setNewNode(node); - log.setPartitionTable(TestUtils.getSeralizePartitionTable()); - applier.apply(log); - - assertTrue(nodes.contains(node)); - } - - @Test - public void testApplyRemoveNode() { - nodes.clear(); - - Node node = testMetaGroupMember.getThisNode(); - RemoveNodeLog log = new RemoveNodeLog(); - log.setPartitionTable(TestUtils.getSeralizePartitionTable()); - log.setRemovedNode(node); - applier.apply(log); - - assertFalse(nodes.contains(node)); - } - - @Test - public void testApplyMetadataCreation() throws MetadataException { - PhysicalPlanLog physicalPlanLog = new PhysicalPlanLog(); - SetStorageGroupPlan setStorageGroupPlan = - new 
SetStorageGroupPlan(new PartialPath("root.applyMeta")); - physicalPlanLog.setPlan(setStorageGroupPlan); - - applier.apply(physicalPlanLog); - assertTrue(IoTDB.schemaProcessor.isPathExist(new PartialPath("root.applyMeta"))); - - CreateTimeSeriesPlan createTimeSeriesPlan = - new CreateTimeSeriesPlan( - new PartialPath("root.applyMeta" + ".s1"), - TSDataType.DOUBLE, - TSEncoding.RLE, - CompressionType.SNAPPY, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - null); - physicalPlanLog.setPlan(createTimeSeriesPlan); - applier.apply(physicalPlanLog); - assertTrue(IoTDB.schemaProcessor.isPathExist(new PartialPath("root.applyMeta.s1"))); - assertEquals( - TSDataType.DOUBLE, - IoTDB.schemaProcessor.getSeriesType(new PartialPath("root" + ".applyMeta.s1"))); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/catchup/CatchUpTaskTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/catchup/CatchUpTaskTest.java deleted file mode 100644 index 431a1b767b34..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/catchup/CatchUpTaskTest.java +++ /dev/null @@ -1,398 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log.catchup; - -import org.apache.iotdb.cluster.common.TestAsyncClient; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestSyncClient; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.LeaderUnknownException; -import org.apache.iotdb.cluster.exception.LogExecutionException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogParser; -import org.apache.iotdb.cluster.log.logtypes.EmptyContentLog; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntriesRequest; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client; -import org.apache.iotdb.cluster.rpc.thrift.SendSnapshotRequest; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.cluster.server.monitor.Peer; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -public class CatchUpTaskTest { - - private List receivedLogs = new ArrayList<>(); - private long leaderCommit; - private RaftNode header = new RaftNode(new Node(), 0); - private boolean prevUseAsyncServer; - - private RaftMember sender = - new TestMetaGroupMember() { - @Override - public PartitionTable getPartitionTable() { - return new SlotPartitionTable(TestUtils.getNode(0)); - } - - @Override - public Client getSyncClient(Node node) { - return new TestSyncClient() { - @Override - public long appendEntry(AppendEntryRequest request) { - return dummyAppendEntry(request); - } - - @Override - public long appendEntries(AppendEntriesRequest request) { - return dummyAppendEntries(request); - } - - @Override - public boolean matchTerm(long index, long term, RaftNode header) { - return dummyMatchTerm(index, term); - } - - @Override - public void sendSnapshot(SendSnapshotRequest request) { - // do nothing - } - }; - } - - @Override - public AsyncClient getAsyncClient(Node node) { - return new TestAsyncClient() { - @Override - public void appendEntry( - AppendEntryRequest request, AsyncMethodCallback resultHandler) { - new Thread(() -> resultHandler.onComplete(dummyAppendEntry(request))).start(); - } - - @Override - public void appendEntries( - AppendEntriesRequest request, AsyncMethodCallback resultHandler) { - new Thread(() -> resultHandler.onComplete(dummyAppendEntries(request))).start(); - } - - @Override - public void matchTerm( - long index, - long term, - RaftNode header, - AsyncMethodCallback resultHandler) { - new Thread(() -> resultHandler.onComplete(dummyMatchTerm(index, term))).start(); - } - - @Override - public void sendSnapshot( - 
SendSnapshotRequest request, AsyncMethodCallback resultHandler) { - new Thread(() -> resultHandler.onComplete(null)).start(); - } - }; - } - - @Override - public RaftNode getHeader() { - return header; - } - }; - - private long dummyAppendEntry(AppendEntryRequest request) { - Log log = receivedLogs.get(receivedLogs.size() - 1); - Log testLog; - try { - testLog = LogParser.getINSTANCE().parse(request.entry); - } catch (Exception e) { - return Response.RESPONSE_NULL; - } - if (testLog.getCurrLogIndex() == log.getCurrLogIndex() + 1) { - leaderCommit = Math.max(request.leaderCommit, leaderCommit); - receivedLogs.add(testLog); - return Response.RESPONSE_AGREE; - } - if (testLog.getCurrLogIndex() == log.getCurrLogIndex()) { - leaderCommit = Math.max(request.leaderCommit, leaderCommit); - return Response.RESPONSE_AGREE; - } - return Response.RESPONSE_LOG_MISMATCH; - } - - private long dummyAppendEntries(AppendEntriesRequest request) { - for (ByteBuffer byteBuffer : request.getEntries()) { - Log testLog; - try { - testLog = LogParser.getINSTANCE().parse(byteBuffer); - } catch (Exception e) { - return Response.RESPONSE_NULL; - } - receivedLogs.add(testLog); - } - leaderCommit = Math.max(request.leaderCommit, leaderCommit); - return Response.RESPONSE_AGREE; - } - - private boolean dummyMatchTerm(long index, long term) { - if (receivedLogs.isEmpty()) { - return true; - } else { - for (Log receivedLog : receivedLogs) { - if (receivedLog.getCurrLogTerm() == term && receivedLog.getCurrLogIndex() == index) { - return true; - } - } - } - return false; - } - - @Before - public void setUp() { - IoTDB.configManager.init(); - prevUseAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(true); - receivedLogs = new ArrayList<>(); - EmptyContentLog log = new EmptyContentLog(); - log.setCurrLogIndex(-1); - log.setCurrLogTerm(-1); - receivedLogs.add(log); - } - - @After - public void tearDown() throws Exception { - IoTDB.configManager.clear(); - sender.stop(); - sender.closeLogManager(); - EnvironmentUtils.cleanAllDir(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(prevUseAsyncServer); - } - - @Test - public void testCatchUpEmpty() throws LogExecutionException { - List logList = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - Log log = new EmptyContentLog(); - log.setCurrLogIndex(i); - log.setCurrLogTerm(i); - logList.add(log); - } - receivedLogs.clear(); - - sender.getLogManager().append(logList); - sender.getLogManager().commitTo(9); - sender.getLogManager().setMaxHaveAppliedCommitIndex(sender.getLogManager().getCommitLogIndex()); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - Peer peer = new Peer(10); - peer.setMatchIndex(9); - CatchUpTask task = new CatchUpTask(receiver, 0, peer, sender, 9); - task.run(); - - assertTrue(receivedLogs.isEmpty()); - } - - @Test - public void testPartialCatchUpAsync() throws LogExecutionException { - List logList = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - Log log = new EmptyContentLog(); - log.setCurrLogIndex(i); - log.setCurrLogTerm(i); - logList.add(log); - if (i < 6) { - receivedLogs.add(log); - } - } - sender.getLogManager().append(logList); - sender.getLogManager().commitTo(9); - sender.getLogManager().setMaxHaveAppliedCommitIndex(sender.getLogManager().getCommitLogIndex()); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - Peer peer = new Peer(10); - peer.setMatchIndex(0); - CatchUpTask task = 
new CatchUpTask(receiver, 0, peer, sender, 5); - task.run(); - - assertEquals(logList, receivedLogs.subList(1, receivedLogs.size())); - assertEquals(9, leaderCommit); - } - - @Test - public void testPartialCatchUpSync() throws LogExecutionException { - boolean useAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(false); - - try { - List logList = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - Log log = new EmptyContentLog(); - log.setCurrLogIndex(i); - log.setCurrLogTerm(i); - logList.add(log); - if (i < 6) { - receivedLogs.add(log); - } - } - sender.getLogManager().append(logList); - sender.getLogManager().commitTo(9); - sender - .getLogManager() - .setMaxHaveAppliedCommitIndex(sender.getLogManager().getCommitLogIndex()); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - Peer peer = new Peer(10); - peer.setMatchIndex(0); - CatchUpTask task = new CatchUpTask(receiver, 0, peer, sender, 5); - task.run(); - - assertEquals(logList, receivedLogs.subList(1, receivedLogs.size())); - assertEquals(9, leaderCommit); - } finally { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(useAsyncServer); - } - } - - @Test - public void testCatchUpSingle() throws Exception { - List logList = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - Log log = new EmptyContentLog(); - log.setCurrLogIndex(i); - log.setCurrLogTerm(i); - logList.add(log); - } - sender.getLogManager().append(logList); - sender.getLogManager().commitTo(9); - sender.getLogManager().setMaxHaveAppliedCommitIndex(sender.getLogManager().getCommitLogIndex()); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - Peer peer = new Peer(10); - peer.setNextIndex(0); - CatchUpTask task = new CatchUpTask(receiver, 0, peer, sender, 0); - ClusterDescriptor.getInstance().getConfig().setUseBatchInLogCatchUp(false); - task.run(); - - assertEquals(logList, receivedLogs.subList(1, receivedLogs.size())); - assertEquals(9, leaderCommit); - } - - @Test - public void testCatchUpBatch() throws Exception { - List logList = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - Log log = new EmptyContentLog(); - log.setCurrLogIndex(i); - log.setCurrLogTerm(i); - logList.add(log); - } - sender.getLogManager().append(logList); - sender.getLogManager().commitTo(9); - sender.getLogManager().setMaxHaveAppliedCommitIndex(sender.getLogManager().getCommitLogIndex()); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - Peer peer = new Peer(10); - peer.setNextIndex(0); - CatchUpTask task = new CatchUpTask(receiver, 0, peer, sender, 0); - task.run(); - - assertEquals(logList, receivedLogs.subList(1, receivedLogs.size())); - assertEquals(9, leaderCommit); - } - - @Test - public void testFindLastMatchIndex() throws LogExecutionException { - List logList = new ArrayList<>(); - int lastMatchedIndex = 6; - for (int i = 0; i < 10; i++) { - Log log = new EmptyContentLog(); - log.setCurrLogIndex(i); - log.setCurrLogTerm(i); - logList.add(log); - - if (i < lastMatchedIndex) { - receivedLogs.add(log); - } - } - sender.getLogManager().append(logList); - sender.getLogManager().commitTo(9); - sender.getLogManager().setMaxHaveAppliedCommitIndex(sender.getLogManager().getCommitLogIndex()); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - Peer peer = new Peer(10); - peer.setMatchIndex(0); - peer.setNextIndex(0); - - CatchUpTask task = new CatchUpTask(receiver, 0, peer, sender, 
0); - task.setLogs(logList); - try { - // 1. case 1: the matched index is in the middle of the logs interval - int resultMatchIndex = task.findLastMatchIndex(logList); - assertEquals(lastMatchedIndex, resultMatchIndex); - - // 2. case 2: no matched index case - lastMatchedIndex = -1; - receivedLogs.subList(1, receivedLogs.size()).clear(); - logList = new ArrayList<>(logList.subList(1, logList.size())); - task.setLogs(logList); - resultMatchIndex = task.findLastMatchIndex(logList); - assertEquals(lastMatchedIndex, resultMatchIndex); - - // 3. case 3: the matched index is at the last index of the logs - logList.clear(); - receivedLogs.subList(1, receivedLogs.size()).clear(); - lastMatchedIndex = 9; - for (int i = 0; i < 10; i++) { - Log log = new EmptyContentLog(); - log.setCurrLogIndex(i); - log.setCurrLogTerm(i); - logList.add(log); - - if (i < lastMatchedIndex) { - receivedLogs.add(log); - } - } - resultMatchIndex = task.findLastMatchIndex(logList); - assertEquals(lastMatchedIndex, resultMatchIndex); - - } catch (LeaderUnknownException | TException | InterruptedException e) { - Assert.fail(e.getMessage()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/catchup/LogCatchUpTaskTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/catchup/LogCatchUpTaskTest.java deleted file mode 100644 index 4c2bd912fc38..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/catchup/LogCatchUpTaskTest.java +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log.catchup; - -import org.apache.iotdb.cluster.common.TestAsyncClient; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestSyncClient; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.LeaderUnknownException; -import org.apache.iotdb.cluster.exception.UnknownLogTypeException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogParser; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntriesRequest; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -public class LogCatchUpTaskTest { - - private List receivedLogs = new ArrayList<>(); - private RaftNode header = new RaftNode(new Node(), 0); - private boolean testLeadershipFlag; - private boolean prevUseAsyncServer; - - private RaftMember sender = - new TestMetaGroupMember() { - - @Override - public AsyncClient getAsyncClient(Node node) { - return new TestAsyncClient() { - @Override - public void appendEntry( - AppendEntryRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - try { - resultHandler.onComplete(dummyAppendEntry(request)); - } catch (UnknownLogTypeException e) { - fail(e.getMessage()); - } - }) - .start(); - } - - @Override - public void appendEntries( - AppendEntriesRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - try { - resultHandler.onComplete(dummyAppendEntries(request)); - } catch (UnknownLogTypeException e) { - fail(e.getMessage()); - } - }) - .start(); - } - }; - } - - @Override - public Client getSyncClient(Node node) { - return new TestSyncClient() { - @Override - public long appendEntry(AppendEntryRequest request) throws TException { - try { - return dummyAppendEntry(request); - } catch (UnknownLogTypeException e) { - throw new TException(e); - } - } - - @Override - public long appendEntries(AppendEntriesRequest request) throws TException { - try { - return dummyAppendEntries(request); - } catch (UnknownLogTypeException e) { - throw new TException(e); - } - } - }; - } - - @Override - public RaftNode getHeader() { - return header; - } - }; - - private long dummyAppendEntry(AppendEntryRequest request) throws UnknownLogTypeException { - LogParser parser = LogParser.getINSTANCE(); - Log testLog = parser.parse(request.entry); - receivedLogs.add(testLog); - if (testLeadershipFlag && testLog.getCurrLogIndex() == 4) { - sender.setCharacter(NodeCharacter.ELECTOR); - } - return Response.RESPONSE_AGREE; - } - - 
private long dummyAppendEntries(AppendEntriesRequest request) throws UnknownLogTypeException { - LogParser parser = LogParser.getINSTANCE(); - Log testLog; - for (ByteBuffer byteBuffer : request.getEntries()) { - testLog = parser.parse(byteBuffer); - receivedLogs.add(testLog); - if (testLog != null && testLeadershipFlag && testLog.getCurrLogIndex() >= 1023) { - // return a larger term to indicate that the leader has changed - return sender.getTerm().get() + 1; - } - } - - return Response.RESPONSE_AGREE; - } - - @Before - public void setUp() { - prevUseAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(true); - testLeadershipFlag = false; - } - - @After - public void tearDown() throws Exception { - sender.stop(); - sender.closeLogManager(); - EnvironmentUtils.cleanAllDir(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(prevUseAsyncServer); - } - - @Test - public void testCatchUpAsync() throws InterruptedException, TException, LeaderUnknownException { - catchUpTest(10, false); - } - - @Test - public void testCatchUpInBatch() throws InterruptedException, TException, LeaderUnknownException { - catchUpTest(10, true); - } - - @Test - public void testCatchUpInBatch2() - throws InterruptedException, TException, LeaderUnknownException { - catchUpTest(500, true); - } - - public void catchUpTest(int logSize, boolean useBatch) - throws InterruptedException, TException, LeaderUnknownException { - List logList = TestUtils.prepareTestLogs(logSize); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - LogCatchUpTask task = new LogCatchUpTask(logList, receiver, 0, sender, useBatch); - task.call(); - - assertEquals(logList, receivedLogs); - } - - @Test - public void testCatchUpSync() throws InterruptedException, TException, LeaderUnknownException { - boolean useAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(false); - - try { - List logList = TestUtils.prepareTestLogs(10); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - LogCatchUpTask task = new LogCatchUpTask(logList, receiver, 0, sender, false); - task.call(); - - assertEquals(logList, receivedLogs); - } finally { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(useAsyncServer); - } - } - - @Test - public void testLeadershipLost() { - testLeadershipFlag = true; - // the leadership will be lost after sending 5 logs - List logList = TestUtils.prepareTestLogs(10); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - LogCatchUpTask task = new LogCatchUpTask(logList, receiver, 0, sender, false); - task.setUseBatch(false); - try { - task.call(); - fail("Expected LeaderUnknownException"); - } catch (TException | InterruptedException e) { - fail(e.getMessage()); - } catch (LeaderUnknownException e) { - assertEquals( - "The leader is unknown in this group [Node(internalIp:192.168.0.0, metaPort:9003, " - + "nodeIdentifier:0, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.1, metaPort:9003, " - + "nodeIdentifier:1, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.2, metaPort:9003, " - + "nodeIdentifier:2, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.3, metaPort:9003, " - + "nodeIdentifier:3, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.4, metaPort:9003, 
" - + "nodeIdentifier:4, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.5, metaPort:9003, " - + "nodeIdentifier:5, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.6, metaPort:9003, " - + "nodeIdentifier:6, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.7, metaPort:9003, " - + "nodeIdentifier:7, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.8, metaPort:9003, " - + "nodeIdentifier:8, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.9, metaPort:9003, " - + "nodeIdentifier:9, dataPort:40010, clientPort:6667, clientIp:0.0.0.0)], id = 0", - e.getMessage()); - } - - assertEquals(logList.subList(0, 5), receivedLogs); - } - - @Test - public void testLeadershipLostInBatch() - throws InterruptedException, TException, LeaderUnknownException { - testLeadershipFlag = true; - // the leadership will be lost after sending 256 logs - List logList = TestUtils.prepareTestLogs(1030); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - LogCatchUpTask task = new LogCatchUpTask(logList, receiver, 0, sender, true); - task.call(); - - assertEquals(logList.subList(0, 1024), receivedLogs); - } - - @Test - public void testSmallFrameSize() throws InterruptedException, TException, LeaderUnknownException { - int preFrameSize = IoTDBDescriptor.getInstance().getConfig().getThriftMaxFrameSize(); - try { - // thrift frame size is small so the logs must be sent in more than one batch - List logList = TestUtils.prepareTestLogs(500); - int singleLogSize = logList.get(0).serialize().limit(); - IoTDBDescriptor.getInstance() - .getConfig() - .setThriftMaxFrameSize(100 * singleLogSize + IoTDBConstant.LEFT_SIZE_IN_REQUEST); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - LogCatchUpTask task = new LogCatchUpTask(logList, receiver, 0, sender, true); - task.call(); - - assertEquals(logList, receivedLogs); - } finally { - IoTDBDescriptor.getInstance().getConfig().setThriftMaxFrameSize(preFrameSize); - } - } - - @Test - public void testVerySmallFrameSize() - throws InterruptedException, TException, LeaderUnknownException { - int preFrameSize = IoTDBDescriptor.getInstance().getConfig().getThriftMaxFrameSize(); - try { - // thrift frame size is too small so no logs can be sent successfully - List logList = TestUtils.prepareTestLogs(500); - IoTDBDescriptor.getInstance().getConfig().setThriftMaxFrameSize(0); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - LogCatchUpTask task = new LogCatchUpTask(logList, receiver, 0, sender, true); - task.call(); - - assertTrue(receivedLogs.isEmpty()); - } finally { - IoTDBDescriptor.getInstance().getConfig().setThriftMaxFrameSize(preFrameSize); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/catchup/SnapshotCatchUpTaskTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/catchup/SnapshotCatchUpTaskTest.java deleted file mode 100644 index 008c23598674..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/catchup/SnapshotCatchUpTaskTest.java +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.catchup; - -import org.apache.iotdb.cluster.common.TestAsyncClient; -import org.apache.iotdb.cluster.common.TestLog; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestSnapshot; -import org.apache.iotdb.cluster.common.TestSyncClient; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.LeaderUnknownException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client; -import org.apache.iotdb.cluster.rpc.thrift.SendSnapshotRequest; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -public class SnapshotCatchUpTaskTest { - - private List receivedLogs = new ArrayList<>(); - private Snapshot receivedSnapshot; - private RaftNode header = new RaftNode(new Node(), 0); - private boolean testLeadershipFlag; - private boolean prevUseAsyncServer; - private boolean noConnection = false; - - private RaftMember sender = - new TestMetaGroupMember() { - - @Override - public AsyncClient getAsyncClient(Node node) { - if (noConnection) { - return null; - } - return new TestAsyncClient() { - @Override - public void appendEntry( - AppendEntryRequest request, AsyncMethodCallback resultHandler) { - new Thread(() -> resultHandler.onComplete(dummyAppendEntry(request))).start(); - } - - @Override - public void sendSnapshot( - SendSnapshotRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - dummySendSnapshot(request); - resultHandler.onComplete(null); - }) - .start(); - } - }; - } - - @Override - public Client getSyncClient(Node node) { - if (noConnection) { - return null; - } - return new TestSyncClient() { - @Override - public long appendEntry(AppendEntryRequest request) { - return dummyAppendEntry(request); - } - - @Override - public void sendSnapshot(SendSnapshotRequest request) { - dummySendSnapshot(request); - } - }; - } - - @Override - public RaftNode getHeader() { - return header; - } - }; - - private long 
dummyAppendEntry(AppendEntryRequest request) { - TestLog testLog = new TestLog(); - testLog.deserialize(request.entry); - receivedLogs.add(testLog); - return Response.RESPONSE_AGREE; - } - - private void dummySendSnapshot(SendSnapshotRequest request) { - receivedSnapshot = new TestSnapshot(); - receivedSnapshot.deserialize(request.snapshotBytes); - if (testLeadershipFlag) { - sender.setCharacter(NodeCharacter.ELECTOR); - } - } - - @Before - public void setUp() { - prevUseAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(true); - testLeadershipFlag = false; - receivedSnapshot = null; - receivedLogs.clear(); - noConnection = false; - } - - @After - public void tearDown() throws Exception { - sender.stop(); - sender.closeLogManager(); - EnvironmentUtils.cleanAllDir(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(prevUseAsyncServer); - } - - @Test - public void testCatchUpAsync() throws InterruptedException, TException, LeaderUnknownException { - List logList = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - Log log = new TestLog(); - log.setCurrLogIndex(i); - log.setCurrLogTerm(i); - logList.add(log); - } - Snapshot snapshot = new TestSnapshot(9989); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - SnapshotCatchUpTask task = new SnapshotCatchUpTask(logList, snapshot, receiver, 0, sender); - task.call(); - - assertEquals(logList, receivedLogs); - assertEquals(snapshot, receivedSnapshot); - } - - @Test - public void testNoConnection() throws InterruptedException, TException, LeaderUnknownException { - noConnection = true; - List logList = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - Log log = new TestLog(); - log.setCurrLogIndex(i); - log.setCurrLogTerm(i); - logList.add(log); - } - Snapshot snapshot = new TestSnapshot(9989); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - SnapshotCatchUpTask task = new SnapshotCatchUpTask(logList, snapshot, receiver, 0, sender); - task.call(); - - assertTrue(receivedLogs.isEmpty()); - assertNull(receivedSnapshot); - } - - @Test - public void testCatchUp() throws InterruptedException, TException, LeaderUnknownException { - boolean useAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(false); - - try { - List logList = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - Log log = new TestLog(); - log.setCurrLogIndex(i); - log.setCurrLogTerm(i); - logList.add(log); - } - Snapshot snapshot = new TestSnapshot(9989); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - SnapshotCatchUpTask task = new SnapshotCatchUpTask(logList, snapshot, receiver, 0, sender); - task.call(); - - assertEquals(logList, receivedLogs); - assertEquals(snapshot, receivedSnapshot); - } finally { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(useAsyncServer); - } - } - - @Test - public void testLeadershipLost() { - testLeadershipFlag = true; - // the leadership will be lost after sending the snapshot - List logList = TestUtils.prepareTestLogs(10); - Snapshot snapshot = new TestSnapshot(9989); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.LEADER); - LogCatchUpTask task = new SnapshotCatchUpTask(logList, snapshot, receiver, 0, sender); - try { - task.call(); - fail("Expected LeaderUnknownException"); - } catch (TException | InterruptedException e) { - 
fail(e.getMessage()); - } catch (LeaderUnknownException e) { - assertEquals( - "The leader is unknown in this group [Node(internalIp:192.168.0.0, metaPort:9003, " - + "nodeIdentifier:0, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.1, metaPort:9003, " - + "nodeIdentifier:1, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.2, metaPort:9003, " - + "nodeIdentifier:2, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.3, metaPort:9003, " - + "nodeIdentifier:3, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.4, metaPort:9003, " - + "nodeIdentifier:4, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.5, metaPort:9003, " - + "nodeIdentifier:5, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.6, metaPort:9003, " - + "nodeIdentifier:6, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.7, metaPort:9003, " - + "nodeIdentifier:7, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.8, metaPort:9003, " - + "nodeIdentifier:8, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.9, metaPort:9003, " - + "nodeIdentifier:9, dataPort:40010, clientPort:6667, clientIp:0.0.0.0)], id = 0", - e.getMessage()); - } - - assertEquals(snapshot, receivedSnapshot); - assertTrue(receivedLogs.isEmpty()); - } - - @Test - public void testNoLeadership() { - // the leadership is lost from the beginning - List logList = TestUtils.prepareTestLogs(10); - Snapshot snapshot = new TestSnapshot(9989); - Node receiver = new Node(); - sender.setCharacter(NodeCharacter.ELECTOR); - LogCatchUpTask task = new SnapshotCatchUpTask(logList, snapshot, receiver, 0, sender); - try { - task.call(); - fail("Expected LeaderUnknownException"); - } catch (TException | InterruptedException e) { - fail(e.getMessage()); - } catch (LeaderUnknownException e) { - assertEquals( - "The leader is unknown in this group [Node(internalIp:192.168.0.0, metaPort:9003, " - + "nodeIdentifier:0, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.1, metaPort:9003, " - + "nodeIdentifier:1, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.2, metaPort:9003, " - + "nodeIdentifier:2, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.3, metaPort:9003, " - + "nodeIdentifier:3, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.4, metaPort:9003, " - + "nodeIdentifier:4, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.5, metaPort:9003, " - + "nodeIdentifier:5, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.6, metaPort:9003, " - + "nodeIdentifier:6, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.7, metaPort:9003, " - + "nodeIdentifier:7, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.8, metaPort:9003, " - + "nodeIdentifier:8, dataPort:40010, clientPort:6667, clientIp:0.0.0.0), Node(internalIp:192.168.0.9, metaPort:9003, " - + "nodeIdentifier:9, dataPort:40010, clientPort:6667, clientIp:0.0.0.0)], id = 0", - e.getMessage()); - } - - assertNull(receivedSnapshot); - assertTrue(receivedLogs.isEmpty()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/logtypes/SerializeLogTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/logtypes/SerializeLogTest.java deleted file mode 
100644 index 35aae54f2d3e..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/logtypes/SerializeLogTest.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.logtypes; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.exception.UnknownLogTypeException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogParser; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.utils.Constants; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.metadata.mnode.IMeasurementMNode; -import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan; -import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding; - -import org.junit.Test; - -import java.nio.ByteBuffer; -import java.util.Collections; -import java.util.HashMap; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -public class SerializeLogTest { - - @Test - public void testPhysicalPlanLog() throws UnknownLogTypeException, IllegalPathException { - PhysicalPlanLog log = new PhysicalPlanLog(); - log.setCurrLogIndex(2); - log.setCurrLogTerm(2); - InsertRowPlan plan = new InsertRowPlan(); - plan.setDevicePath(new PartialPath("root.d1")); - plan.setDevicePath(new PartialPath("root.d1")); - plan.setMeasurements(new String[] {"s1", "s2", "s3"}); - plan.setNeedInferType(true); - plan.setDataTypes(new TSDataType[plan.getMeasurements().length]); - plan.setValues(new Object[] {"0.1", "1", "\"dd\""}); - IMeasurementMNode[] schemas = { - TestUtils.getTestMeasurementMNode(1), - TestUtils.getTestMeasurementMNode(2), - TestUtils.getTestMeasurementMNode(3) - }; - schemas[0].getSchema().setType(TSDataType.DOUBLE); - schemas[1].getSchema().setType(TSDataType.INT32); - schemas[2].getSchema().setType(TSDataType.TEXT); - plan.setMeasurementMNodes(schemas); - plan.setTime(1); - log.setPlan(plan); - - ByteBuffer byteBuffer = log.serialize(); - Log logPrime = LogParser.getINSTANCE().parse(byteBuffer); - assertEquals(log, logPrime); - - log = new PhysicalPlanLog(new SetStorageGroupPlan(new PartialPath("root.sg1"))); - byteBuffer = log.serialize(); - logPrime = LogParser.getINSTANCE().parse(byteBuffer); - assertEquals(log, logPrime); - - log = - new PhysicalPlanLog( - new CreateTimeSeriesPlan( - new PartialPath("root.applyMeta" + ".s1"), - 
TSDataType.DOUBLE, - TSEncoding.RLE, - CompressionType.SNAPPY, - new HashMap() { - { - put("MAX_POINT_NUMBER", "100"); - } - }, - Collections.emptyMap(), - Collections.emptyMap(), - null)); - byteBuffer = log.serialize(); - logPrime = LogParser.getINSTANCE().parse(byteBuffer); - assertEquals(log, logPrime); - } - - @Test - public void testAddNodeLog() throws UnknownLogTypeException { - AddNodeLog log = new AddNodeLog(); - log.setPartitionTable(TestUtils.getSeralizePartitionTable()); - log.setCurrLogIndex(2); - log.setCurrLogTerm(2); - log.setNewNode( - new Node("apache.iotdb.com", 1234, 1, 4321, Constants.RPC_PORT, "apache.iotdb.com")); - ByteBuffer byteBuffer = log.serialize(); - Log logPrime = LogParser.getINSTANCE().parse(byteBuffer); - assertEquals(log, logPrime); - } - - @Test - public void testCloseFileLog() throws UnknownLogTypeException { - CloseFileLog log = new CloseFileLog("root.sg1", 0, true); - log.setCurrLogIndex(2); - log.setCurrLogTerm(2); - ByteBuffer byteBuffer = log.serialize(); - CloseFileLog logPrime = (CloseFileLog) LogParser.getINSTANCE().parse(byteBuffer); - assertTrue(logPrime.isSeq()); - assertEquals("root.sg1", logPrime.getStorageGroupName()); - assertEquals(log, logPrime); - } - - @Test - public void testRemoveNodeLog() throws UnknownLogTypeException { - RemoveNodeLog log = new RemoveNodeLog(); - log.setPartitionTable(TestUtils.getSeralizePartitionTable()); - log.setCurrLogIndex(2); - log.setCurrLogTerm(2); - log.setRemovedNode(TestUtils.getNode(0)); - ByteBuffer byteBuffer = log.serialize(); - RemoveNodeLog logPrime = (RemoveNodeLog) LogParser.getINSTANCE().parse(byteBuffer); - assertEquals(log, logPrime); - } - - @Test - public void testEmptyContentLog() throws UnknownLogTypeException { - EmptyContentLog log = new EmptyContentLog(2, 2); - ByteBuffer byteBuffer = log.serialize(); - EmptyContentLog logPrime = (EmptyContentLog) LogParser.getINSTANCE().parse(byteBuffer); - assertEquals(log, logPrime); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/CommittedEntryManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/CommittedEntryManagerTest.java deleted file mode 100644 index a1bcbce39e16..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/CommittedEntryManagerTest.java +++ /dev/null @@ -1,625 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log.manage; - -import org.apache.iotdb.cluster.exception.EntryCompactedException; -import org.apache.iotdb.cluster.exception.EntryUnavailableException; -import org.apache.iotdb.cluster.exception.TruncateCommittedEntryException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.logtypes.EmptyContentLog; -import org.apache.iotdb.cluster.log.snapshot.SimpleSnapshot; - -import org.junit.Test; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - -public class CommittedEntryManagerTest { - - @Test - public void applyingSnapshot() { - class CommittedEntryManagerTester { - - public List entries; - public Snapshot snapshot; - public Snapshot applyingSnapshot; - public long testIndex; - - public CommittedEntryManagerTester( - List entries, Snapshot snapshot, Snapshot applyingSnapshot, long testIndex) { - this.entries = entries; - this.snapshot = snapshot; - this.applyingSnapshot = applyingSnapshot; - this.testIndex = testIndex; - } - } - List tests = - new ArrayList() { - { - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - new SimpleSnapshot(3, 3), - new SimpleSnapshot(3, 3), - 3)); - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - new SimpleSnapshot(3, 3), - new SimpleSnapshot(4, 4), - 4)); - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - new SimpleSnapshot(3, 3), - new SimpleSnapshot(5, 5), - 5)); - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - new SimpleSnapshot(3, 3), - new SimpleSnapshot(7, 7), - 7)); - } - }; - for (CommittedEntryManagerTester test : tests) { - CommittedEntryManager instance = new CommittedEntryManager(test.entries); - instance.applyingSnapshot(test.applyingSnapshot); - assertEquals(test.testIndex, (long) instance.getDummyIndex()); - } - } - - @Test - public void getDummyIndex() { - class CommittedEntryManagerTester { - - public List entries; - public long testIndex; - - public CommittedEntryManagerTester(List entries, long testIndex) { - this.entries = entries; - this.testIndex = testIndex; - } - } - List tests = - new ArrayList() { - { - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - } - }, - 1)); - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - 3)); - } - }; - for (CommittedEntryManagerTester test : tests) { - CommittedEntryManager instance = new CommittedEntryManager(test.entries); - long index = instance.getDummyIndex(); - assertEquals(test.testIndex, index); - } - } - - @Test - public void getFirstIndex() { - class CommittedEntryManagerTester { - - public List entries; - public long testIndex; - - public CommittedEntryManagerTester(List entries, long testIndex) { - this.entries = entries; - this.testIndex = testIndex; - } - } - List tests = - new 
ArrayList() { - { - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - } - }, - 2)); - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - 4)); - } - }; - for (CommittedEntryManagerTester test : tests) { - CommittedEntryManager instance = new CommittedEntryManager(test.entries); - long index = instance.getFirstIndex(); - assertEquals(test.testIndex, index); - } - } - - @Test - public void getLastIndex() { - class CommittedEntryManagerTester { - - public List entries; - public long testIndex; - - public CommittedEntryManagerTester(List entries, long testIndex) { - this.entries = entries; - this.testIndex = testIndex; - } - } - List tests = - new ArrayList() { - { - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - } - }, - 1)); - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - 5)); - } - }; - for (CommittedEntryManagerTester test : tests) { - CommittedEntryManager instance = new CommittedEntryManager(test.entries); - long index = instance.getLastIndex(); - assertEquals(test.testIndex, index); - } - } - - @Test - public void maybeTerm() { - class CommittedEntryManagerTester { - - public long index; - public long testTerm; - public Class throwClass; - - public CommittedEntryManagerTester(long index, long testTerm, Class throwClass) { - this.index = index; - this.testTerm = testTerm; - this.throwClass = throwClass; - } - } - List entries = - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }; - List tests = - new ArrayList() { - { - add(new CommittedEntryManagerTester(3, 3, null)); - add(new CommittedEntryManagerTester(4, 4, null)); - add(new CommittedEntryManagerTester(5, 5, null)); - // entries that have been compacted; - add(new CommittedEntryManagerTester(2, 0, EntryCompactedException.class)); - // entries that have not been committed; - add(new CommittedEntryManagerTester(6, -1, null)); - } - }; - for (CommittedEntryManagerTester test : tests) { - CommittedEntryManager instance = new CommittedEntryManager(entries); - try { - long term = instance.maybeTerm(test.index); - if (test.throwClass != null) { - fail("The expected exception is not thrown"); - } else { - assertEquals(test.testTerm, term); - } - } catch (Exception e) { - if (!e.getClass().getName().equals(test.throwClass.getName())) { - fail("An unexpected exception was thrown."); - } - } - } - } - - @Test - public void getEntries() { - class CommittedEntryManagerTester { - - public long low; - public long high; - public List testEntries; - public Class throwClass; - - public CommittedEntryManagerTester( - long low, long high, List testEntries, Class throwClass) { - this.low = low; - this.high = high; - this.testEntries = testEntries; - this.throwClass = throwClass; - } - } - List entries = - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - add(new EmptyContentLog(6, 6)); - } - }; - List tests = - new ArrayList() { - { - add(new CommittedEntryManagerTester(4, 4, new ArrayList<>(), null)); - add( - new CommittedEntryManagerTester( - 4, - 5, - new ArrayList() { - { - add(new EmptyContentLog(4, 4)); - } - }, - null)); - add( - new 
CommittedEntryManagerTester( - 4, - 6, - new ArrayList() { - { - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - null)); - add( - new CommittedEntryManagerTester( - 4, - 7, - new ArrayList() { - { - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - add(new EmptyContentLog(6, 6)); - } - }, - null)); - // entries that have not been committed; - add( - new CommittedEntryManagerTester( - 4, - 8, - new ArrayList() { - { - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - add(new EmptyContentLog(6, 6)); - } - }, - null)); - // entries that have been compacted; - add(new CommittedEntryManagerTester(2, 6, entries.subList(1, 3), null)); - add(new CommittedEntryManagerTester(3, 4, Collections.EMPTY_LIST, null)); - // illegal range - add(new CommittedEntryManagerTester(5, 4, Collections.EMPTY_LIST, null)); - } - }; - for (CommittedEntryManagerTester test : tests) { - CommittedEntryManager instance = new CommittedEntryManager(entries); - try { - List answer = instance.getEntries(test.low, test.high); - if (test.throwClass != null) { - fail("The expected exception is not thrown"); - } else { - assertEquals(test.testEntries, answer); - } - } catch (Exception e) { - if (!e.getClass().getName().equals(test.throwClass.getName())) { - fail("An unexpected exception was thrown."); - } - } - } - } - - @Test - public void compactEntries() { - class CommittedEntryManagerTester { - - public List entries; - public long compactIndex; - public List testEntries; - public Class throwClass; - - public CommittedEntryManagerTester( - List entries, long compactIndex, List testEntries, Class throwClass) { - this.entries = entries; - this.compactIndex = compactIndex; - this.testEntries = testEntries; - this.throwClass = throwClass; - } - } - List tests = - new ArrayList() { - { - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - 2, - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - null)); - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - 3, - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - null)); - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - 4, - new ArrayList() { - { - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - null)); - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - 5, - new ArrayList() { - { - add(new EmptyContentLog(5, 5)); - } - }, - null)); - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - 6, - null, - EntryUnavailableException.class)); - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - 10, - null, - EntryUnavailableException.class)); - } - }; - for (CommittedEntryManagerTester test : 
tests) { - CommittedEntryManager instance = new CommittedEntryManager(test.entries); - try { - instance.compactEntries(test.compactIndex); - if (test.throwClass != null) { - fail("The expected exception is not thrown"); - } else { - assertEquals(test.testEntries, instance.getAllEntries()); - } - } catch (Exception e) { - if (!e.getClass().getName().equals(test.throwClass.getName())) { - fail("An unexpected exception was thrown."); - } - } - } - } - - @Test - public void append() { - class CommittedEntryManagerTester { - - public List entries; - public List toAppend; - public List testEntries; - public Class throwClass; - - public CommittedEntryManagerTester( - List entries, List toAppend, List testEntries, Class throwClass) { - this.entries = entries; - this.toAppend = toAppend; - this.testEntries = testEntries; - this.throwClass = throwClass; - } - } - List tests = - new ArrayList() { - { - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - null, - TruncateCommittedEntryException.class)); - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 6)); - add(new EmptyContentLog(5, 6)); - } - }, - null, - TruncateCommittedEntryException.class)); - // direct append - add( - new CommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - } - }, - new ArrayList() { - { - add(new EmptyContentLog(6, 5)); - } - }, - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 5)); - add(new EmptyContentLog(6, 5)); - } - }, - null)); - } - }; - for (CommittedEntryManagerTester test : tests) { - CommittedEntryManager instance = new CommittedEntryManager(test.entries); - try { - instance.append(test.toAppend); - if (test.throwClass != null) { - fail("The expected exception is not thrown"); - } else { - assertEquals(test.testEntries, instance.getAllEntries()); - } - } catch (Exception e) { - if (!e.getClass().getName().equals(test.throwClass.getName())) { - fail("An unexpected exception was thrown."); - } - } - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/FilePartitionedSnapshotLogManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/FilePartitionedSnapshotLogManagerTest.java deleted file mode 100644 index 563ddc860bb1..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/FilePartitionedSnapshotLogManagerTest.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.manage; - -import org.apache.iotdb.cluster.common.IoTDBTest; -import org.apache.iotdb.cluster.common.TestDataGroupMember; -import org.apache.iotdb.cluster.common.TestLogApplier; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogApplier; -import org.apache.iotdb.cluster.log.snapshot.FileSnapshot; -import org.apache.iotdb.cluster.log.snapshot.PartitionedSnapshot; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.qp.executor.PlanExecutor; -import org.apache.iotdb.db.qp.physical.sys.FlushPlan; -import org.apache.iotdb.tsfile.utils.Pair; - -import org.junit.After; -import org.junit.Ignore; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertEquals; - -@Ignore -public class FilePartitionedSnapshotLogManagerTest extends IoTDBTest { - - @Override - @After - public void tearDown() throws IOException, StorageEngineException { - super.tearDown(); - } - - @Test - public void testSnapshot() throws Exception { - PartitionTable partitionTable = TestUtils.getPartitionTable(3); - LogApplier applier = new TestLogApplier(); - FilePartitionedSnapshotLogManager manager = - new FilePartitionedSnapshotLogManager( - applier, - partitionTable, - TestUtils.getNode(0), - TestUtils.getNode(0), - new TestDataGroupMember()); - - try { - List logs = TestUtils.prepareTestLogs(10); - manager.append(logs); - manager.commitTo(10); - manager.setMaxHaveAppliedCommitIndex(manager.getCommitLogIndex()); - - Map>> storageGroupPartitionIds = new HashMap<>(); - // create files for sgs - for (int i = 1; i < 4; i++) { - PartialPath sg = new PartialPath(TestUtils.getTestSg(i)); - storageGroupPartitionIds.put(sg, null); - for (int j = 0; j < 4; j++) { - // closed files - prepareData(i, j * 10, 10); - StorageEngine.getInstance().closeStorageGroupProcessor(sg, true, true); - } - // un closed files - prepareData(i, 40, 10); - } - - FlushPlan plan = new FlushPlan(null, true, storageGroupPartitionIds); - PlanExecutor executor = new PlanExecutor(); - executor.processNonQuery(plan); - - List requireSlots = new ArrayList<>(); - ((SlotPartitionTable) manager.partitionTable) - .getAllNodeSlots() - .values() - .forEach(requireSlots::addAll); - manager.takeSnapshotForSpecificSlots(requireSlots, true); - PartitionedSnapshot snapshot = (PartitionedSnapshot) manager.getSnapshot(); - for (int i = 1; i < 4; i++) { - FileSnapshot fileSnapshot = - (FileSnapshot) - snapshot.getSnapshot( - SlotPartitionTable.getSlotStrategy() - .calculateSlotByTime(TestUtils.getTestSg(i), 0, ClusterConstant.SLOT_NUM)); - assertEquals(10, 
fileSnapshot.getTimeseriesSchemas().size()); - assertEquals(5, fileSnapshot.getDataFiles().size()); - } - } finally { - manager.close(); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/MetaSingleSnapshotLogManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/MetaSingleSnapshotLogManagerTest.java deleted file mode 100644 index f967b2beacdf..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/MetaSingleSnapshotLogManagerTest.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.manage; - -import org.apache.iotdb.cluster.common.IoTDBTest; -import org.apache.iotdb.cluster.common.TestLogApplier; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.coordinator.Coordinator; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.snapshot.MetaSimpleSnapshot; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.StartupException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.query.QueryProcessException; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.util.Arrays; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertEquals; - -public class MetaSingleSnapshotLogManagerTest extends IoTDBTest { - - private MetaSingleSnapshotLogManager logManager; - - @Override - @Before - public void setUp() throws QueryProcessException, StartupException, IllegalPathException { - super.setUp(); - MetaGroupMember metaGroupMember = new MetaGroupMember(); - Coordinator coordinator = new Coordinator(metaGroupMember); - metaGroupMember.setCoordinator(coordinator); - metaGroupMember.setPartitionTable(new SlotPartitionTable(new Node())); - logManager = new MetaSingleSnapshotLogManager(new TestLogApplier(), metaGroupMember); - } - - @Override - @After - public void tearDown() - throws java.io.IOException, org.apache.iotdb.db.exception.StorageEngineException { - logManager.close(); - super.tearDown(); - } - - @Test - public void testTakeSnapshot() throws Exception { - try { - List testLogs = TestUtils.prepareTestLogs(10); - logManager.append(testLogs); - logManager.commitTo(4); - logManager.setMaxHaveAppliedCommitIndex(logManager.getCommitLogIndex()); - - logManager.takeSnapshot(); - MetaSimpleSnapshot snapshot = (MetaSimpleSnapshot) logManager.getSnapshot(); - Map storageGroupTTLMap = 
snapshot.getStorageGroupTTLMap(); - PartialPath[] storageGroups = storageGroupTTLMap.keySet().toArray(new PartialPath[0]); - Arrays.sort(storageGroups); - - assertEquals(10, storageGroups.length); - for (int i = 0; i < 10; i++) { - assertEquals(new PartialPath(TestUtils.getTestSg(i)), storageGroups[i]); - } - assertEquals(4, snapshot.getLastLogIndex()); - assertEquals(4, snapshot.getLastLogTerm()); - } finally { - logManager.close(); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/RaftLogManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/RaftLogManagerTest.java deleted file mode 100644 index 4d47219693ec..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/RaftLogManagerTest.java +++ /dev/null @@ -1,1889 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.manage; - -import org.apache.iotdb.cluster.common.TestLogApplier; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.EntryCompactedException; -import org.apache.iotdb.cluster.exception.GetEntriesWrongParametersException; -import org.apache.iotdb.cluster.exception.LogExecutionException; -import org.apache.iotdb.cluster.exception.TruncateCommittedEntryException; -import org.apache.iotdb.cluster.log.HardState; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogApplier; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.StableEntryManager; -import org.apache.iotdb.cluster.log.logtypes.EmptyContentLog; -import org.apache.iotdb.cluster.log.manage.serializable.SyncLogDequeSerializer; -import org.apache.iotdb.cluster.log.snapshot.SimpleSnapshot; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -public class RaftLogManagerTest { - - static class TestRaftLogManager extends RaftLogManager { - - private Snapshot snapshot; - - public TestRaftLogManager( - StableEntryManager stableEntryManager, LogApplier applier, String name) { - super(stableEntryManager, applier, name); - } - - public TestRaftLogManager( - CommittedEntryManager committedEntryManager, - StableEntryManager stableEntryManager, - LogApplier applier) { - 
super(committedEntryManager, stableEntryManager, applier); - } - - @Override - public Snapshot getSnapshot(long minIndex) { - return snapshot; - } - - @Override - public void takeSnapshot() throws IOException { - super.takeSnapshot(); - snapshot = new SimpleSnapshot(getLastLogIndex(), getLastLogTerm()); - } - } - - private Map appliedLogs; - private volatile boolean blocked = false; - private LogApplier logApplier = - new TestLogApplier() { - @Override - public void apply(Log log) { - if (blocked) { - return; - } - // make sure the log is applied when not blocked - appliedLogs.put(log.getCurrLogIndex(), log); - log.setApplied(true); - } - }; - private int testIdentifier = 1; - - private boolean prevLogPersistence; - - @Before - public void setUp() { - prevLogPersistence = ClusterDescriptor.getInstance().getConfig().isEnableRaftLogPersistence(); - ClusterDescriptor.getInstance().getConfig().setCatchUpTimeoutMS(100); - ClusterDescriptor.getInstance().getConfig().setEnableRaftLogPersistence(true); - appliedLogs = new ConcurrentHashMap<>(); - blocked = false; - } - - @After - public void tearDown() { - blocked = false; - File dir = new File(SyncLogDequeSerializer.getLogDir(testIdentifier)); - for (File file : dir.listFiles()) { - file.delete(); - } - dir.delete(); - ClusterDescriptor.getInstance().getConfig().setEnableRaftLogPersistence(prevLogPersistence); - } - - @Test - public void testHardState() { - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - - try { - HardState hardState = new HardState(); - hardState.setVoteFor(TestUtils.getNode(10)); - hardState.setCurrentTerm(11); - assertNotEquals(hardState, instance.getHardState()); - instance.updateHardState(hardState); - assertEquals(hardState, instance.getHardState()); - } finally { - instance.close(); - } - } - - @Test - public void testBlockedSnapshot() throws LogExecutionException, IOException { - int catchUpTimeoutMS = ClusterDescriptor.getInstance().getConfig().getCatchUpTimeoutMS(); - ClusterDescriptor.getInstance().getConfig().setCatchUpTimeoutMS(200); - - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - try { - List logs = TestUtils.prepareTestLogs(100); - instance.append(logs.subList(0, 50)); - instance.commitTo(49); - - instance.setBlockAppliedCommitIndex(99); - instance.append(logs.subList(50, 100)); - instance.commitTo(98); - - try { - // applier is blocked, so this should time out - instance.takeSnapshot(); - fail("No exception"); - } catch (IOException e) { - assertEquals("wait all log applied time out", e.getMessage()); - } - blocked = false; - instance.commitTo(99); - // applier is unblocked, BlockAppliedCommitIndex should be soon reached - ClusterDescriptor.getInstance().getConfig().setCatchUpTimeoutMS(60_000); - instance.takeSnapshot(); - assertEquals(new SimpleSnapshot(99, 99), instance.getSnapshot()); - } finally { - instance.close(); - ClusterDescriptor.getInstance().getConfig().setCatchUpTimeoutMS(catchUpTimeoutMS); - } - } - - @Test - public void getTerm() { - class RaftLogManagerTester { - - public long index; - public long testTerm; - public Class 
throwClass; - - public RaftLogManagerTester(long index, long testTerm, Class throwClass) { - this.index = index; - this.testTerm = testTerm; - this.throwClass = throwClass; - } - } - long offset = 100; - long num = 100; - long half = offset + num / 2; - long last = offset + num; - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - committedEntryManager.applyingSnapshot(new SimpleSnapshot(offset, offset)); - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - try { - for (long i = 1; i < num; i++) { - long index = i; - instance.append( - new ArrayList() { - { - add(new EmptyContentLog(offset + index, offset + index)); - } - }); - } - List tests = - new ArrayList() { - { - add(new RaftLogManagerTester(offset - 1, -1, null)); - add(new RaftLogManagerTester(offset, offset, null)); - add(new RaftLogManagerTester(half, half, null)); - add(new RaftLogManagerTester(last - 1, last - 1, null)); - add(new RaftLogManagerTester(last, -1, null)); - } - }; - for (RaftLogManagerTester test : tests) { - try { - long term = instance.getTerm(test.index); - if (test.throwClass != null) { - fail("The expected exception is not thrown"); - } else { - assertEquals(test.testTerm, term); - } - } catch (Exception e) { - if (!e.getClass().getName().equals(test.throwClass.getName())) { - fail("An unexpected exception was thrown."); - } - } - } - } finally { - instance.close(); - } - } - - @Test - public void getFirstIndex() { - long offset = 100; - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - committedEntryManager.applyingSnapshot(new SimpleSnapshot(offset, offset)); - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - try { - assertEquals(offset + 1, instance.getFirstIndex()); - long newOffset = offset + 20; - committedEntryManager.applyingSnapshot(new SimpleSnapshot(newOffset, newOffset)); - assertEquals(newOffset + 1, instance.getFirstIndex()); - } finally { - instance.close(); - } - } - - @Test - public void getLastLogIndex() { - long offset = 100; - long num = 100; - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - committedEntryManager.applyingSnapshot(new SimpleSnapshot(offset, offset)); - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - try { - for (long i = 1; i < num; i++) { - long index = i; - instance.append( - new ArrayList() { - { - add(new EmptyContentLog(offset + index, offset + index)); - } - }); - assertEquals(offset + index, instance.getLastLogIndex()); - } - } finally { - instance.close(); - } - } - - @Test - public void getLastLogTerm() { - long offset = 100; - long num = 100; - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - committedEntryManager.applyingSnapshot(new SimpleSnapshot(offset, offset)); - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - try { - for (long i = 1; i < num; i++) { - long index = i; - instance.append( - new ArrayList() { - { - add(new 
EmptyContentLog(offset + index, offset + index)); - } - }); - assertEquals(offset + index, instance.getLastLogTerm()); - } - } finally { - instance.close(); - } - } - - @Test - public void maybeCommit() { - class RaftLogManagerTester { - - public long leaderCommit; - public long term; - public long testCommittedEntryManagerSize; - public long testUnCommittedEntryManagerSize; - public long testCommitIndex; - public boolean testCommit; - - public RaftLogManagerTester( - long leaderCommit, - long term, - long testCommittedEntryManagerSize, - long testUnCommittedEntryManagerSize, - long testCommitIndex, - boolean testCommit) { - this.leaderCommit = leaderCommit; - this.term = term; - this.testCommittedEntryManagerSize = testCommittedEntryManagerSize; - this.testUnCommittedEntryManagerSize = testUnCommittedEntryManagerSize; - this.testCommitIndex = testCommitIndex; - this.testCommit = testCommit; - } - } - long offset = 100; - long num = 100; - long half = offset + num / 2; - long last = offset + num; - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - committedEntryManager.applyingSnapshot(new SimpleSnapshot(offset, offset)); - for (long i = 1; i < num / 2; i++) { - long index = i; - try { - committedEntryManager.append( - new ArrayList() { - { - add(new EmptyContentLog(offset + index, offset + index)); - } - }); - } catch (Exception e) { - } - } - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - try { - for (long i = num / 2; i < num; i++) { - long index = i; - instance.append( - new ArrayList() { - { - add(new EmptyContentLog(offset + index, offset + index)); - } - }); - } - List tests = - new ArrayList() { - { - // term small leaderCommit - add( - new RaftLogManagerTester( - offset - 10, offset - 9, num / 2, num / 2, half - 1, false)); - add( - new RaftLogManagerTester( - offset - 10, offset - 10, num / 2, num / 2, half - 1, false)); - add(new RaftLogManagerTester(half - 1, half - 1, num / 2, num / 2, half - 1, false)); - // normal case - add(new RaftLogManagerTester(half, half + 1, num / 2, num / 2, half - 1, false)); - add(new RaftLogManagerTester(half, half, num / 2 + 1, num / 2 - 1, half, true)); - add(new RaftLogManagerTester(last - 1, last - 1, num, 0, last - 1, true)); - // test large leaderCommit - add(new RaftLogManagerTester(last, last, num, 0, last - 1, false)); - } - }; - for (RaftLogManagerTester test : tests) { - boolean answer = instance.maybeCommit(test.leaderCommit, test.term); - assertEquals( - test.testCommittedEntryManagerSize, - instance.getCommittedEntryManager().getAllEntries().size()); - assertEquals( - test.testUnCommittedEntryManagerSize, - instance.getUnCommittedEntryManager().getAllEntries().size()); - assertEquals(test.testCommitIndex, instance.getCommitLogIndex()); - assertEquals(test.testCommit, answer); - } - } finally { - instance.close(); - } - } - - @Test - public void commitTo() throws Exception { - class RaftLogManagerTester { - - public long commitTo; - public long testCommittedEntryManagerSize; - public long testUnCommittedEntryManagerSize; - public long testCommitIndex; - - public RaftLogManagerTester( - long commitTo, - long testCommittedEntryManagerSize, - long testUnCommittedEntryManagerSize, - long testCommitIndex) { - this.commitTo = commitTo; - this.testCommittedEntryManagerSize = testCommittedEntryManagerSize; - this.testUnCommittedEntryManagerSize = 
testUnCommittedEntryManagerSize; - this.testCommitIndex = testCommitIndex; - } - } - long offset = 100; - long num = 100; - long half = offset + num / 2; - long last = offset + num; - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - committedEntryManager.applyingSnapshot(new SimpleSnapshot(offset, offset)); - for (long i = 1; i < num / 2; i++) { - long index = i; - try { - committedEntryManager.append( - new ArrayList() { - { - add(new EmptyContentLog(offset + index, offset + index)); - } - }); - } catch (Exception e) { - } - } - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - try { - for (long i = num / 2; i < num; i++) { - long index = i; - instance.append( - new ArrayList() { - { - add(new EmptyContentLog(offset + index, offset + index)); - } - }); - } - List tests = - new ArrayList() { - { - add(new RaftLogManagerTester(offset - 10, num / 2, num / 2, half - 1)); - add(new RaftLogManagerTester(half - 1, num / 2, num / 2, half - 1)); - add(new RaftLogManagerTester(half, num / 2 + 1, num / 2 - 1, half)); - add(new RaftLogManagerTester(half + 10, num / 2 + 11, num / 2 - 11, half + 10)); - add(new RaftLogManagerTester(last - 1, num, 0, last - 1)); - } - }; - for (RaftLogManagerTester test : tests) { - instance.commitTo(test.commitTo); - assertEquals( - test.testCommittedEntryManagerSize, - instance.getCommittedEntryManager().getAllEntries().size()); - assertEquals( - test.testUnCommittedEntryManagerSize, - instance.getUnCommittedEntryManager().getAllEntries().size()); - assertEquals(test.testCommitIndex, instance.getCommitLogIndex()); - } - } finally { - instance.close(); - } - } - - @Test - public void applyEntries() { - List testLogs = TestUtils.prepareTestLogs(10); - RaftLogManager instance = - new TestRaftLogManager( - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()), - new SyncLogDequeSerializer(testIdentifier), - logApplier); - try { - instance.applyEntries(testLogs); - for (Log log : testLogs) { - while (!log.isApplied()) { - // wait - } - assertTrue(appliedLogs.containsKey(log.getCurrLogIndex())); - assertEquals(log, appliedLogs.get(log.getCurrLogIndex())); - } - } finally { - instance.close(); - } - } - - @Test - public void matchTerm() { - class RaftLogManagerTester { - - public long index; - public long term; - public boolean testMatch; - - public RaftLogManagerTester(long index, long term, boolean testMatch) { - this.index = index; - this.term = term; - this.testMatch = testMatch; - } - } - long offset = 100; - long num = 100; - long half = offset + num / 2; - long last = offset + num; - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - committedEntryManager.applyingSnapshot(new SimpleSnapshot(offset, offset)); - for (long i = 1; i < num / 2; i++) { - long index = i; - try { - committedEntryManager.append( - new ArrayList() { - { - add(new EmptyContentLog(offset + index, offset + index)); - } - }); - } catch (Exception e) { - } - } - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - try { - for (long i = num / 2; i < num; i++) { - long index = i; - instance.append( - new ArrayList() { - { - add(new EmptyContentLog(offset + index, offset + index)); - } - }); - } - List 
tests = - new ArrayList() { - { - add(new RaftLogManagerTester(offset - 1, offset - 1, false)); - add(new RaftLogManagerTester(offset, offset - 1, false)); - add(new RaftLogManagerTester(offset, offset, true)); - add(new RaftLogManagerTester(half, half, true)); - add(new RaftLogManagerTester(half + 1, half, false)); - add(new RaftLogManagerTester(last - 1, last - 1, true)); - add(new RaftLogManagerTester(last, last, false)); - } - }; - for (RaftLogManagerTester test : tests) { - assertEquals(test.testMatch, instance.matchTerm(test.index, test.term)); - } - } finally { - instance.close(); - } - } - - @Test - public void maybeAppendBatch() throws Exception { - class RaftLogManagerTester { - - public List entries; - public long lastIndex; - public long lastTerm; - public long leaderCommit; - public long testLastIndex; - public long testCommitIndex; - public boolean testAppend; - - public RaftLogManagerTester( - List entries, - long lastIndex, - long lastTerm, - long leaderCommit, - long testLastIndex, - long testCommitIndex, - boolean testAppend) { - this.entries = entries; - this.lastIndex = lastIndex; - this.lastTerm = lastTerm; - this.leaderCommit = leaderCommit; - this.testLastIndex = testLastIndex; - this.testCommitIndex = testCommitIndex; - this.testAppend = testAppend; - } - } - List previousEntries = - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - add(new EmptyContentLog(3, 3)); - } - }; - long lastIndex = 3; - long lastTerm = 3; - long commit = 1; - List tests = - new ArrayList() { - { - // not match: term is different - add( - new RaftLogManagerTester( - new ArrayList<>(), lastIndex, lastTerm - 1, lastIndex, -1, commit, false)); - // not match: index out of bound - add( - new RaftLogManagerTester( - new ArrayList<>(), lastIndex + 1, lastTerm, lastIndex, -1, commit, false)); - // match with the last existing entry - add( - new RaftLogManagerTester( - new ArrayList<>(), lastIndex, lastTerm, lastIndex, lastIndex, lastIndex, true)); - // do not increase commit higher than newLastIndex - add( - new RaftLogManagerTester( - new ArrayList<>(), - lastIndex, - lastTerm, - lastIndex + 1, - lastIndex, - lastIndex, - true)); - // commit up to the commit in the message - add( - new RaftLogManagerTester( - new ArrayList<>(), - lastIndex, - lastTerm, - lastIndex - 1, - lastIndex, - lastIndex - 1, - true)); - // commit do not decrease - add( - new RaftLogManagerTester( - new ArrayList<>(), lastIndex, lastTerm, 0, lastIndex, commit, true)); - // normal case - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(lastIndex + 1, 4)); - } - }, - lastIndex, - lastTerm, - lastIndex, - lastIndex + 1, - lastIndex, - true)); - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(lastIndex + 1, 4)); - } - }, - lastIndex, - lastTerm, - lastIndex + 1, - lastIndex + 1, - lastIndex + 1, - true)); - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(lastIndex + 1, 4)); - add(new EmptyContentLog(lastIndex + 2, 4)); - } - }, - lastIndex, - lastTerm, - lastIndex + 2, - lastIndex + 2, - lastIndex + 2, - true)); - // do not increase commit higher than newLastIndex - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(lastIndex + 1, 4)); - } - }, - lastIndex, - lastTerm, - lastIndex + 2, - lastIndex + 1, - lastIndex + 1, - true)); - // match with the the entry in the middle - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new 
EmptyContentLog(lastIndex, 4)); - } - }, - lastIndex - 1, - lastTerm - 1, - lastIndex, - lastIndex, - lastIndex, - true)); - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(lastIndex - 1, 4)); - } - }, - lastIndex - 2, - lastTerm - 2, - lastIndex, - lastIndex - 1, - lastIndex - 1, - true)); - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(lastIndex - 1, 4)); - add(new EmptyContentLog(lastIndex, 4)); - } - }, - lastIndex - 2, - lastTerm - 2, - lastIndex, - lastIndex, - lastIndex, - true)); - } - }; - for (RaftLogManagerTester test : tests) { - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - committedEntryManager.applyingSnapshot(new SimpleSnapshot(0, 0)); - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - try { - instance.append(previousEntries); - instance.commitTo(commit); - assertEquals( - test.testLastIndex, - instance.maybeAppend(test.lastIndex, test.lastTerm, test.leaderCommit, test.entries)); - assertEquals(test.testCommitIndex, instance.getCommitLogIndex()); - if (test.testAppend) { - try { - List entries = - instance.getEntries( - instance.getLastLogIndex() - test.entries.size() + 1, Integer.MAX_VALUE); - assertEquals(test.entries, entries); - } catch (Exception e) { - fail("An unexpected exception was thrown."); - } - } - } finally { - instance.close(); - } - } - } - - @Test - public void maybeAppendSingle() throws Exception { - class RaftLogManagerTester { - - public Log entry; - public long lastIndex; - public long lastTerm; - public long leaderCommit; - public long testLastIndex; - public long testCommitIndex; - public boolean testAppend; - - public RaftLogManagerTester( - Log entry, - long lastIndex, - long lastTerm, - long leaderCommit, - long testLastIndex, - long testCommitIndex, - boolean testAppend) { - this.entry = entry; - this.lastIndex = lastIndex; - this.lastTerm = lastTerm; - this.leaderCommit = leaderCommit; - this.testLastIndex = testLastIndex; - this.testCommitIndex = testCommitIndex; - this.testAppend = testAppend; - } - } - List previousEntries = - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - add(new EmptyContentLog(3, 3)); - } - }; - long lastIndex = 3; - long lastTerm = 3; - long commit = 1; - List tests = - new ArrayList() { - { - // not match: term is different - add( - new RaftLogManagerTester( - null, lastIndex, lastTerm - 1, lastIndex, -1, commit, false)); - // not match: index out of bound - add( - new RaftLogManagerTester( - null, lastIndex + 1, lastTerm, lastIndex, -1, commit, false)); - // normal case - add( - new RaftLogManagerTester( - new EmptyContentLog(lastIndex + 1, 4), - lastIndex, - lastTerm, - lastIndex, - lastIndex + 1, - lastIndex, - true)); - add( - new RaftLogManagerTester( - new EmptyContentLog(lastIndex + 1, 4), - lastIndex, - lastTerm, - lastIndex + 1, - lastIndex + 1, - lastIndex + 1, - true)); - // do not increase commit higher than newLastIndex - add( - new RaftLogManagerTester( - new EmptyContentLog(lastIndex + 1, 4), - lastIndex, - lastTerm, - lastIndex + 2, - lastIndex + 1, - lastIndex + 1, - true)); - // match with the the entry in the middle - add( - new RaftLogManagerTester( - new EmptyContentLog(lastIndex, 4), - lastIndex - 1, - lastTerm - 1, - lastIndex, - lastIndex, - lastIndex, - true)); - add( - new RaftLogManagerTester( 
- new EmptyContentLog(lastIndex - 1, 4), - lastIndex - 2, - lastTerm - 2, - lastIndex, - lastIndex - 1, - lastIndex - 1, - true)); - } - }; - for (RaftLogManagerTester test : tests) { - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - committedEntryManager.applyingSnapshot(new SimpleSnapshot(0, 0)); - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - try { - instance.append(previousEntries); - instance.commitTo(commit); - assertEquals( - test.testLastIndex, - instance.maybeAppend(test.lastIndex, test.lastTerm, test.leaderCommit, test.entry)); - assertEquals(test.testCommitIndex, instance.getCommitLogIndex()); - if (test.testAppend) { - assertTrue(instance.matchTerm(test.entry.getCurrLogTerm(), test.entry.getCurrLogIndex())); - } - } finally { - instance.close(); - } - } - } - - @Test - public void testAppendCommitted() throws LogExecutionException { - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - - try { - List logs = TestUtils.prepareTestLogs(10); - instance.append(logs); - instance.commitTo(9); - - // committed logs cannot be overwrite - Log log = new EmptyContentLog(9, 10000); - instance.maybeAppend(8, 8, 9, log); - assertNotEquals(log, instance.getEntries(9, 10).get(0)); - instance.maybeAppend(8, 8, 9, Collections.singletonList(log)); - assertNotEquals(log, instance.getEntries(9, 10).get(0)); - } finally { - instance.close(); - } - } - - @Test - public void testInnerDeleteLogs() { - int minNumOfLogsInMem = ClusterDescriptor.getInstance().getConfig().getMinNumOfLogsInMem(); - int maxNumOfLogsInMem = ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem(); - ClusterDescriptor.getInstance().getConfig().setMaxNumOfLogsInMem(10); - ClusterDescriptor.getInstance().getConfig().setMinNumOfLogsInMem(10); - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - List logs = TestUtils.prepareTestLogs(20); - - try { - instance.append(logs.subList(0, 15)); - instance.maybeCommit(14, 14); - while (instance.getMaxHaveAppliedCommitIndex() < 14) { - // wait - } - instance.append(logs.subList(15, 20)); - instance.maybeCommit(19, 19); - - List entries = instance.getEntries(0, 20); - assertEquals(logs.subList(10, 20), entries); - } finally { - instance.close(); - ClusterDescriptor.getInstance().getConfig().setMaxNumOfLogsInMem(maxNumOfLogsInMem); - ClusterDescriptor.getInstance().getConfig().setMinNumOfLogsInMem(minNumOfLogsInMem); - } - } - - @Test - public void testInnerDeleteLogsWithLargeLog() { - long maxMemSize = ClusterDescriptor.getInstance().getConfig().getMaxMemorySizeForRaftLog(); - int minNumOfLogsInMem = ClusterDescriptor.getInstance().getConfig().getMinNumOfLogsInMem(); - int maxNumOfLogsInMem = ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem(); - ClusterDescriptor.getInstance().getConfig().setMaxNumOfLogsInMem(10); - ClusterDescriptor.getInstance().getConfig().setMinNumOfLogsInMem(10); - 
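// A note on the two eviction tests around this point: they exercise the in-memory
// cleanup policy of the log manager, which drops the oldest committed-and-applied
// entries once the cache exceeds maxNumOfLogsInMem (or, in the large-log variant,
// the configured memory budget), while always keeping at least minNumOfLogsInMem
// entries. The sketch below restates that counting rule in isolation; the class and
// method names are illustrative and are not part of the RaftLogManager API.
final class LogEvictionPolicySketch {

  /**
   * Returns how many of the oldest in-memory entries may be evicted, assuming the first
   * appliedCount entries (from the head) are already committed and applied.
   */
  static int evictableCount(int totalInMem, int appliedCount, int minInMem, int maxInMem) {
    if (totalInMem <= maxInMem) {
      return 0; // still under the limit, nothing needs to go
    }
    int overMinimum = totalInMem - minInMem;     // never shrink below the configured minimum
    return Math.min(overMinimum, appliedCount);  // only applied entries are safe to drop
  }

  public static void main(String[] args) {
    // Mirrors testInnerDeleteLogs: 20 entries total, min = max = 10, first 15 applied,
    // so the 10 oldest entries are removed and getEntries(0, 20) returns logs 10..19.
    System.out.println(evictableCount(20, 15, 10, 10)); // prints 10
  }
}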
ClusterDescriptor.getInstance().getConfig().setMaxMemorySizeForRaftLog(1024 * 56); - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - List logs = TestUtils.prepareLargeTestLogs(12); - - try { - instance.append(logs.subList(0, 7)); - instance.maybeCommit(6, 6); - while (instance.getMaxHaveAppliedCommitIndex() < 6) { - // wait - } - instance.append(logs.subList(7, 12)); - instance.maybeCommit(11, 11); - - List entries = instance.getEntries(0, 12); - assertEquals(logs.subList(5, 12), entries); - } finally { - instance.close(); - ClusterDescriptor.getInstance().getConfig().setMaxNumOfLogsInMem(maxNumOfLogsInMem); - ClusterDescriptor.getInstance().getConfig().setMinNumOfLogsInMem(minNumOfLogsInMem); - ClusterDescriptor.getInstance().getConfig().setMaxMemorySizeForRaftLog(maxMemSize); - } - } - - @Test - @SuppressWarnings("java:S2925") - public void testReapplyBlockedLogs() throws LogExecutionException, InterruptedException { - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - List logs = TestUtils.prepareTestLogs(20); - - try { - instance.append(logs.subList(0, 10)); - instance.commitTo(10); - instance.setBlockAppliedCommitIndex(9); - - blocked = false; - // as [0, 10) are blocked and we require a block index of 9, [10, 20) should be added to the - // blocked list - instance.append(logs.subList(10, 20)); - instance.commitTo(20); - while (instance.getMaxHaveAppliedCommitIndex() < 9) { - // wait until [0, 10) are applied - } - Thread.sleep(200); - // [10, 20) can still not be applied because we do not call `resetBlockAppliedCommitIndex` - for (Log log : logs.subList(10, 20)) { - assertFalse(log.isApplied()); - } - // [10, 20) can be applied now - instance.resetBlockAppliedCommitIndex(); - while (instance.getMaxHaveAppliedCommitIndex() < 19) { - // wait until [10, 20) are applied - } - } finally { - instance.close(); - } - } - - @Test - public void appendBatch() { - class RaftLogManagerTester { - - public List appendingEntries; - public List testEntries; - public long testLastIndexAfterAppend; - public long testOffset; - - public RaftLogManagerTester( - List appendingEntries, - List testEntries, - long testLastIndexAfterAppend, - long testOffset) { - this.appendingEntries = appendingEntries; - this.testEntries = testEntries; - this.testLastIndexAfterAppend = testLastIndexAfterAppend; - this.testOffset = testOffset; - } - } - List previousEntries = - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - } - }; - List tests = - new ArrayList() { - { - add( - new RaftLogManagerTester( - new ArrayList<>(), - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - } - }, - 2, - 3)); - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 2)); - } - }, - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - add(new EmptyContentLog(3, 2)); - } - }, - 3, - 3)); - // conflicts with index 1 - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(1, 2)); - } - }, - new 
ArrayList() { - { - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - } - }, - 2, - 3)); - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(2, 3)); - add(new EmptyContentLog(3, 3)); - } - }, - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - } - }, - 2, - 3)); - } - }; - for (RaftLogManagerTester test : tests) { - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - committedEntryManager.applyingSnapshot(new SimpleSnapshot(0, 0)); - try { - committedEntryManager.append(previousEntries); - } catch (Exception e) { - } - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - instance.append(test.appendingEntries); - try { - List entries = instance.getEntries(1, Integer.MAX_VALUE); - assertEquals(test.testEntries, entries); - assertEquals( - test.testOffset, instance.getUnCommittedEntryManager().getFirstUnCommittedIndex()); - } catch (Exception e) { - fail("An unexpected exception was thrown."); - } finally { - instance.close(); - } - } - } - - @Test - public void appendSingle() { - class RaftLogManagerTester { - - public Log appendingEntry; - public long testLastIndexAfterAppend; - public List testEntries; - public long testOffset; - - public RaftLogManagerTester( - Log appendingEntry, - List testEntries, - long testLastIndexAfterAppend, - long testOffset) { - this.appendingEntry = appendingEntry; - this.testEntries = testEntries; - this.testLastIndexAfterAppend = testLastIndexAfterAppend; - this.testOffset = testOffset; - } - } - List previousEntries = - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - } - }; - List tests = - new ArrayList() { - { - add( - new RaftLogManagerTester( - new EmptyContentLog(3, 2), - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - add(new EmptyContentLog(3, 2)); - } - }, - 3, - 3)); - // conflicts with index 1 - add( - new RaftLogManagerTester( - new EmptyContentLog(1, 2), - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - } - }, - 2, - 3)); - add( - new RaftLogManagerTester( - new EmptyContentLog(2, 3), - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - } - }, - 2, - 3)); - } - }; - for (RaftLogManagerTester test : tests) { - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - committedEntryManager.applyingSnapshot(new SimpleSnapshot(0, 0)); - try { - committedEntryManager.append(previousEntries); - } catch (Exception e) { - } - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - try { - instance.append(test.appendingEntry); - try { - List entries = instance.getEntries(1, Integer.MAX_VALUE); - assertEquals(test.testEntries, entries); - assertEquals( - test.testOffset, instance.getUnCommittedEntryManager().getFirstUnCommittedIndex()); - } catch (Exception e) { - fail("An unexpected exception was thrown."); - } - } finally { - instance.close(); - } - } - } - - @Test - public void checkBound() { - class RaftLogManagerTester { - - public long low; - public long high; - public Class throwClass; - - public RaftLogManagerTester(long low, long high, Class 
throwClass) { - this.low = low; - this.high = high; - this.throwClass = throwClass; - } - } - long offset = 100; - long num = 100; - long half = offset + num / 2; - long last = offset + num; - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - committedEntryManager.applyingSnapshot(new SimpleSnapshot(offset, offset)); - for (long i = 1; i < num / 2; i++) { - long index = i; - try { - committedEntryManager.append( - new ArrayList() { - { - add(new EmptyContentLog(offset + index, offset + index)); - } - }); - } catch (Exception e) { - } - } - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - try { - for (long i = num / 2; i < num; i++) { - long index = i; - instance.append( - new ArrayList() { - { - add(new EmptyContentLog(offset + index, offset + index)); - } - }); - } - List tests = - new ArrayList() { - { - add(new RaftLogManagerTester(offset - 1, offset + 1, EntryCompactedException.class)); - add(new RaftLogManagerTester(offset, offset + 1, EntryCompactedException.class)); - add(new RaftLogManagerTester(offset + 1, offset + 1, null)); - add(new RaftLogManagerTester(offset + 1, offset + 2, null)); - add(new RaftLogManagerTester(half + 1, half + 2, null)); - add(new RaftLogManagerTester(last, last, null)); - add(new RaftLogManagerTester(last + 1, last + 2, null)); - add( - new RaftLogManagerTester( - last + 1, last, GetEntriesWrongParametersException.class)); - add( - new RaftLogManagerTester( - half + 1, half, GetEntriesWrongParametersException.class)); - } - }; - for (RaftLogManagerTester test : tests) { - try { - instance.checkBound(test.low, test.high); - if (test.throwClass != null) { - fail("The expected exception is not thrown"); - } - } catch (Exception e) { - if (!e.getClass().getName().equals(test.throwClass.getName())) { - fail("An unexpected exception was thrown."); - } - } - } - } finally { - instance.close(); - } - } - - @Test - public void applyingSnapshot() throws Exception { - long index = 100; - long term = 100; - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - committedEntryManager.applyingSnapshot(new SimpleSnapshot(index, term)); - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - try { - instance.applySnapshot(new SimpleSnapshot(index, term)); - assertEquals(instance.getLastLogIndex(), term); - List entries = new ArrayList<>(); - for (int i = 1; i <= 10; i++) { - entries.add(new EmptyContentLog(index + i, index + i)); - } - instance.maybeAppend(index, term, index, entries); - assertEquals(1, instance.getCommittedEntryManager().getAllEntries().size()); - assertEquals(10, instance.getUnCommittedEntryManager().getAllEntries().size()); - assertEquals(100, instance.getCommitLogIndex()); - instance.commitTo(105); - assertEquals(101, instance.getFirstIndex()); - assertEquals(6, instance.getCommittedEntryManager().getAllEntries().size()); - assertEquals(5, instance.getUnCommittedEntryManager().getAllEntries().size()); - assertEquals(105, instance.getCommitLogIndex()); - instance.applySnapshot(new SimpleSnapshot(103, 103)); - assertEquals(104, instance.getFirstIndex()); - assertEquals(3, instance.getCommittedEntryManager().getAllEntries().size()); - assertEquals(5, 
instance.getUnCommittedEntryManager().getAllEntries().size()); - assertEquals(105, instance.getCommitLogIndex()); - instance.applySnapshot(new SimpleSnapshot(108, 108)); - assertEquals(109, instance.getFirstIndex()); - assertEquals(1, instance.getCommittedEntryManager().getAllEntries().size()); - assertEquals(0, instance.getUnCommittedEntryManager().getAllEntries().size()); - assertEquals(108, instance.getCommitLogIndex()); - } finally { - instance.close(); - } - } - - @Test - public void getEntries() throws TruncateCommittedEntryException { - class RaftLogManagerTester { - - public long low; - public long high; - public List testEntries; - public Class throwClass; - - public RaftLogManagerTester(long low, long high, List testEntries, Class throwClass) { - this.low = low; - this.high = high; - this.testEntries = testEntries; - this.throwClass = throwClass; - } - } - long offset = 100; - long num = 100; - long half = offset + num / 2; - long last = offset + num; - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - committedEntryManager.applyingSnapshot(new SimpleSnapshot(offset, offset)); - List logs = new ArrayList<>(); - for (long i = 1; i < num / 2; i++) { - logs.add(new EmptyContentLog(offset + i, offset + i)); - } - committedEntryManager.append(logs); - - RaftLogManager instance = - new TestRaftLogManager( - committedEntryManager, new SyncLogDequeSerializer(testIdentifier), logApplier); - try { - for (long i = num / 2; i < num; i++) { - long index = i; - logs.add(new EmptyContentLog(offset + index, offset + index)); - instance.append( - new ArrayList() { - { - add(new EmptyContentLog(offset + index, offset + index)); - } - }); - } - instance.append(logs.subList((int) num / 2 - 1, (int) num - 1)); - - List tests = - new ArrayList() { - { - add(new RaftLogManagerTester(offset + 1, offset + 1, new ArrayList<>(), null)); - add( - new RaftLogManagerTester( - offset + 1, - offset + 2, - new ArrayList() { - { - add(new EmptyContentLog(offset + 1, offset + 1)); - } - }, - null)); - add( - new RaftLogManagerTester( - half - 1, - half + 1, - new ArrayList() { - { - add(new EmptyContentLog(half - 1, half - 1)); - add(new EmptyContentLog(half, half)); - } - }, - null)); - add( - new RaftLogManagerTester( - half, - half + 1, - new ArrayList() { - { - add(new EmptyContentLog(half, half)); - } - }, - null)); - add( - new RaftLogManagerTester( - last - 1, - last, - new ArrayList() { - { - add(new EmptyContentLog(last - 1, last - 1)); - } - }, - null)); - // test EntryUnavailable - add( - new RaftLogManagerTester( - last - 1, - last + 1, - new ArrayList() { - { - add(new EmptyContentLog(last - 1, last - 1)); - } - }, - null)); - add(new RaftLogManagerTester(last, last + 1, new ArrayList<>(), null)); - add(new RaftLogManagerTester(last + 1, last + 2, new ArrayList<>(), null)); - // test GetEntriesWrongParametersException - add(new RaftLogManagerTester(offset + 1, offset, Collections.emptyList(), null)); - // test EntryCompactedException - add(new RaftLogManagerTester(offset - 1, offset + 1, Collections.emptyList(), null)); - add(new RaftLogManagerTester(offset, offset + 1, Collections.emptyList(), null)); - } - }; - for (RaftLogManagerTester test : tests) { - try { - List answer = instance.getEntries(test.low, test.high); - if (test.throwClass != null) { - fail("The expected exception is not thrown"); - } else { - assertEquals(test.testEntries, answer); - } - } catch (Exception e) { - if 
(!e.getClass().getName().equals(test.throwClass.getName())) { - fail("An unexpected exception was thrown."); - } - } - } - } finally { - instance.close(); - } - } - - @Test - public void findConflict() { - class RaftLogManagerTester { - - public List conflictEntries; - public long testConflict; - - public RaftLogManagerTester(List conflictEntries, long testConflict) { - this.conflictEntries = conflictEntries; - this.testConflict = testConflict; - } - } - List previousEntries = - new ArrayList() { - { - add(new EmptyContentLog(0, 0)); - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - } - }; - RaftLogManager instance = - new TestRaftLogManager( - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()), - new SyncLogDequeSerializer(testIdentifier), - logApplier); - try { - instance.append(previousEntries); - List tests = - new ArrayList() { - { - // no conflict, empty ent - add(new RaftLogManagerTester(new ArrayList<>(), -1)); - // no conflict - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(0, 0)); - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - } - }, - -1)); - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - } - }, - -1)); - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(2, 2)); - } - }, - -1)); - // no conflict, but has new entries - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(0, 0)); - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 3)); - } - }, - 3)); - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 3)); - } - }, - 3)); - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(2, 2)); - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 3)); - } - }, - 3)); - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(3, 3)); - add(new EmptyContentLog(4, 3)); - } - }, - 3)); - // conflicts with existing entries - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(0, 4)); - add(new EmptyContentLog(1, 4)); - } - }, - 0)); - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(1, 2)); - add(new EmptyContentLog(2, 4)); - add(new EmptyContentLog(3, 4)); - } - }, - 1)); - add( - new RaftLogManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(2, 1)); - add(new EmptyContentLog(3, 2)); - add(new EmptyContentLog(4, 4)); - add(new EmptyContentLog(5, 4)); - } - }, - 2)); - } - }; - for (RaftLogManagerTester test : tests) { - assertEquals(test.testConflict, instance.findConflict(test.conflictEntries)); - } - } finally { - instance.close(); - } - } - - @Test - public void isLogUpToDate() { - class RaftLogManagerTester { - - public long lastIndex; - public long lastTerm; - public boolean isUpToDate; - - public RaftLogManagerTester(long lastIndex, long lastTerm, boolean isUpToDate) { - this.lastIndex = lastIndex; - this.lastTerm = lastTerm; - this.isUpToDate = isUpToDate; - } - } - List previousEntries = - new ArrayList() { - { - add(new EmptyContentLog(0, 0)); - add(new EmptyContentLog(1, 1)); - add(new EmptyContentLog(2, 2)); - } - }; - RaftLogManager instance = - new 
TestRaftLogManager( - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()), - new SyncLogDequeSerializer(testIdentifier), - logApplier); - try { - instance.append(previousEntries); - List tests = - new ArrayList() { - { - // greater term, ignore lastIndex - add(new RaftLogManagerTester(instance.getLastLogIndex() - 1, 3, true)); - add(new RaftLogManagerTester(instance.getLastLogIndex(), 3, true)); - add(new RaftLogManagerTester(instance.getLastLogIndex() + 1, 3, true)); - // smaller term, ignore lastIndex - add(new RaftLogManagerTester(instance.getLastLogIndex() - 1, 1, false)); - add(new RaftLogManagerTester(instance.getLastLogIndex(), 1, false)); - add(new RaftLogManagerTester(instance.getLastLogIndex() + 1, 1, false)); - // equal term, equal or lager lastIndex wins - add(new RaftLogManagerTester(instance.getLastLogIndex() - 1, 2, false)); - add(new RaftLogManagerTester(instance.getLastLogIndex(), 2, true)); - add(new RaftLogManagerTester(instance.getLastLogIndex() + 1, 2, true)); - } - }; - for (RaftLogManagerTester test : tests) { - assertEquals(test.isUpToDate, instance.isLogUpToDate(test.lastTerm, test.lastIndex)); - } - } finally { - instance.close(); - } - } - - @Test - public void testCheckDeleteLog() { - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - RaftLogManager raftLogManager = - new TestRaftLogManager(committedEntryManager, syncLogDequeSerializer, logApplier); - // prevent the commit checker thread from modifying max applied index - blocked = true; - - int minNumberOfLogs = 100; - List testLogs1; - - raftLogManager.setMinNumOfLogsInMem(minNumberOfLogs); - testLogs1 = TestUtils.prepareNodeLogs(130); - raftLogManager.append(testLogs1); - Log lastLog = testLogs1.get(testLogs1.size() - 1); - raftLogManager.setMaxHaveAppliedCommitIndex(100); - try { - raftLogManager.commitTo(lastLog.getCurrLogIndex()); - } catch (LogExecutionException e) { - Assert.fail(e.toString()); - } - - assertEquals(130, committedEntryManager.getTotalSize()); - - // the maxHaveAppliedCommitIndex is smaller than 130-minNumberOfLogs - long remainNumber = 130 - 20; - raftLogManager.setMaxHaveAppliedCommitIndex(20); - raftLogManager.checkDeleteLog(); - assertEquals(remainNumber, committedEntryManager.getTotalSize()); - - raftLogManager.setMaxHaveAppliedCommitIndex(100); - raftLogManager.checkDeleteLog(); - assertEquals(minNumberOfLogs, committedEntryManager.getTotalSize()); - - raftLogManager.close(); - - // recovery - syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - try { - List logs = syncLogDequeSerializer.getAllEntriesAfterAppliedIndex(); - assertEquals(30, logs.size()); - for (int i = 0; i < logs.size(); i++) { - assertEquals(testLogs1.get(i + 100), logs.get(i)); - } - } finally { - syncLogDequeSerializer.close(); - } - } - - @Test - public void testApplyAllCommittedLogWhenStartUp() { - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - RaftLogManager raftLogManager = - new TestRaftLogManager(committedEntryManager, syncLogDequeSerializer, logApplier); - try { - int maxNumberOfLogs = 100; - List testLogs1; - raftLogManager.setMinNumOfLogsInMem(maxNumberOfLogs); - 
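// The isLogUpToDate cases earlier in this test class encode the standard Raft voting
// rule: a peer's log is considered at least as up-to-date as the local one when its last
// term is greater, or the terms are equal and its last index is not smaller. A minimal
// standalone restatement follows; the names here are illustrative and this is not the
// actual RaftLogManager signature.
final class LogUpToDateSketch {

  static boolean isUpToDate(
      long candidateLastTerm, long candidateLastIndex, long localLastTerm, long localLastIndex) {
    // A higher term wins regardless of index; with equal terms the longer (or equal) log wins.
    return candidateLastTerm > localLastTerm
        || (candidateLastTerm == localLastTerm && candidateLastIndex >= localLastIndex);
  }

  public static void main(String[] args) {
    // Mirrors the test data, where the local log ends at index 2 with term 2.
    System.out.println(isUpToDate(3, 1, 2, 2)); // true: greater term, index ignored
    System.out.println(isUpToDate(1, 3, 2, 2)); // false: smaller term, index ignored
    System.out.println(isUpToDate(2, 1, 2, 2)); // false: equal term, shorter log
    System.out.println(isUpToDate(2, 2, 2, 2)); // true: equal term, equal index
  }
}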
testLogs1 = TestUtils.prepareNodeLogs(130); - raftLogManager.append(testLogs1); - try { - raftLogManager.commitTo(testLogs1.get(testLogs1.size() - 1).getCurrLogIndex()); - } catch (LogExecutionException e) { - Assert.fail(e.toString()); - } - // wait log is applied - long startTime = System.currentTimeMillis(); - for (Log log : testLogs1) { - while (!log.isApplied()) { - if ((System.currentTimeMillis() - startTime) > 60_000) { - fail( - String.format( - "apply log %s time out after %d", - log, (System.currentTimeMillis() - startTime))); - } - } - } - assertEquals(testLogs1.size(), appliedLogs.size()); - for (Log log : testLogs1) { - assertTrue(appliedLogs.containsKey(log.getCurrLogIndex())); - assertEquals(log, appliedLogs.get(log.getCurrLogIndex())); - } - - raftLogManager.setMaxHaveAppliedCommitIndex(100); - raftLogManager.checkDeleteLog(); - assertEquals(maxNumberOfLogs, committedEntryManager.getTotalSize()); - // assertEquals(maxNumberOfLogs, syncLogDequeSerializer.getLogSizeDeque().size()); - raftLogManager.close(); - - raftLogManager = - new TestRaftLogManager(committedEntryManager, syncLogDequeSerializer, logApplier); - raftLogManager.applyAllCommittedLogWhenStartUp(); - assertEquals(appliedLogs.size(), testLogs1.size()); - for (Log log : testLogs1) { - assertTrue(appliedLogs.containsKey(log.getCurrLogIndex())); - assertEquals(log, appliedLogs.get(log.getCurrLogIndex())); - } - } finally { - raftLogManager.close(); - } - } - - @Test - public void testCheckAppliedLogIndex() { - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - CommittedEntryManager committedEntryManager = - new CommittedEntryManager( - ClusterDescriptor.getInstance().getConfig().getMaxNumOfLogsInMem()); - RaftLogManager raftLogManager = - new TestRaftLogManager(committedEntryManager, syncLogDequeSerializer, logApplier); - - try { - int minNumberOfLogs = 100; - List testLogs1; - raftLogManager.setMinNumOfLogsInMem(minNumberOfLogs); - testLogs1 = TestUtils.prepareNodeLogs(130); - raftLogManager.append(testLogs1); - - try { - raftLogManager.commitTo(testLogs1.get(testLogs1.size() - 1 - 30).getCurrLogIndex()); - } catch (LogExecutionException e) { - Assert.fail(e.getMessage()); - } - // wait log is applied - long startTime = System.currentTimeMillis(); - for (int i = 0; i < testLogs1.size() - 30; i++) { - while (!testLogs1.get(i).isApplied()) { - if ((System.currentTimeMillis() - startTime) > 60_000) { - Assert.fail("apply log time out"); - break; - } - } - } - - assertEquals(testLogs1.size() - 30, appliedLogs.size()); - for (Log log : testLogs1.subList(0, testLogs1.size() - 30)) { - assertTrue(appliedLogs.containsKey(log.getCurrLogIndex())); - assertEquals(log, appliedLogs.get(log.getCurrLogIndex())); - } - - raftLogManager.setMaxHaveAppliedCommitIndex( - testLogs1.get(testLogs1.size() - 1 - 30).getCurrLogIndex()); - assertEquals( - raftLogManager.getCommitLogIndex(), raftLogManager.getMaxHaveAppliedCommitIndex()); - - raftLogManager.checkDeleteLog(); - assertEquals(minNumberOfLogs, committedEntryManager.getTotalSize()); - - for (int i = testLogs1.size() - 30; i < testLogs1.size(); i++) { - try { - raftLogManager.commitTo(i); - } catch (LogExecutionException e) { - Assert.fail(e.toString()); - } - while (!testLogs1.get(i).isApplied()) { - if ((System.currentTimeMillis() - startTime) > 60_000) { - Assert.fail("apply log time out"); - break; - } - } - raftLogManager.doCheckAppliedLogIndex(); - assertEquals(raftLogManager.getMaxHaveAppliedCommitIndex(), i); - } - } finally { - 
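// The tests in this area repeat the same bounded busy-wait: spin on log.isApplied() and
// fail if 60 seconds pass. A small helper capturing that pattern is sketched below; the
// tests themselves inline the loop, so this method does not exist in the code base.
import java.util.function.BooleanSupplier;

final class AwaitSketch {

  /** Spins until condition is true; returns false if timeoutMs elapses first. */
  static boolean await(BooleanSupplier condition, long timeoutMs) {
    long start = System.currentTimeMillis();
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() - start > timeoutMs) {
        return false; // let the caller decide how to report the timeout
      }
    }
    return true;
  }
  // Typical use in such a test: assertTrue("apply log time out", await(log::isApplied, 60_000));
}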
raftLogManager.close(); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/UnCommittedEntryManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/UnCommittedEntryManagerTest.java deleted file mode 100644 index 7b847a55e159..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/UnCommittedEntryManagerTest.java +++ /dev/null @@ -1,695 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.manage; - -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.logtypes.EmptyContentLog; -import org.apache.iotdb.cluster.log.snapshot.SimpleSnapshot; - -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - -public class UnCommittedEntryManagerTest { - - static class UnCommitEntryManagerTesterBase { - - public List entries; - public long offset; - - public UnCommitEntryManagerTesterBase(List entries, long offset) { - this.entries = entries; - this.offset = offset; - } - } - - @Test - public void getFirstUnCommittedIndex() { - class UnCommittedEntryManagerTester extends UnCommitEntryManagerTesterBase { - - public long testOffset; - - public UnCommittedEntryManagerTester(List entries, long offset, long testOffset) { - super(entries, offset); - this.testOffset = testOffset; - } - } - List tests = - new ArrayList() { - { - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - } - }, - 0, - 0)); - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - } - }, - 5, - 5)); - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(6, 1)); - } - }, - 5, - 5)); - } - }; - for (UnCommittedEntryManagerTester test : tests) { - UnCommittedEntryManager instance = new UnCommittedEntryManager(test.offset, test.entries); - long index = instance.getFirstUnCommittedIndex(); - assertEquals(test.testOffset, index); - } - } - - @Test - public void maybeLastIndex() { - class UnCommittedEntryManagerTester extends UnCommitEntryManagerTesterBase { - - public long testIndex; - - public UnCommittedEntryManagerTester(List entries, long offset, long testIndex) { - super(entries, offset); - this.testIndex = testIndex; - } - } - List tests = - new ArrayList() { - { - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - } - }, - 0, - -1)); - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - } - }, - 5, - 5)); - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 1)); - } - }, - 5, - 
6)); - } - }; - for (UnCommittedEntryManagerTester test : tests) { - UnCommittedEntryManager instance = new UnCommittedEntryManager(test.offset, test.entries); - long index = instance.maybeLastIndex(); - assertEquals(test.testIndex, index); - } - } - - @Test - public void maybeTerm() { - class UnCommittedEntryManagerTester extends UnCommitEntryManagerTesterBase { - - public long index; - public long testTerm; - public Class throwClass; - - public UnCommittedEntryManagerTester( - List entries, long offset, long index, long testTerm, Class throwClass) { - super(entries, offset); - this.index = index; - this.testTerm = testTerm; - this.throwClass = throwClass; - } - } - List tests = - new ArrayList() { - { - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - } - }, - 5, - 5, - 1, - null)); - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - } - }, - 5, - 4, - -1, - null)); - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 4)); - } - }, - 5, - 5, - 1, - null)); - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 4)); - } - }, - 5, - 6, - 4, - null)); - // entries that have been committed; - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 4)); - } - }, - 5, - 4, - -1, - null)); - // entries which are unavailable. - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - } - }, - 0, - 0, - -1, - null)); - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - } - }, - 5, - 6, - -1, - null)); - } - }; - for (UnCommittedEntryManagerTester test : tests) { - UnCommittedEntryManager instance = new UnCommittedEntryManager(test.offset, test.entries); - try { - long term = instance.maybeTerm(test.index); - if (test.throwClass != null) { - fail("The expected exception is not thrown"); - } else { - assertEquals(test.testTerm, term); - } - } catch (Exception e) { - if (!e.getClass().getName().equals(test.throwClass.getName())) { - fail("An unexpected exception was thrown."); - } - } - } - } - - @Test - public void stableTo() { - class UnCommittedEntryManagerTester extends UnCommitEntryManagerTesterBase { - - public long index; - public long testOffset; - public long testLen; - - public UnCommittedEntryManagerTester( - List entries, long offset, long index, long testOffset, long testLen) { - super(entries, offset); - this.index = index; - this.testOffset = testOffset; - this.testLen = testLen; - } - } - List tests = - new ArrayList() { - { - // stable to the first entry - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - } - }, - 5, - 5, - 6, - 0)); - // stable to the first entry - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 1)); - } - }, - 5, - 5, - 6, - 1)); - } - }; - for (UnCommittedEntryManagerTester test : tests) { - UnCommittedEntryManager instance = new UnCommittedEntryManager(test.offset, test.entries); - instance.stableTo(test.index); - assertEquals(test.testOffset, instance.getFirstUnCommittedIndex()); - assertEquals(test.testLen, instance.getAllEntries().size()); - } - } - - @Test - public void applyingSnapshot() { - class UnCommittedEntryManagerTester extends 
UnCommitEntryManagerTesterBase { - - public Snapshot snapshot; - public long testOffset; - - public UnCommittedEntryManagerTester( - List entries, long offset, Snapshot snapshot, long testOffset) { - super(entries, offset); - this.snapshot = snapshot; - this.testOffset = testOffset; - } - } - List tests = - new ArrayList() { - { - // empty entries - add( - new UnCommittedEntryManagerTester( - new ArrayList<>(), 5, new SimpleSnapshot(6, 6), 7)); - // normal case - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - } - }, - 5, - new SimpleSnapshot(20, 20), - 21)); - } - }; - for (UnCommittedEntryManagerTester test : tests) { - UnCommittedEntryManager instance = new UnCommittedEntryManager(test.offset, test.entries); - instance.applyingSnapshot(test.snapshot); - assertEquals(test.testOffset, instance.getFirstUnCommittedIndex()); - assertEquals(0, instance.getAllEntries().size()); - } - } - - @Test - public void truncateAndAppendSingle() { - class UnCommittedEntryManagerTester extends UnCommitEntryManagerTesterBase { - - public Log toAppend; - public long testOffset; - public List testEntries; - - public UnCommittedEntryManagerTester( - List entries, long offset, Log toAppend, long testOffset, List testEntries) { - super(entries, offset); - this.toAppend = toAppend; - this.testOffset = testOffset; - this.testEntries = testEntries; - } - } - List tests = - new ArrayList() { - { - // append to the end - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - } - }, - 5, - new EmptyContentLog(6, 1), - 5, - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 1)); - } - })); - // replace the uncommitted entries - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - } - }, - 5, - new EmptyContentLog(5, 2), - 5, - new ArrayList() { - { - add(new EmptyContentLog(5, 2)); - } - })); - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - } - }, - 5, - new EmptyContentLog(4, 2), - 5, - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - } - })); - // truncate the existing entries and append - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 1)); - add(new EmptyContentLog(7, 1)); - } - }, - 5, - new EmptyContentLog(6, 2), - 5, - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 2)); - } - })); - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 1)); - add(new EmptyContentLog(7, 1)); - } - }, - 5, - new EmptyContentLog(7, 2), - 5, - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 1)); - add(new EmptyContentLog(7, 2)); - } - })); - } - }; - for (UnCommittedEntryManagerTester test : tests) { - UnCommittedEntryManager instance = new UnCommittedEntryManager(test.offset, test.entries); - instance.truncateAndAppend(test.toAppend); - assertEquals(test.testOffset, instance.getFirstUnCommittedIndex()); - assertEquals(test.testEntries, instance.getAllEntries()); - } - } - - @Test - public void truncateAndAppendBatch() { - class UnCommittedEntryManagerTester extends UnCommitEntryManagerTesterBase { - - public List toAppend; - public long testOffset; - public List testEntries; - - public UnCommittedEntryManagerTester( - List entries, - long offset, - List 
toAppend, - long testOffset, - List testEntries) { - super(entries, offset); - this.toAppend = toAppend; - this.testOffset = testOffset; - this.testEntries = testEntries; - } - } - List tests = - new ArrayList() { - { - // append to the end - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - } - }, - 5, - new ArrayList() { - { - add(new EmptyContentLog(6, 1)); - add(new EmptyContentLog(7, 1)); - } - }, - 5, - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 1)); - add(new EmptyContentLog(7, 1)); - } - })); - // replace the uncommitted entries - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - } - }, - 5, - new ArrayList() { - { - add(new EmptyContentLog(5, 2)); - add(new EmptyContentLog(6, 2)); - } - }, - 5, - new ArrayList() { - { - add(new EmptyContentLog(5, 2)); - add(new EmptyContentLog(6, 2)); - } - })); - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - } - }, - 5, - new ArrayList() { - { - add(new EmptyContentLog(4, 2)); - add(new EmptyContentLog(5, 2)); - add(new EmptyContentLog(6, 2)); - } - }, - 5, - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - } - })); - // truncate the existing entries and append - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 1)); - add(new EmptyContentLog(7, 1)); - } - }, - 5, - new ArrayList() { - { - add(new EmptyContentLog(6, 2)); - } - }, - 5, - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 2)); - } - })); - add( - new UnCommittedEntryManagerTester( - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 1)); - add(new EmptyContentLog(7, 1)); - } - }, - 5, - new ArrayList() { - { - add(new EmptyContentLog(7, 2)); - add(new EmptyContentLog(8, 2)); - } - }, - 5, - new ArrayList() { - { - add(new EmptyContentLog(5, 1)); - add(new EmptyContentLog(6, 1)); - add(new EmptyContentLog(7, 2)); - add(new EmptyContentLog(8, 2)); - } - })); - } - }; - for (UnCommittedEntryManagerTester test : tests) { - UnCommittedEntryManager instance = new UnCommittedEntryManager(test.offset, test.entries); - instance.truncateAndAppend(test.toAppend); - assertEquals(test.testOffset, instance.getFirstUnCommittedIndex()); - assertEquals(test.testEntries, instance.getAllEntries()); - } - } - - @Test - public void getEntries() { - class UnCommittedEntryManagerTester { - - public long low; - public long high; - public List testEntries; - - public UnCommittedEntryManagerTester(long low, long high, List testEntries) { - this.low = low; - this.high = high; - this.testEntries = testEntries; - } - } - long offset = 100; - long num = 100; - long last = offset + num; - List entries = new ArrayList<>(); - for (int i = 0; i < num; i++) { - entries.add(new EmptyContentLog(offset + i, offset + i)); - } - UnCommittedEntryManager instance = new UnCommittedEntryManager(offset, entries); - List tests = - new ArrayList() { - { - add(new UnCommittedEntryManagerTester(offset, offset + num, entries)); - add( - new UnCommittedEntryManagerTester( - offset - 1, - offset + 1, - new ArrayList() { - { - add(new EmptyContentLog(offset, offset)); - } - })); - add( - new UnCommittedEntryManagerTester( - offset, - offset + 1, - new ArrayList() { - { - add(new EmptyContentLog(offset, offset)); - } - })); - add( - new UnCommittedEntryManagerTester( - last - 1, - 
last, - new ArrayList() { - { - add(new EmptyContentLog(last - 1, last - 1)); - } - })); - add( - new UnCommittedEntryManagerTester( - last - 1, - last + 1, - new ArrayList() { - { - add(new EmptyContentLog(last - 1, last - 1)); - } - })); - add(new UnCommittedEntryManagerTester(offset, offset, new ArrayList<>())); - add(new UnCommittedEntryManagerTester(last, last + 1, new ArrayList<>())); - add(new UnCommittedEntryManagerTester(last + 1, last + 1, new ArrayList<>())); - } - }; - for (UnCommittedEntryManagerTester test : tests) { - List answer = instance.getEntries(test.low, test.high); - assertEquals(test.testEntries, answer); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializerTest.java deleted file mode 100644 index 0277d3ebf852..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/manage/serializable/SyncLogDequeSerializerTest.java +++ /dev/null @@ -1,612 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.cluster.log.manage.serializable; - -import org.apache.iotdb.cluster.common.IoTDBTest; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.log.HardState; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.commons.file.SystemFileFactory; -import org.apache.iotdb.tsfile.utils.Pair; - -import org.junit.Assert; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.nio.BufferOverflowException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Properties; - -import static org.apache.iotdb.commons.conf.IoTDBConstant.FILE_NAME_SEPARATOR; - -public class SyncLogDequeSerializerTest extends IoTDBTest { - - private int testIdentifier = 1; - int maxPersistLogFileNumber = 3; - List testLogs1 = TestUtils.prepareNodeLogs(40); - - public void prepareFiles(SyncLogDequeSerializer syncLogDequeSerializer) { - try { - int oneLogSize = testLogs1.get(0).serialize().capacity(); - - syncLogDequeSerializer.setMaxRaftLogPersistDataSizePerFile(oneLogSize * 9); - // set max log file number - syncLogDequeSerializer.setMaxNumberOfPersistRaftLogFiles(maxPersistLogFileNumber); - // make sure every put should check the file size - ByteBuffer buffer = ByteBuffer.allocate(oneLogSize + 10); - syncLogDequeSerializer.setLogDataBuffer(buffer); - - // file1: 0-8 - syncLogDequeSerializer.append(testLogs1.subList(0, 10), 0); - testLogDataAndLogIndexEqual(syncLogDequeSerializer); - Assert.assertEquals(2, syncLogDequeSerializer.getLogDataFileList().size()); - File file = - syncLogDequeSerializer - .getLogDataFileList() - .get(syncLogDequeSerializer.getLogDataFileList().size() - 2); - String[] splits = file.getName().split(FILE_NAME_SEPARATOR); - Assert.assertEquals(0, Long.parseLong(splits[0])); - Assert.assertEquals(8, Long.parseLong(splits[1])); - - // file2: 9-17 - syncLogDequeSerializer.append(testLogs1.subList(10, 20), 0); - testLogDataAndLogIndexEqual(syncLogDequeSerializer); - Assert.assertEquals(3, syncLogDequeSerializer.getLogDataFileList().size()); - file = - syncLogDequeSerializer - .getLogDataFileList() - .get(syncLogDequeSerializer.getLogDataFileList().size() - 2); - splits = file.getName().split(FILE_NAME_SEPARATOR); - Assert.assertEquals(9, Long.parseLong(splits[0])); - Assert.assertEquals(17, Long.parseLong(splits[1])); - - // file3: 18-26 - syncLogDequeSerializer.append(testLogs1.subList(20, 30), 0); - testLogDataAndLogIndexEqual(syncLogDequeSerializer); - Assert.assertEquals(4, syncLogDequeSerializer.getLogDataFileList().size()); - file = - syncLogDequeSerializer - .getLogDataFileList() - .get(syncLogDequeSerializer.getLogDataFileList().size() - 2); - splits = file.getName().split(FILE_NAME_SEPARATOR); - Assert.assertEquals(18, Long.parseLong(splits[0])); - Assert.assertEquals(26, Long.parseLong(splits[1])); - - // file4: 27-35 - syncLogDequeSerializer.append(testLogs1.subList(30, 40), 0); - testLogDataAndLogIndexEqual(syncLogDequeSerializer); - Assert.assertEquals(5, syncLogDequeSerializer.getLogDataFileList().size()); - file = - syncLogDequeSerializer - .getLogDataFileList() - .get(syncLogDequeSerializer.getLogDataFileList().size() - 2); - splits = file.getName().split(FILE_NAME_SEPARATOR); - Assert.assertEquals(27, Long.parseLong(splits[0])); - Assert.assertEquals(35, Long.parseLong(splits[1])); - - // file5:36-Long.MAX_VALUE. 
check the last one log file - file = - syncLogDequeSerializer - .getLogDataFileList() - .get(syncLogDequeSerializer.getLogDataFileList().size() - 1); - splits = file.getName().split(FILE_NAME_SEPARATOR); - Assert.assertEquals(36, Long.parseLong(splits[0])); - Assert.assertEquals(Long.MAX_VALUE, Long.parseLong(splits[1])); - } catch (IOException e) { - Assert.fail(e.getMessage()); - } - } - - @Test - public void testAppend() { - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - try { - prepareFiles(syncLogDequeSerializer); - - // check the max log file number, the first one will be removed - syncLogDequeSerializer.checkDeletePersistRaftLog(); - testLogDataAndLogIndexEqual(syncLogDequeSerializer); - Assert.assertEquals( - maxPersistLogFileNumber, syncLogDequeSerializer.getLogDataFileList().size()); - File file = syncLogDequeSerializer.getLogDataFileList().get(0); - String[] splits = file.getName().split(FILE_NAME_SEPARATOR); - // after delete log file, the first log data file is file3 - Assert.assertEquals(18, Long.parseLong(splits[0])); - Assert.assertEquals(26, Long.parseLong(splits[1])); - } finally { - syncLogDequeSerializer.close(); - } - } - - private void testLogDataAndLogIndexEqual(SyncLogDequeSerializer syncLogDequeSerializer) { - Assert.assertEquals( - syncLogDequeSerializer.getLogDataFileList().size(), - syncLogDequeSerializer.getLogIndexFileList().size()); - - for (int i = 0; i < syncLogDequeSerializer.getLogDataFileList().size(); i++) { - String[] logDataSplits = - syncLogDequeSerializer.getLogDataFileList().get(i).getName().split(FILE_NAME_SEPARATOR); - String[] logIndexSplits = - syncLogDequeSerializer.getLogIndexFileList().get(i).getName().split(FILE_NAME_SEPARATOR); - - Assert.assertEquals(logDataSplits.length, logIndexSplits.length); - - // start log index - Assert.assertEquals(Long.parseLong(logDataSplits[0]), Long.parseLong(logIndexSplits[0])); - // end log index - Assert.assertEquals(Long.parseLong(logDataSplits[1]), Long.parseLong(logIndexSplits[1])); - // version - Assert.assertEquals(Long.parseLong(logDataSplits[2]), Long.parseLong(logIndexSplits[2])); - } - } - - @Test - public void testGetLogs() { - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - prepareFiles(syncLogDequeSerializer); - try { - List logList = syncLogDequeSerializer.getLogs(0, 10); - for (int i = 0; i < logList.size(); i++) { - Assert.assertEquals(testLogs1.get(i), logList.get(i)); - } - - logList = syncLogDequeSerializer.getLogs(10, 20); - for (int i = 0; i < logList.size(); i++) { - Assert.assertEquals(testLogs1.get(i + 10), logList.get(i)); - } - - logList = syncLogDequeSerializer.getLogs(0, 40); - for (int i = 0; i < logList.size(); i++) { - Assert.assertEquals(testLogs1.get(i), logList.get(i)); - } - - // the max size of testLogs1 is 40 - logList = syncLogDequeSerializer.getLogs(40, 100); - Assert.assertTrue(logList.isEmpty()); - - logList = syncLogDequeSerializer.getLogs(20, 30); - for (int i = 0; i < logList.size(); i++) { - Assert.assertEquals(testLogs1.get(i + 20), logList.get(i)); - } - - logList = syncLogDequeSerializer.getLogs(30, 20); - Assert.assertTrue(logList.isEmpty()); - - logList = syncLogDequeSerializer.getLogs(40, 40); - for (int i = 0; i < logList.size(); i++) { - Assert.assertEquals(testLogs1.get(i + 40), logList.get(i)); - } - - logList = syncLogDequeSerializer.getLogs(0, 0); - for (int i = 0; i < logList.size(); i++) { - Assert.assertEquals(testLogs1.get(i), logList.get(i)); - } - - 
logList = syncLogDequeSerializer.getLogs(-1, 0); - Assert.assertTrue(logList.isEmpty()); - - } finally { - syncLogDequeSerializer.close(); - } - } - - @Test - public void testGetLogIndexFile() { - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - try { - prepareFiles(syncLogDequeSerializer); - - Pair> fileStartAndEndIndex; - for (int i = 0; i < 9; i++) { - fileStartAndEndIndex = syncLogDequeSerializer.getLogIndexFile(i); - Assert.assertEquals( - syncLogDequeSerializer.getLogIndexFileList().get(0), fileStartAndEndIndex.left); - Assert.assertEquals(0L, fileStartAndEndIndex.right.left.longValue()); - Assert.assertEquals(8L, fileStartAndEndIndex.right.right.longValue()); - } - - for (int i = 9; i < 18; i++) { - fileStartAndEndIndex = syncLogDequeSerializer.getLogIndexFile(i); - Assert.assertEquals( - syncLogDequeSerializer.getLogIndexFileList().get(1), fileStartAndEndIndex.left); - Assert.assertEquals(9L, fileStartAndEndIndex.right.left.longValue()); - Assert.assertEquals(17L, fileStartAndEndIndex.right.right.longValue()); - } - - for (int i = 36; i < 40; i++) { - fileStartAndEndIndex = syncLogDequeSerializer.getLogIndexFile(i); - int lastLogFile = syncLogDequeSerializer.getLogIndexFileList().size() - 1; - Assert.assertEquals( - syncLogDequeSerializer.getLogIndexFileList().get(lastLogFile), - fileStartAndEndIndex.left); - Assert.assertEquals(36L, fileStartAndEndIndex.right.left.longValue()); - Assert.assertEquals(Long.MAX_VALUE, fileStartAndEndIndex.right.right.longValue()); - } - - } finally { - syncLogDequeSerializer.close(); - } - } - - @Test - public void testGetLogDataFile() { - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - try { - prepareFiles(syncLogDequeSerializer); - - Pair> fileStartAndEndIndex; - for (int i = 0; i < 9; i++) { - fileStartAndEndIndex = syncLogDequeSerializer.getLogDataFile(i); - Assert.assertEquals( - syncLogDequeSerializer.getLogDataFileList().get(0), fileStartAndEndIndex.left); - Assert.assertEquals(0L, fileStartAndEndIndex.right.left.longValue()); - Assert.assertEquals(8L, fileStartAndEndIndex.right.right.longValue()); - } - - for (int i = 9; i < 18; i++) { - fileStartAndEndIndex = syncLogDequeSerializer.getLogDataFile(i); - Assert.assertEquals( - syncLogDequeSerializer.getLogDataFileList().get(1), fileStartAndEndIndex.left); - Assert.assertEquals(9L, fileStartAndEndIndex.right.left.longValue()); - Assert.assertEquals(17L, fileStartAndEndIndex.right.right.longValue()); - } - - for (int i = 36; i < 40; i++) { - fileStartAndEndIndex = syncLogDequeSerializer.getLogDataFile(i); - int lastLogFile = syncLogDequeSerializer.getLogDataFileList().size() - 1; - Assert.assertEquals( - syncLogDequeSerializer.getLogDataFileList().get(lastLogFile), - fileStartAndEndIndex.left); - Assert.assertEquals(36L, fileStartAndEndIndex.right.left.longValue()); - Assert.assertEquals(Long.MAX_VALUE, fileStartAndEndIndex.right.right.longValue()); - } - } finally { - syncLogDequeSerializer.close(); - } - } - - @Test - public void testAppendOverflow() { - int raftLogBufferSize = ClusterDescriptor.getInstance().getConfig().getRaftLogBufferSize(); - ClusterDescriptor.getInstance().getConfig().setRaftLogBufferSize(0); - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - try { - List testLogs1 = TestUtils.prepareNodeLogs(10); - try { - syncLogDequeSerializer.append(testLogs1, 0); - Assert.fail("No exception thrown"); - } catch (IOException e) { - 
Assert.assertTrue(e.getCause() instanceof BufferOverflowException); - } - } finally { - ClusterDescriptor.getInstance().getConfig().setRaftLogBufferSize(raftLogBufferSize); - syncLogDequeSerializer.close(); - } - } - - @Test - public void testGetOffsetAccordingToLogIndex() { - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - try { - prepareFiles(syncLogDequeSerializer); - int currentOffset = 0; - for (int i = 0; i < testLogs1.size(); i++) { - long offset = syncLogDequeSerializer.getOffsetAccordingToLogIndex(i); - if (i % 9 == 0) { - currentOffset = 0; - } - Assert.assertEquals(currentOffset, offset); - currentOffset += 4 + testLogs1.get(i).serialize().capacity(); - } - } finally { - syncLogDequeSerializer.close(); - } - } - - @Test - public void testRecoveryForClose() { - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - int logNum = 10; - int maxHaveAppliedCommitIndex = 7; - List testLogs1 = TestUtils.prepareNodeLogs(logNum); - HardState hardState = new HardState(); - hardState.setCurrentTerm(10); - hardState.setVoteFor(TestUtils.getNode(5)); - try { - syncLogDequeSerializer.append(testLogs1, maxHaveAppliedCommitIndex); - syncLogDequeSerializer.setHardStateAndFlush(hardState); - } catch (IOException e) { - Assert.fail(e.getMessage()); - } finally { - syncLogDequeSerializer.close(); - } - - // recovery - syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - try { - List logDeque = syncLogDequeSerializer.getAllEntriesAfterAppliedIndex(); - int expectSize = - (int) testLogs1.get(testLogs1.size() - 1).getCurrLogIndex() - - maxHaveAppliedCommitIndex - + 1; - Assert.assertEquals(expectSize, logDeque.size()); - for (int i = maxHaveAppliedCommitIndex; i < logNum; i++) { - Assert.assertEquals(testLogs1.get(i), logDeque.get(i - maxHaveAppliedCommitIndex)); - } - Assert.assertEquals(hardState, syncLogDequeSerializer.getHardState()); - } finally { - syncLogDequeSerializer.close(); - } - } - - @Test - public void testRecoveryForNotClose() { - Properties pop = System.getProperties(); - String osName = pop.getProperty("os.name"); - // for window os, skip the test because windows do not support reopen a file which is already - // opened. 
- if (osName.contains("Windows")) { - return; - } - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - int logNum = 10; - int maxHaveAppliedCommitIndex = 7; - List testLogs1 = TestUtils.prepareNodeLogs(logNum); - HardState hardState = new HardState(); - hardState.setCurrentTerm(10); - hardState.setVoteFor(TestUtils.getNode(5)); - try { - syncLogDequeSerializer.append(testLogs1, maxHaveAppliedCommitIndex); - syncLogDequeSerializer.setHardStateAndFlush(hardState); - } catch (IOException e) { - Assert.fail(e.getMessage()); - } - syncLogDequeSerializer.forceFlushLogBuffer(); - - // recovery - syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - try { - List logDeque = syncLogDequeSerializer.getAllEntriesAfterAppliedIndex(); - int expectSize = - (int) testLogs1.get(testLogs1.size() - 1).getCurrLogIndex() - - maxHaveAppliedCommitIndex - + 1; - Assert.assertEquals(expectSize, logDeque.size()); - for (int i = maxHaveAppliedCommitIndex; i < logNum; i++) { - Assert.assertEquals(testLogs1.get(i), logDeque.get(i - maxHaveAppliedCommitIndex)); - } - Assert.assertEquals(hardState, syncLogDequeSerializer.getHardState()); - } finally { - syncLogDequeSerializer.close(); - } - } - - @Test - public void testRecoveryForNotCloseAndLoseData() { - // TODO-Cluster: do it more elegantly - Properties pop = System.getProperties(); - String osName = pop.getProperty("os.name"); - // for window os, skip the test because windows do not support reopen a file which is already - // opened. - if (osName.contains("Windows")) { - return; - } - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - int logNum = 20; - int maxHaveAppliedCommitIndex = 7; - List testLogs1 = TestUtils.prepareNodeLogs(logNum); - HardState hardState = new HardState(); - hardState.setCurrentTerm(10); - hardState.setVoteFor(TestUtils.getNode(5)); - try { - syncLogDequeSerializer.append(testLogs1.subList(0, 10), maxHaveAppliedCommitIndex); - syncLogDequeSerializer.setHardStateAndFlush(hardState); - } catch (IOException e) { - Assert.fail(e.getMessage()); - } - syncLogDequeSerializer.forceFlushLogBuffer(); - - // add more logs, bug this logs will lost for not close - try { - syncLogDequeSerializer.append(testLogs1.subList(10, 20), maxHaveAppliedCommitIndex); - syncLogDequeSerializer.setHardStateAndFlush(hardState); - } catch (IOException e) { - Assert.fail(e.getMessage()); - } - - // recovery - syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - try { - // all log files are deleted, the first new log file is newly created and the length is 0 - Assert.assertEquals(1, syncLogDequeSerializer.getLogDataFileList().size()); - Assert.assertEquals(1, syncLogDequeSerializer.getLogIndexFileList().size()); - - Assert.assertEquals( - 0, - syncLogDequeSerializer - .getLogDataFileList() - .get(syncLogDequeSerializer.getLogDataFileList().size() - 1) - .length()); - - Assert.assertEquals( - 0, - syncLogDequeSerializer - .getLogIndexFileList() - .get(syncLogDequeSerializer.getLogIndexFileList().size() - 1) - .length()); - } finally { - syncLogDequeSerializer.close(); - } - } - - @Test - public void testRecoveryLogMeta() throws IOException { - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - int logNum = 10; - int maxHaveAppliedCommitIndex = 7; - HardState hardState = new HardState(); - hardState.setCurrentTerm(10); - hardState.setVoteFor(TestUtils.getNode(5)); - try { - testLogs1 = 
TestUtils.prepareNodeLogs(logNum); - syncLogDequeSerializer.append(testLogs1, maxHaveAppliedCommitIndex); - syncLogDequeSerializer.setHardStateAndFlush(hardState); - } finally { - syncLogDequeSerializer.close(); - } - - // recovery - try { - syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - Assert.assertEquals( - testLogs1.get(testLogs1.size() - 1).getCurrLogIndex(), - syncLogDequeSerializer.getMeta().getCommitLogIndex()); - Assert.assertEquals( - maxHaveAppliedCommitIndex, - syncLogDequeSerializer.getMeta().getMaxHaveAppliedCommitIndex()); - Assert.assertEquals(hardState, syncLogDequeSerializer.getHardState()); - } finally { - syncLogDequeSerializer.close(); - } - } - - @Test - public void testGetAllEntriesBeforeAppliedIndexEmpty() { - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - int logNum = 10; - int maxHaveAppliedCommitIndex = 0; - List testLogs1; - HardState hardState = new HardState(); - hardState.setCurrentTerm(10); - hardState.setVoteFor(TestUtils.getNode(5)); - try { - - testLogs1 = TestUtils.prepareNodeLogs(logNum); - maxHaveAppliedCommitIndex = (int) testLogs1.get(testLogs1.size() - 1).getCurrLogIndex(); - syncLogDequeSerializer.append(testLogs1, maxHaveAppliedCommitIndex); - syncLogDequeSerializer.setHardStateAndFlush(hardState); - } catch (IOException e) { - Assert.fail(e.getMessage()); - } finally { - syncLogDequeSerializer.close(); - } - - // recovery - syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - try { - List logDeque = syncLogDequeSerializer.getAllEntriesAfterAppliedIndex(); - Assert.assertTrue(logDeque.isEmpty()); - } finally { - syncLogDequeSerializer.close(); - } - } - - @Test - public void testGetAllEntriesBeforeAppliedIndexNotEmpty() { - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - int logNum = 10; - int maxHaveAppliedCommitIndex = 4; - List testLogs1 = null; - HardState hardState = new HardState(); - hardState.setCurrentTerm(10); - hardState.setVoteFor(TestUtils.getNode(5)); - try { - testLogs1 = TestUtils.prepareNodeLogs(logNum); - syncLogDequeSerializer.append(testLogs1, maxHaveAppliedCommitIndex); - syncLogDequeSerializer.setHardStateAndFlush(hardState); - } catch (IOException e) { - Assert.fail(e.getMessage()); - } finally { - syncLogDequeSerializer.close(); - } - - // recovery - syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - try { - List logDeque = syncLogDequeSerializer.getAllEntriesAfterAppliedIndex(); - for (int i = 0; i < logDeque.size(); i++) { - Assert.assertEquals(testLogs1.get(i + maxHaveAppliedCommitIndex), logDeque.get(i)); - } - } finally { - syncLogDequeSerializer.close(); - } - } - - @Test - public void testDeleteLogs() { - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - try { - prepareFiles(syncLogDequeSerializer); - int maxLogFile = 2; - syncLogDequeSerializer.setMaxNumberOfPersistRaftLogFiles(maxLogFile); - syncLogDequeSerializer.checkDeletePersistRaftLog(); - Assert.assertEquals(maxLogFile, syncLogDequeSerializer.getLogDataFileList().size()); - Assert.assertEquals(maxLogFile, syncLogDequeSerializer.getLogIndexFileList().size()); - } finally { - syncLogDequeSerializer.close(); - } - } - - @Test - public void testRecoverFromTemp() { - SyncLogDequeSerializer syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - int logNum = 10; - int maxHaveAppliedCommitIndex = 5; - List testLogs1 = TestUtils.prepareNodeLogs(logNum); - ; 
- HardState hardState = new HardState(); - hardState.setCurrentTerm(10); - hardState.setVoteFor(TestUtils.getNode(5)); - try { - syncLogDequeSerializer.append(testLogs1, maxHaveAppliedCommitIndex); - syncLogDequeSerializer.setHardStateAndFlush(hardState); - } catch (IOException e) { - Assert.fail(e.getMessage()); - } finally { - syncLogDequeSerializer.close(); - } - - // recovery - syncLogDequeSerializer = new SyncLogDequeSerializer(testIdentifier); - try { - String logDir = syncLogDequeSerializer.getLogDir(); - File metaFile = SystemFileFactory.INSTANCE.getFile(logDir + "logMeta"); - File tempMetaFile = SystemFileFactory.INSTANCE.getFile(logDir + "logMeta.tmp"); - metaFile.renameTo(tempMetaFile); - metaFile.createNewFile(); - List logDeque = syncLogDequeSerializer.getAllEntriesAfterAppliedIndex(); - int expectSize = - (int) testLogs1.get(testLogs1.size() - 1).getCurrLogIndex() - - maxHaveAppliedCommitIndex - + 1; - Assert.assertEquals(expectSize, logDeque.size()); - for (int i = maxHaveAppliedCommitIndex; i < logNum; i++) { - Assert.assertEquals(testLogs1.get(i), logDeque.get(i - maxHaveAppliedCommitIndex)); - } - Assert.assertEquals(hardState, syncLogDequeSerializer.getHardState()); - } catch (IOException e) { - Assert.fail(e.getMessage()); - } finally { - syncLogDequeSerializer.close(); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/DataSnapshotTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/DataSnapshotTest.java deleted file mode 100644 index a09a34e9d688..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/DataSnapshotTest.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.common.TestDataGroupMember; -import org.apache.iotdb.cluster.common.TestLogManager; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterConfig; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.coordinator.Coordinator; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.utils.IOUtils; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.exception.StartupException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.apache.thrift.TConfiguration; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.transport.TTransport; -import org.junit.After; -import org.junit.Before; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.file.Files; - -public abstract class DataSnapshotTest { - - DataGroupMember dataGroupMember; - MetaGroupMember metaGroupMember; - Coordinator coordinator; - final int failureFrequency = 10; - int failureCnt; - boolean addNetFailure = false; - - private final ClusterConfig config = ClusterDescriptor.getInstance().getConfig(); - private boolean isAsyncServer; - - @Before - public void setUp() throws MetadataException, StartupException { - isAsyncServer = config.isUseAsyncServer(); - config.setUseAsyncServer(true); - dataGroupMember = - new TestDataGroupMember() { - @Override - public AsyncClient getAsyncClient(Node node) { - return new AsyncDataClient(null, null, null) { - @Override - public void readFile( - String filePath, - long offset, - int length, - AsyncMethodCallback resultHandler) { - new Thread( - () -> { - if (addNetFailure && (failureCnt++) % failureFrequency == 0) { - // insert 1 failure in every 10 requests - resultHandler.onError( - new Exception("[Ignore me in Tests] Faked network failure")); - return; - } - try { - resultHandler.onComplete(IOUtils.readFile(filePath, offset, length)); - } catch (IOException e) { - resultHandler.onError(e); - } - }) - .start(); - } - - @Override - public void removeHardLink( - String hardLinkPath, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - try { - Files.deleteIfExists(new File(hardLinkPath).toPath()); - } catch (IOException e) { - // ignore - } - }) - .start(); - } - }; - } - - @Override - public Client getSyncClient(Node node) { - return new SyncDataClient( - new TBinaryProtocol( - new TTransport() { - @Override - public boolean isOpen() { - return false; - } - - @Override - public void open() {} - - @Override - public void close() {} - - @Override - public int read(byte[] bytes, int i, int i1) { - return 0; - } - - @Override - public void write(byte[] bytes, int i, int i1) {} - - @Override - public TConfiguration getConfiguration() { - return null; - } - - 
@Override - public void updateKnownMessageSize(long size) {} - - @Override - public void checkReadBytesAvailable(long numBytes) {} - })) { - @Override - public ByteBuffer readFile(String filePath, long offset, int length) - throws TException { - if (addNetFailure && (failureCnt++) % failureFrequency == 0) { - // simulate failures - throw new TException("[Ignore me in tests] Faked network failure"); - } - try { - return IOUtils.readFile(filePath, offset, length); - } catch (IOException e) { - throw new TException(e); - } - } - }; - } - }; - // do nothing - metaGroupMember = - new TestMetaGroupMember() { - @Override - public void syncLeaderWithConsistencyCheck(boolean isWriteRequest) { - // do nothing - } - }; - coordinator = new Coordinator(metaGroupMember); - metaGroupMember.setCoordinator(coordinator); - metaGroupMember.setPartitionTable(TestUtils.getPartitionTable(10)); - dataGroupMember.setMetaGroupMember(metaGroupMember); - dataGroupMember.setLogManager(new TestLogManager(0)); - EnvironmentUtils.envSetUp(); - for (int i = 0; i < 10; i++) { - IoTDB.schemaProcessor.setStorageGroup(new PartialPath(TestUtils.getTestSg(i))); - } - addNetFailure = false; - } - - @After - public void tearDown() throws Exception { - config.setUseAsyncServer(isAsyncServer); - metaGroupMember.closeLogManager(); - dataGroupMember.closeLogManager(); - metaGroupMember.stop(); - dataGroupMember.stop(); - EnvironmentUtils.cleanEnv(); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/FileSnapshotTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/FileSnapshotTest.java deleted file mode 100644 index a5de19ceb87c..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/FileSnapshotTest.java +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.cluster.RemoteTsFileResource; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.SnapshotInstallationException; -import org.apache.iotdb.cluster.partition.slot.SlotManager.SlotStatus; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.engine.modification.Deletion; -import org.apache.iotdb.db.engine.modification.ModificationFile; -import org.apache.iotdb.db.engine.storagegroup.DataRegion; -import org.apache.iotdb.db.engine.storagegroup.TsFileResource; -import org.apache.iotdb.db.exception.LoadFileException; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.tsfile.exception.write.WriteProcessException; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class FileSnapshotTest extends DataSnapshotTest { - - @Test - public void testSerialize() throws IOException, WriteProcessException { - FileSnapshot snapshot = new FileSnapshot(); - List dataFiles = new ArrayList<>(); - List timeseriesSchemas = new ArrayList<>(); - List tsFileResources = TestUtils.prepareTsFileResources(0, 10, 10, 10, true); - for (int i = 0; i < 10; i++) { - RemoteTsFileResource dataFile = - new RemoteTsFileResource(tsFileResources.get(i), TestUtils.getNode(i)); - dataFiles.add(dataFile); - snapshot.addFile(tsFileResources.get(i), TestUtils.getNode(i)); - timeseriesSchemas.add(TestUtils.getTestTimeSeriesSchema(0, i)); - } - assertEquals(dataFiles, snapshot.getDataFiles()); - snapshot.setTimeseriesSchemas(timeseriesSchemas); - - assertEquals("FileSnapshot{10 files, 10 series, index-term: 0-0}", snapshot.toString()); - - ByteBuffer buffer = snapshot.serialize(); - - FileSnapshot deserialized = new FileSnapshot(); - deserialized.deserialize(buffer); - assertEquals(snapshot, deserialized); - } - - @Test - public void testInstallSingle() - throws IOException, SnapshotInstallationException, IllegalPathException, - StorageEngineException, WriteProcessException { - testInstallSingle(false); - } - - @Test - public void testInstallSingleWithFailure() - throws IOException, SnapshotInstallationException, IllegalPathException, - StorageEngineException, WriteProcessException { - testInstallSingle(true); - } - - public void testInstallSingle(boolean addNetFailure) - throws IOException, SnapshotInstallationException, IllegalPathException, - StorageEngineException, WriteProcessException { - this.addNetFailure = addNetFailure; - - FileSnapshot snapshot = new FileSnapshot(); - List timeseriesSchemas = new ArrayList<>(); - List tsFileResources = TestUtils.prepareTsFileResources(0, 10, 10, 10, true); - for (int i = 0; i < 10; i++) { - snapshot.addFile(tsFileResources.get(i), TestUtils.getNode(i)); - timeseriesSchemas.add(TestUtils.getTestTimeSeriesSchema(0, i)); - } - snapshot.setTimeseriesSchemas(timeseriesSchemas); - - SnapshotInstaller defaultInstaller = - 
snapshot.getDefaultInstaller(dataGroupMember); - dataGroupMember.getSlotManager().setToPulling(0, TestUtils.getNode(0)); - defaultInstaller.install(snapshot, 0, false); - // after installation, the slot should be available again - assertEquals(SlotStatus.NULL, dataGroupMember.getSlotManager().getStatus(0)); - - for (TimeseriesSchema timeseriesSchema : timeseriesSchemas) { - assertTrue( - IoTDB.schemaProcessor.isPathExist(new PartialPath(timeseriesSchema.getFullPath()))); - } - DataRegion processor = - StorageEngine.getInstance().getProcessor(new PartialPath(TestUtils.getTestSg(0))); - assertEquals(10, processor.getPartitionMaxFileVersions(0)); - List loadedFiles = processor.getSequenceFileList(); - assertEquals(tsFileResources.size(), loadedFiles.size()); - for (int i = 0; i < loadedFiles.size(); i++) { - assertEquals(i, loadedFiles.get(i).getMaxPlanIndex()); - } - assertEquals(0, processor.getUnSequenceFileList().size()); - - for (TsFileResource tsFileResource : tsFileResources) { - // source files should be deleted after being pulled - assertFalse(tsFileResource.getTsFile().exists()); - } - } - - @Test - public void testInstallSync() - throws IOException, SnapshotInstallationException, IllegalPathException, - StorageEngineException, WriteProcessException { - boolean useAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(false); - - try { - FileSnapshot snapshot = new FileSnapshot(); - List timeseriesSchemas = new ArrayList<>(); - List tsFileResources = TestUtils.prepareTsFileResources(0, 10, 10, 10, true); - for (int i = 0; i < 10; i++) { - snapshot.addFile(tsFileResources.get(i), TestUtils.getNode(i)); - timeseriesSchemas.add(TestUtils.getTestTimeSeriesSchema(0, i)); - } - snapshot.setTimeseriesSchemas(timeseriesSchemas); - - SnapshotInstaller defaultInstaller = - snapshot.getDefaultInstaller(dataGroupMember); - dataGroupMember.getSlotManager().setToPulling(0, TestUtils.getNode(0)); - defaultInstaller.install(snapshot, 0, false); - // after installation, the slot should be available again - assertEquals(SlotStatus.NULL, dataGroupMember.getSlotManager().getStatus(0)); - - for (TimeseriesSchema timeseriesSchema : timeseriesSchemas) { - assertTrue( - IoTDB.schemaProcessor.isPathExist(new PartialPath(timeseriesSchema.getFullPath()))); - } - DataRegion processor = - StorageEngine.getInstance().getProcessor(new PartialPath(TestUtils.getTestSg(0))); - assertEquals(10, processor.getPartitionMaxFileVersions(0)); - List loadedFiles = processor.getSequenceFileList(); - assertEquals(tsFileResources.size(), loadedFiles.size()); - for (int i = 0; i < loadedFiles.size(); i++) { - assertEquals(i, loadedFiles.get(i).getMaxPlanIndex()); - } - assertEquals(0, processor.getUnSequenceFileList().size()); - - for (TsFileResource tsFileResource : tsFileResources) { - // source files should be deleted after being pulled - assertFalse(tsFileResource.getTsFile().exists()); - } - } finally { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(useAsyncServer); - } - } - - @Test - public void testInstallWithModFile() - throws IOException, SnapshotInstallationException, IllegalPathException, - StorageEngineException, WriteProcessException { - FileSnapshot snapshot = new FileSnapshot(); - List timeseriesSchemas = new ArrayList<>(); - List tsFileResources = TestUtils.prepareTsFileResources(0, 10, 10, 10, true); - for (int i = 0; i < 10; i++) { - ModificationFile modFile = tsFileResources.get(i).getModFile(); - 
modFile.write(new Deletion(new PartialPath(TestUtils.getTestSg(0)), 0, 10)); - modFile.close(); - snapshot.addFile(tsFileResources.get(i), TestUtils.getNode(i)); - timeseriesSchemas.add(TestUtils.getTestTimeSeriesSchema(0, i)); - } - snapshot.setTimeseriesSchemas(timeseriesSchemas); - - ByteBuffer buffer = snapshot.serialize(); - FileSnapshot fileSnapshot = new FileSnapshot(); - fileSnapshot.deserialize(buffer); - - SnapshotInstaller defaultInstaller = - fileSnapshot.getDefaultInstaller(dataGroupMember); - dataGroupMember.getSlotManager().setToPulling(0, TestUtils.getNode(0)); - defaultInstaller.install(fileSnapshot, 0, false); - // after installation, the slot should be available again - assertEquals(SlotStatus.NULL, dataGroupMember.getSlotManager().getStatus(0)); - - for (TimeseriesSchema timeseriesSchema : timeseriesSchemas) { - assertTrue( - IoTDB.schemaProcessor.isPathExist(new PartialPath(timeseriesSchema.getFullPath()))); - } - DataRegion processor = - StorageEngine.getInstance().getProcessor(new PartialPath(TestUtils.getTestSg(0))); - assertEquals(10, processor.getPartitionMaxFileVersions(0)); - List loadedFiles = processor.getSequenceFileList(); - assertEquals(tsFileResources.size(), loadedFiles.size()); - for (int i = 0; i < loadedFiles.size(); i++) { - assertEquals(i, loadedFiles.get(i).getMaxPlanIndex()); - ModificationFile modFile = loadedFiles.get(i).getModFile(); - assertTrue(modFile.exists()); - - Deletion deletion = new Deletion(new PartialPath(TestUtils.getTestSg(0)), 0, 10); - assertTrue(modFile.getModifications().contains(deletion)); - assertEquals(1, modFile.getModifications().size()); - modFile.close(); - } - assertEquals(0, processor.getUnSequenceFileList().size()); - } - - @Test - public void testInstallMultiple() - throws IOException, WriteProcessException, SnapshotInstallationException, - IllegalPathException, StorageEngineException { - Map snapshotMap = new HashMap<>(); - for (int j = 0; j < 10; j++) { - FileSnapshot snapshot = new FileSnapshot(); - List timeseriesSchemas = new ArrayList<>(); - List tsFileResources = TestUtils.prepareTsFileResources(j, 10, 10, 10, true); - for (int i = 0; i < 10; i++) { - snapshot.addFile(tsFileResources.get(i), TestUtils.getNode(i)); - timeseriesSchemas.add(TestUtils.getTestTimeSeriesSchema(0, i)); - } - snapshot.setTimeseriesSchemas(timeseriesSchemas); - snapshotMap.put(j, snapshot); - } - - SnapshotInstaller defaultInstaller = - snapshotMap.get(0).getDefaultInstaller(dataGroupMember); - defaultInstaller.install(snapshotMap, false); - - for (int j = 0; j < 10; j++) { - DataRegion processor = - StorageEngine.getInstance().getProcessor(new PartialPath(TestUtils.getTestSg(j))); - assertEquals(10, processor.getPartitionMaxFileVersions(0)); - List loadedFiles = processor.getSequenceFileList(); - assertEquals(10, loadedFiles.size()); - for (int i = 0; i < loadedFiles.size(); i++) { - assertEquals(i, loadedFiles.get(i).getMaxPlanIndex()); - } - assertEquals(0, processor.getUnSequenceFileList().size()); - } - } - - @Test - public void testInstallPartial() - throws IOException, SnapshotInstallationException, IllegalPathException, - StorageEngineException, WriteProcessException, LoadFileException { - // dataGroupMember already have some of the files - FileSnapshot snapshot = new FileSnapshot(); - List timeseriesSchemas = new ArrayList<>(); - List tsFileResources = TestUtils.prepareTsFileResources(0, 10, 10, 10, true); - for (int i = 0; i < 10; i++) { - snapshot.addFile(tsFileResources.get(i), TestUtils.getNode(i)); - 
timeseriesSchemas.add(TestUtils.getTestTimeSeriesSchema(0, i)); - } - for (int i = 0; i < 5; i++) { - DataRegion processor = - StorageEngine.getInstance().getProcessor(new PartialPath(TestUtils.getTestSg(0))); - TsFileResource resource = tsFileResources.get(i); - String pathWithoutHardlinkSuffix = - resource.getTsFilePath().substring(0, resource.getTsFilePath().lastIndexOf('.')); - File fileWithoutHardlinkSuffix = new File(pathWithoutHardlinkSuffix); - resource.getTsFile().renameTo(fileWithoutHardlinkSuffix); - resource.setFile(fileWithoutHardlinkSuffix); - resource.serialize(); - processor.loadNewTsFile(resource, true); - } - snapshot.setTimeseriesSchemas(timeseriesSchemas); - - SnapshotInstaller defaultInstaller = - snapshot.getDefaultInstaller(dataGroupMember); - defaultInstaller.install(snapshot, 0, false); - - for (TimeseriesSchema timeseriesSchema : timeseriesSchemas) { - assertTrue( - IoTDB.schemaProcessor.isPathExist(new PartialPath(timeseriesSchema.getFullPath()))); - } - DataRegion processor = - StorageEngine.getInstance().getProcessor(new PartialPath(TestUtils.getTestSg(0))); - assertEquals(11, processor.getPartitionMaxFileVersions(0)); - List loadedFiles = processor.getSequenceFileList(); - assertEquals(tsFileResources.size(), loadedFiles.size()); - for (int i = 0; i < loadedFiles.size(); i++) { - assertEquals(i, loadedFiles.get(i).getMaxPlanIndex()); - } - assertEquals(1, processor.getUnSequenceFileList().size()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/MetaSimpleSnapshotTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/MetaSimpleSnapshotTest.java deleted file mode 100644 index 8cc13e468192..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/MetaSimpleSnapshotTest.java +++ /dev/null @@ -1,346 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.cluster.common.IoTDBTest; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.coordinator.Coordinator; -import org.apache.iotdb.cluster.exception.SnapshotInstallationException; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.utils.CreateTemplatePlanUtil; -import org.apache.iotdb.commons.auth.AuthException; -import org.apache.iotdb.commons.auth.authorizer.BasicAuthorizer; -import org.apache.iotdb.commons.auth.entity.Role; -import org.apache.iotdb.commons.auth.entity.User; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.StartupException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.metadata.template.UndefinedTemplateException; -import org.apache.iotdb.db.metadata.template.Template; -import org.apache.iotdb.db.metadata.template.TemplateManager; -import org.apache.iotdb.db.qp.physical.sys.CreateTemplatePlan; -import org.apache.iotdb.db.service.IoTDB; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -public class MetaSimpleSnapshotTest extends IoTDBTest { - - private MetaGroupMember metaGroupMember; - private boolean subServerInitialized; - - @Override - @Before - public void setUp() - throws StartupException, org.apache.iotdb.db.exception.query.QueryProcessException, - IllegalPathException { - super.setUp(); - subServerInitialized = false; - metaGroupMember = - new TestMetaGroupMember() { - @Override - protected void rebuildDataGroups() { - subServerInitialized = true; - } - }; - metaGroupMember.setCoordinator(new Coordinator()); - } - - @Override - @After - public void tearDown() throws IOException, StorageEngineException { - metaGroupMember.stop(); - metaGroupMember.closeLogManager(); - super.tearDown(); - } - - @Test - public void testSerialize() { - try { - Map storageGroupTTLMap = new HashMap<>(); - Map userMap = new HashMap<>(); - Map roleMap = new HashMap<>(); - Map templateMap = new HashMap<>(); - PartitionTable partitionTable = TestUtils.getPartitionTable(10); - long lastLogIndex = 10; - long lastLogTerm = 5; - - for (int i = 0; i < 10; i++) { - PartialPath partialPath = new PartialPath("root.ln.sg1"); - storageGroupTTLMap.put(partialPath, (long) i); - } - - for (int i = 0; i < 5; i++) { - String userName = "user_" + i; - User user = new User(userName, "password_" + i); - userMap.put(userName, user); - } - - for (int i = 0; i < 10; i++) { - String roleName = "role_" + i; - Role role = new Role(roleName); - roleMap.put(roleName, role); - } - - CreateTemplatePlan createTemplatePlan = CreateTemplatePlanUtil.getCreateTemplatePlan(); - - for (int i = 0; i < 10; i++) { - String templateName = "template_" + i; - Template template = new 
Template(createTemplatePlan); - templateMap.put(templateName, template); - } - - MetaSimpleSnapshot metaSimpleSnapshot = - new MetaSimpleSnapshot( - storageGroupTTLMap, userMap, roleMap, templateMap, partitionTable.serialize()); - - metaSimpleSnapshot.setLastLogIndex(lastLogIndex); - metaSimpleSnapshot.setLastLogTerm(lastLogTerm); - - ByteBuffer buffer = metaSimpleSnapshot.serialize(); - - MetaSimpleSnapshot newSnapshot = new MetaSimpleSnapshot(); - newSnapshot.deserialize(buffer); - - assertEquals(storageGroupTTLMap, newSnapshot.getStorageGroupTTLMap()); - assertEquals(userMap, newSnapshot.getUserMap()); - assertEquals(roleMap, newSnapshot.getRoleMap()); - assertEquals(templateMap, newSnapshot.getTemplateMap()); - - assertEquals(partitionTable.serialize(), newSnapshot.getPartitionTableBuffer()); - assertEquals(lastLogIndex, newSnapshot.getLastLogIndex()); - assertEquals(lastLogTerm, newSnapshot.getLastLogTerm()); - - assertEquals(metaSimpleSnapshot, newSnapshot); - - } catch (Exception e) { - Assert.fail(e.getMessage()); - } - } - - @Test - public void testInstallSuccessfully() - throws IllegalPathException, SnapshotInstallationException, AuthException { - Map storageGroupTTLMap = new HashMap<>(); - Map userMap = new HashMap<>(); - Map roleMap = new HashMap<>(); - Map templateMap = new HashMap<>(); - PartitionTable partitionTable = TestUtils.getPartitionTable(10); - long lastLogIndex = 10; - long lastLogTerm = 5; - - for (int i = 0; i < 10; i++) { - PartialPath partialPath = new PartialPath("root.ln.sg" + i); - storageGroupTTLMap.put(partialPath, (long) i); - } - - for (int i = 0; i < 5; i++) { - String userName = "user_" + i; - User user = new User(userName, "password_" + i); - userMap.put(userName, user); - } - - for (int i = 0; i < 10; i++) { - String roleName = "role_" + i; - Role role = new Role(roleName); - roleMap.put(roleName, role); - } - - CreateTemplatePlan createTemplatePlan = CreateTemplatePlanUtil.getCreateTemplatePlan(); - - for (int i = 0; i < 10; i++) { - String templateName = "template_" + i; - createTemplatePlan.setName(templateName); - Template template = new Template(createTemplatePlan); - templateMap.put(templateName, template); - } - - MetaSimpleSnapshot metaSimpleSnapshot = - new MetaSimpleSnapshot( - storageGroupTTLMap, userMap, roleMap, templateMap, partitionTable.serialize()); - metaSimpleSnapshot.setLastLogIndex(lastLogIndex); - metaSimpleSnapshot.setLastLogTerm(lastLogTerm); - - SnapshotInstaller defaultInstaller = metaSimpleSnapshot.getDefaultInstaller(metaGroupMember); - defaultInstaller.install(metaSimpleSnapshot, -1, false); - - Map storageGroupsTTL = IoTDB.schemaProcessor.getStorageGroupsTTL(); - for (int i = 0; i < 10; i++) { - PartialPath partialPath = new PartialPath("root.ln.sg" + i); - assertEquals(i, (long) storageGroupsTTL.get(partialPath)); - } - - for (int i = 0; i < 5; i++) { - String userName = "user_" + i; - User user = BasicAuthorizer.getInstance().getUser(userName); - assertEquals(userMap.get(userName), user); - } - - for (int i = 0; i < 10; i++) { - String roleName = "role_" + i; - Role role = BasicAuthorizer.getInstance().getRole(roleName); - assertEquals(roleMap.get(roleName), role); - } - - for (int i = 0; i < 10; i++) { - String templateName = "template_" + i; - try { - Template template = TemplateManager.getInstance().getTemplate(templateName); - assertEquals(templateMap.get(templateName), template); - } catch (UndefinedTemplateException e) { - fail(); - } - } - - assertEquals(partitionTable, metaGroupMember.getPartitionTable()); - 
assertEquals(lastLogIndex, metaGroupMember.getLogManager().getLastLogIndex()); - assertEquals(lastLogTerm, metaGroupMember.getLogManager().getLastLogTerm()); - assertTrue(subServerInitialized); - } - - @Test - public void testInstallOmitted() - throws IllegalPathException, SnapshotInstallationException, AuthException, - InterruptedException { - Map storageGroupTTLMap = new HashMap<>(); - Map userMap = new HashMap<>(); - Map roleMap = new HashMap<>(); - Map templateMap = new HashMap<>(); - PartitionTable partitionTable = TestUtils.getPartitionTable(10); - long lastLogIndex = 10; - long lastLogTerm = 5; - - for (int i = 0; i < 10; i++) { - PartialPath partialPath = new PartialPath("root.ln.sg" + i); - storageGroupTTLMap.put(partialPath, (long) i); - } - - for (int i = 0; i < 5; i++) { - String userName = "user_" + i; - User user = new User(userName, "password_" + i); - userMap.put(userName, user); - } - - for (int i = 0; i < 10; i++) { - String roleName = "role_" + i; - Role role = new Role(roleName); - roleMap.put(roleName, role); - } - - CreateTemplatePlan createTemplatePlan = CreateTemplatePlanUtil.getCreateTemplatePlan(); - - for (int i = 0; i < 10; i++) { - String templateName = "template_" + i; - createTemplatePlan.setName(templateName); - Template template = new Template(createTemplatePlan); - templateMap.put(templateName, template); - } - - MetaSimpleSnapshot metaSimpleSnapshot = - new MetaSimpleSnapshot( - storageGroupTTLMap, userMap, roleMap, templateMap, partitionTable.serialize()); - metaSimpleSnapshot.setLastLogIndex(lastLogIndex); - metaSimpleSnapshot.setLastLogTerm(lastLogTerm); - - AtomicBoolean isLocked = new AtomicBoolean(false); - Lock snapshotLock = metaGroupMember.getSnapshotApplyLock(); - Lock signalLock = new ReentrantLock(); - signalLock.lock(); - try { - // Simulate another snapshot being installed - new Thread( - () -> { - boolean localLocked = snapshotLock.tryLock(); - if (localLocked) { - isLocked.set(true); - // Use signalLock to make sure this thread can hold the snapshotLock as long as - // possible - signalLock.lock(); - signalLock.unlock(); - snapshotLock.unlock(); - } - }) - .start(); - // Waiting another thread locking the snapshotLock - for (int i = 0; i < 10; i++) { - Thread.sleep(100); - if (isLocked.get()) { - break; - } - } - Assert.assertTrue(isLocked.get()); - SnapshotInstaller defaultInstaller = metaSimpleSnapshot.getDefaultInstaller(metaGroupMember); - defaultInstaller.install(metaSimpleSnapshot, -1, false); - - Map storageGroupsTTL = IoTDB.schemaProcessor.getStorageGroupsTTL(); - for (int i = 0; i < 10; i++) { - PartialPath partialPath = new PartialPath("root.ln.sg" + i); - assertNull(storageGroupsTTL.get(partialPath)); - } - - for (int i = 0; i < 5; i++) { - String userName = "user_" + i; - User user = BasicAuthorizer.getInstance().getUser(userName); - assertNull(user); - } - - for (int i = 0; i < 10; i++) { - String roleName = "role_" + i; - Role role = BasicAuthorizer.getInstance().getRole(roleName); - assertNull(role); - } - - for (int i = 0; i < 10; i++) { - String templateName = "template_" + i; - try { - TemplateManager.getInstance().getTemplate(templateName); - fail(); - } catch (UndefinedTemplateException e) { - // Do nothing - } - } - - assertNull(metaGroupMember.getPartitionTable()); - assertEquals(-1, metaGroupMember.getLogManager().getLastLogIndex()); - assertEquals(-1, metaGroupMember.getLogManager().getLastLogTerm()); - assertFalse(subServerInitialized); - } finally { - signalLock.unlock(); - } - } -} diff --git 
a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/PartitionedSnapshotTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/PartitionedSnapshotTest.java deleted file mode 100644 index ffa9177b9171..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/PartitionedSnapshotTest.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.exception.SnapshotInstallationException; -import org.apache.iotdb.cluster.partition.slot.SlotManager.SlotStatus; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.engine.storagegroup.DataRegion; -import org.apache.iotdb.db.engine.storagegroup.TsFileResource; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.tsfile.exception.write.WriteProcessException; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import org.junit.Assert; -import org.junit.Test; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class PartitionedSnapshotTest extends DataSnapshotTest { - - @Test - public void testSerialize() throws IOException, WriteProcessException { - - List tsFileResources = TestUtils.prepareTsFileResources(0, 10, 10, 10, true); - PartitionedSnapshot partitionedSnapshot = - new PartitionedSnapshot(FileSnapshot.Factory.INSTANCE); - for (int i = 0; i < 10; i++) { - FileSnapshot snapshot = new FileSnapshot(); - snapshot.addFile(tsFileResources.get(i), TestUtils.getNode(i)); - snapshot.setTimeseriesSchemas( - Collections.singletonList(TestUtils.getTestTimeSeriesSchema(0, i))); - partitionedSnapshot.putSnapshot(i, snapshot); - } - partitionedSnapshot.setLastLogIndex(10); - partitionedSnapshot.setLastLogTerm(5); - - assertEquals( - "PartitionedSnapshot{slotSnapshots=10, lastLogIndex=10, lastLogTerm=5}", - partitionedSnapshot.toString()); - - ByteBuffer buffer = partitionedSnapshot.serialize(); - - PartitionedSnapshot deserialized = new PartitionedSnapshot(FileSnapshot.Factory.INSTANCE); - deserialized.deserialize(buffer); - assertEquals(partitionedSnapshot, deserialized); - } - - @Test - public void 
testInstallSuccessfully() - throws IOException, WriteProcessException, SnapshotInstallationException, - IllegalPathException, StorageEngineException { - List tsFileResources = TestUtils.prepareTsFileResources(0, 10, 10, 10, true); - PartitionedSnapshot snapshot = new PartitionedSnapshot(FileSnapshot.Factory.INSTANCE); - List timeseriesSchemas = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - FileSnapshot fileSnapshot = new FileSnapshot(); - fileSnapshot.addFile(tsFileResources.get(i), TestUtils.getNode(i)); - timeseriesSchemas.add(TestUtils.getTestTimeSeriesSchema(0, i)); - fileSnapshot.setTimeseriesSchemas( - Collections.singletonList(TestUtils.getTestTimeSeriesSchema(0, i))); - snapshot.putSnapshot(i, fileSnapshot); - } - snapshot.setLastLogIndex(10); - snapshot.setLastLogTerm(5); - - SnapshotInstaller defaultInstaller = - snapshot.getDefaultInstaller(dataGroupMember); - for (int i = 0; i < 10; i++) { - dataGroupMember.getSlotManager().setToPulling(i, TestUtils.getNode(0)); - } - defaultInstaller.install(snapshot, -1, false); - // after installation, the slot should be available again - for (int i = 0; i < 10; i++) { - assertEquals(SlotStatus.NULL, dataGroupMember.getSlotManager().getStatus(i)); - } - - for (TimeseriesSchema timeseriesSchema : timeseriesSchemas) { - assertTrue( - IoTDB.schemaProcessor.isPathExist(new PartialPath(timeseriesSchema.getFullPath()))); - } - DataRegion processor = - StorageEngine.getInstance().getProcessor(new PartialPath(TestUtils.getTestSg(0))); - assertEquals(10, processor.getPartitionMaxFileVersions(0)); - List loadedFiles = processor.getSequenceFileList(); - assertEquals(tsFileResources.size(), loadedFiles.size()); - for (int i = 0; i < loadedFiles.size(); i++) { - assertEquals(i, loadedFiles.get(i).getMaxPlanIndex()); - } - assertEquals(0, processor.getUnSequenceFileList().size()); - - for (TsFileResource tsFileResource : tsFileResources) { - // source files should be deleted after being pulled - assertFalse(tsFileResource.getTsFile().exists()); - } - } - - @Test - public void testInstallOmitted() - throws IOException, WriteProcessException, SnapshotInstallationException, - IllegalPathException, StorageEngineException, InterruptedException { - List tsFileResources = TestUtils.prepareTsFileResources(0, 10, 10, 10, true); - PartitionedSnapshot snapshot = new PartitionedSnapshot(FileSnapshot.Factory.INSTANCE); - List timeseriesSchemas = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - FileSnapshot fileSnapshot = new FileSnapshot(); - fileSnapshot.addFile(tsFileResources.get(i), TestUtils.getNode(i)); - timeseriesSchemas.add(TestUtils.getTestTimeSeriesSchema(0, i)); - fileSnapshot.setTimeseriesSchemas( - Collections.singletonList(TestUtils.getTestTimeSeriesSchema(0, i))); - snapshot.putSnapshot(i, fileSnapshot); - } - snapshot.setLastLogIndex(10); - snapshot.setLastLogTerm(5); - - AtomicBoolean isLocked = new AtomicBoolean(false); - Lock snapshotLock = dataGroupMember.getSnapshotApplyLock(); - Lock signalLock = new ReentrantLock(); - signalLock.lock(); - try { - // Simulate another snapshot being installed - new Thread( - () -> { - boolean localLocked = snapshotLock.tryLock(); - if (localLocked) { - isLocked.set(true); - // Use signalLock to make sure this thread can hold the snapshotLock as long as - // possible - signalLock.lock(); - signalLock.unlock(); - snapshotLock.unlock(); - } - }) - .start(); - // Waiting another thread locking the snapshotLock - for (int i = 0; i < 10; i++) { - Thread.sleep(100); - if (isLocked.get()) { - break; - } - } 
- Assert.assertTrue(isLocked.get()); - - SnapshotInstaller defaultInstaller = - snapshot.getDefaultInstaller(dataGroupMember); - for (int i = 0; i < 10; i++) { - dataGroupMember.getSlotManager().setToPulling(i, TestUtils.getNode(0)); - } - defaultInstaller.install(snapshot, -1, false); - // after installation, the slot should be unchanged - for (int i = 0; i < 10; i++) { - assertEquals(SlotStatus.PULLING, dataGroupMember.getSlotManager().getStatus(i)); - } - - for (TimeseriesSchema timeseriesSchema : timeseriesSchemas) { - assertFalse( - IoTDB.schemaProcessor.isPathExist(new PartialPath(timeseriesSchema.getFullPath()))); - } - DataRegion processor = - StorageEngine.getInstance().getProcessor(new PartialPath(TestUtils.getTestSg(0))); - assertEquals(0, processor.getPartitionMaxFileVersions(0)); - List loadedFiles = processor.getSequenceFileList(); - assertEquals(0, loadedFiles.size()); - assertEquals(0, processor.getUnSequenceFileList().size()); - - for (TsFileResource tsFileResource : tsFileResources) { - assertTrue(tsFileResource.getTsFile().exists()); - } - } finally { - signalLock.unlock(); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTaskDescriptorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTaskDescriptorTest.java deleted file mode 100644 index ba8f894c09cc..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTaskDescriptorTest.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
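The testInstallOmitted case just above coordinates two threads with a pair of locks: a helper thread tryLock()s the snapshot-apply lock and keeps holding it for as long as the test itself holds a separate "signal" lock, while the test polls an AtomicBoolean until the helper really owns the lock, and only then runs the installation it expects to be skipped. A minimal, self-contained sketch of that handoff, using only JDK locks rather than the IoTDB member types, might look like this:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class LockHandoffSketch {
  public static void main(String[] args) throws InterruptedException {
    Lock snapshotLock = new ReentrantLock(); // stands in for the member's snapshot-apply lock
    Lock signalLock = new ReentrantLock();   // keeps the helper thread parked while we assert
    AtomicBoolean isLocked = new AtomicBoolean(false);

    signalLock.lock();
    try {
      new Thread(() -> {
        if (snapshotLock.tryLock()) {
          isLocked.set(true);
          signalLock.lock();      // blocks until the main thread releases the signal lock
          signalLock.unlock();
          snapshotLock.unlock();
        }
      }).start();

      // Poll until the helper actually holds the snapshot lock, then run the code under test.
      for (int i = 0; i < 10 && !isLocked.get(); i++) {
        Thread.sleep(100);
      }
      System.out.println("helper holds snapshot lock: " + isLocked.get());
      // ... the real test would now call install() and assert it was skipped ...
    } finally {
      signalLock.unlock();        // lets the helper finish and release the snapshot lock
    }
  }
}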
- */ - -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.partition.PartitionGroup; - -import org.junit.Test; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertEquals; - -public class PullSnapshotTaskDescriptorTest { - - @Test - public void testSerialize() throws IOException { - PartitionGroup group = new PartitionGroup(); - List slots = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - group.add(TestUtils.getNode(i)); - slots.add(i); - } - - PullSnapshotTaskDescriptor descriptor = new PullSnapshotTaskDescriptor(group, slots, true); - - byte[] bytes; - try (ByteArrayOutputStream arrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(arrayOutputStream)) { - descriptor.serialize(dataOutputStream); - bytes = arrayOutputStream.toByteArray(); - } - - try (ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(bytes); - DataInputStream dataInputStream = new DataInputStream(byteArrayInputStream)) { - PullSnapshotTaskDescriptor deserialized = new PullSnapshotTaskDescriptor(); - deserialized.deserialize(dataInputStream); - assertEquals(descriptor, deserialized); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTaskTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTaskTest.java deleted file mode 100644 index b5c3fcdc1e0f..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/PullSnapshotTaskTest.java +++ /dev/null @@ -1,337 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
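The deleted PullSnapshotTaskDescriptorTest above is a plain serialize/deserialize round trip: write the object to a DataOutputStream backed by a byte array, read it back through a DataInputStream, and assert equality. A generic sketch of the same pattern, assuming a hypothetical value type with its own serialize/deserialize methods and a proper equals(), could look like:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Objects;

public class RoundTripSketch {
  // Hypothetical value type; stands in for PullSnapshotTaskDescriptor.
  static class Descriptor {
    int slot;
    boolean requireReadOnly;

    void serialize(DataOutputStream out) throws IOException {
      out.writeInt(slot);
      out.writeBoolean(requireReadOnly);
    }

    void deserialize(DataInputStream in) throws IOException {
      slot = in.readInt();
      requireReadOnly = in.readBoolean();
    }

    @Override
    public boolean equals(Object o) {
      return o instanceof Descriptor
          && ((Descriptor) o).slot == slot
          && ((Descriptor) o).requireReadOnly == requireReadOnly;
    }

    @Override
    public int hashCode() {
      return Objects.hash(slot, requireReadOnly);
    }
  }

  public static void main(String[] args) throws IOException {
    Descriptor original = new Descriptor();
    original.slot = 7;
    original.requireReadOnly = true;

    byte[] bytes;
    try (ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buffer)) {
      original.serialize(out);
      bytes = buffer.toByteArray();
    }

    Descriptor restored = new Descriptor();
    try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes))) {
      restored.deserialize(in);
    }
    System.out.println("round trip equal: " + original.equals(restored)); // true
  }
}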
- */ - -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.common.TestAsyncDataClient; -import org.apache.iotdb.cluster.common.TestDataGroupMember; -import org.apache.iotdb.cluster.common.TestLogManager; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.slot.SlotManager.SlotStatus; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotResp; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.Client; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.utils.IOUtils; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.exception.StartupException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.engine.storagegroup.DataRegion; -import org.apache.iotdb.db.engine.storagegroup.TsFileResource; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.tsfile.exception.write.WriteProcessException; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import org.apache.thrift.TConfiguration; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TTransportException; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class PullSnapshotTaskTest extends DataSnapshotTest { - - private static final Logger logger = LoggerFactory.getLogger(PullSnapshotTaskTest.class); - private DataGroupMember sourceMember; - private DataGroupMember targetMember; - private List timeseriesSchemas; - private List tsFileResources; - private boolean hintRegistered; - private int requiredRetries; - private int defaultCompactionThread = - IoTDBDescriptor.getInstance().getConfig().getConcurrentCompactionThread(); - - @Override - @Before - public void setUp() throws MetadataException, StartupException { - IoTDBDescriptor.getInstance().getConfig().setConcurrentCompactionThread(0); - super.setUp(); - hintRegistered = false; - sourceMember = - new TestDataGroupMember() { - @Override - public AsyncClient getAsyncClient(Node node) { - try { - return new TestAsyncDataClient(node, null) { - @Override - public void pullSnapshot( - PullSnapshotRequest request, - AsyncMethodCallback resultHandler) { - new Thread( - () -> { - try { - if (request.requireReadOnly) { - targetMember.setReadOnly(); - } - 
resultHandler.onComplete(targetMember.getSnapshot(request)); - } catch (IOException e) { - resultHandler.onError(e); - } - }) - .start(); - } - }; - } catch (IOException e) { - return null; - } - } - - @Override - public Client getSyncClient(Node node) { - return new SyncDataClient(null) { - @Override - public PullSnapshotResp pullSnapshot(PullSnapshotRequest request) throws TException { - try { - if (request.requireReadOnly) { - targetMember.setReadOnly(); - } - return targetMember.getSnapshot(request); - } catch (IOException e) { - throw new TException(e); - } - } - - @Override - public ByteBuffer readFile(String filePath, long offset, int length) - throws TException { - try { - return IOUtils.readFile(filePath, offset, length); - } catch (IOException e) { - throw new TException(e); - } - } - - @Override - public TProtocol getInputProtocol() { - return new TBinaryProtocol( - new TTransport() { - @Override - public boolean isOpen() { - return false; - } - - @Override - public void open() {} - - @Override - public void close() {} - - @Override - public int read(byte[] buf, int off, int len) { - return 0; - } - - @Override - public void write(byte[] buf, int off, int len) {} - - @Override - public TConfiguration getConfiguration() { - return null; - } - - @Override - public void updateKnownMessageSize(long size) {} - - @Override - public void checkReadBytesAvailable(long numBytes) - throws TTransportException {} - }); - } - }; - } - - @Override - public void registerPullSnapshotHint(PullSnapshotTaskDescriptor descriptor) { - hintRegistered = true; - } - }; - sourceMember.setMetaGroupMember(metaGroupMember); - sourceMember.setLogManager(new TestLogManager(0)); - sourceMember.setThisNode(TestUtils.getNode(0)); - targetMember = - new TestDataGroupMember() { - @Override - public PullSnapshotResp getSnapshot(PullSnapshotRequest request) throws IOException { - if (requiredRetries > 0) { - requiredRetries--; - throw new IOException("Faked pull snapshot exception"); - } - - try { - tsFileResources = TestUtils.prepareTsFileResources(0, 10, 10, 10, true); - } catch (WriteProcessException e) { - return null; - } - Map snapshotBytes = new HashMap<>(); - for (int i = 0; i < 10; i++) { - FileSnapshot fileSnapshot = new FileSnapshot(); - fileSnapshot.addFile(tsFileResources.get(i), TestUtils.getNode(i)); - fileSnapshot.setTimeseriesSchemas( - Collections.singletonList(TestUtils.getTestTimeSeriesSchema(0, i))); - timeseriesSchemas.add(TestUtils.getTestTimeSeriesSchema(0, i)); - snapshotBytes.put(i, fileSnapshot.serialize()); - } - PullSnapshotResp pullSnapshotResp = new PullSnapshotResp(); - pullSnapshotResp.setSnapshotBytes(snapshotBytes); - return pullSnapshotResp; - } - }; - targetMember.setThisNode(TestUtils.getNode(1)); - targetMember.setLogManager(new TestLogManager(1)); - - timeseriesSchemas = new ArrayList<>(); - requiredRetries = 0; - } - - @Test - public void testAsync() throws IllegalPathException, StorageEngineException { - boolean useAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(true); - try { - testNormal(false); - } finally { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(useAsyncServer); - } - } - - @Test - public void testReadOnly() throws StorageEngineException, IllegalPathException { - testNormal(true); - assertTrue(targetMember.isReadOnly()); - } - - @Test - public void testSync() throws IllegalPathException, StorageEngineException { - boolean useAsyncServer = 
ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(false); - try { - testNormal(false); - } finally { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(useAsyncServer); - } - } - - @Test - public void testWithRetry() throws StorageEngineException, IllegalPathException { - boolean useAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - int pullSnapshotRetryIntervalMs = - ClusterDescriptor.getInstance().getConfig().getPullSnapshotRetryIntervalMs(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(false); - ClusterDescriptor.getInstance().getConfig().setPullSnapshotRetryIntervalMs(100); - try { - requiredRetries = 3; - testNormal(false); - } finally { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(useAsyncServer); - ClusterDescriptor.getInstance() - .getConfig() - .setPullSnapshotRetryIntervalMs(pullSnapshotRetryIntervalMs); - } - } - - private void testNormal(boolean requiresReadOnly) - throws IllegalPathException, StorageEngineException { - PartitionGroup partitionGroup = new PartitionGroup(); - partitionGroup.add(TestUtils.getNode(1)); - List slots = new ArrayList<>(); - for (int i = 0; i < 20; i++) { - slots.add(i); - sourceMember.getSlotManager().setToPulling(i, TestUtils.getNode(1)); - } - PullSnapshotTaskDescriptor descriptor = - new PullSnapshotTaskDescriptor(partitionGroup, slots, requiresReadOnly); - - PullSnapshotTask task = - new PullSnapshotTask(descriptor, sourceMember, FileSnapshot.Factory.INSTANCE, null); - task.call(); - - for (TimeseriesSchema timeseriesSchema : timeseriesSchemas) { - assertTrue( - IoTDB.schemaProcessor.isPathExist(new PartialPath(timeseriesSchema.getFullPath()))); - } - DataRegion processor = - StorageEngine.getInstance().getProcessor(new PartialPath(TestUtils.getTestSg(0))); - assertEquals(10, processor.getPartitionMaxFileVersions(0)); - List loadedFiles = processor.getSequenceFileList(); - assertEquals(tsFileResources.size(), loadedFiles.size()); - for (int i = 0; i < 9; i++) { - if (i != loadedFiles.get(i).getMaxPlanIndex()) { - logger.error( - "error occurred, i={}, minPlanIndex={}, maxPlanIndex={}, tsFileName={}", - i, - loadedFiles.get(i).getMinPlanIndex(), - loadedFiles.get(i).getMaxPlanIndex(), - loadedFiles.get(i).getTsFile().getAbsolutePath()); - } - assertEquals(-1, loadedFiles.get(i).getMaxPlanIndex()); - } - assertEquals(0, processor.getUnSequenceFileList().size()); - - for (TsFileResource tsFileResource : tsFileResources) { - // source files should be deleted after being pulled - assertFalse(tsFileResource.getTsFile().exists()); - } - assertTrue(hintRegistered); - for (int i = 0; i < 20; i++) { - assertEquals(SlotStatus.NULL, sourceMember.getSlotManager().getStatus(i)); - } - - assertFalse(task.getSnapshotSave().exists()); - } - - @Override - @After - public void tearDown() throws Exception { - sourceMember.closeLogManager(); - targetMember.closeLogManager(); - sourceMember.stop(); - targetMember.stop(); - super.tearDown(); - IoTDBDescriptor.getInstance() - .getConfig() - .setConcurrentCompactionThread(defaultCompactionThread); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/SimpleSnapshot.java b/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/SimpleSnapshot.java deleted file mode 100644 index ea3d016785a3..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/log/snapshot/SimpleSnapshot.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed 
to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.log.snapshot; - -import org.apache.iotdb.cluster.exception.UnknownLogTypeException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogParser; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.server.member.RaftMember; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Objects; - -/** SimpleSnapshot keeps the committed logs in a memory list. */ -public class SimpleSnapshot extends Snapshot { - - private static final Logger logger = LoggerFactory.getLogger(SimpleSnapshot.class); - private List snapshot = new ArrayList<>(); - - public SimpleSnapshot(long lastIndex, long lastTerm) { - this.lastLogIndex = lastIndex; - this.lastLogTerm = lastTerm; - } - - @Override - public ByteBuffer serialize() { - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(outputStream); - try { - if (snapshot != null) { - dataOutputStream.writeInt(snapshot.size()); - for (Log log : snapshot) { - outputStream.write(log.serialize().array()); - } - } else { - dataOutputStream.writeInt(0); - } - - } catch (IOException e) { - // unreachable - } - - return ByteBuffer.wrap(outputStream.toByteArray()); - } - - @Override - public void deserialize(ByteBuffer buffer) { - snapshot = new ArrayList<>(); - int size = buffer.getInt(); - for (int i = 0; i < size; i++) { - try { - snapshot.add(LogParser.getINSTANCE().parse(buffer)); - } catch (UnknownLogTypeException e) { - logger.error("Cannot recognize log", e); - } - } - this.lastLogIndex = - snapshot.isEmpty() ? -1 : snapshot.get(snapshot.size() - 1).getCurrLogIndex(); - this.lastLogTerm = snapshot.isEmpty() ? 
-1 : snapshot.get(snapshot.size() - 1).getCurrLogTerm(); - } - - public List getSnapshot() { - return snapshot; - } - - public void add(Log log) { - snapshot.add(log); - lastLogIndex = log.getCurrLogIndex(); - lastLogTerm = log.getCurrLogTerm(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - SimpleSnapshot that = (SimpleSnapshot) o; - return Objects.equals(snapshot, that.snapshot); - } - - @Override - public int hashCode() { - return Objects.hash(snapshot); - } - - @Override - public SnapshotInstaller getDefaultInstaller(RaftMember member) { - return DEFAULT_INSTALLER; - } - - public static final SnapshotInstaller DEFAULT_INSTALLER = - new SnapshotInstaller() { - @Override - public void install(SimpleSnapshot snapshot, int slot, boolean isDataMigration) { - // default installer does nothing - } - - @Override - public void install(Map snapshotMap, boolean isDataMigration) { - // default installer does nothing - } - }; -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/partition/SchemaProcessorWhiteBox.java b/cluster/src/test/java/org/apache/iotdb/cluster/partition/SchemaProcessorWhiteBox.java deleted file mode 100644 index abd29cb8d7a7..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/partition/SchemaProcessorWhiteBox.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
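SimpleSnapshot above serializes its log list as a count followed by each entry's bytes, and deserialization reads the count back, parses that many entries, and derives lastLogIndex/lastLogTerm from the final entry. A count-prefixed round trip of the same shape, sketched with plain longs instead of Log objects, is:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class CountPrefixedSketch {
  static ByteBuffer serialize(List<Long> entries) throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buffer);
    out.writeInt(entries.size());           // count prefix
    for (long entry : entries) {
      out.writeLong(entry);                 // each entry's payload
    }
    return ByteBuffer.wrap(buffer.toByteArray());
  }

  static List<Long> deserialize(ByteBuffer buffer) {
    int size = buffer.getInt();             // read the count first
    List<Long> entries = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
      entries.add(buffer.getLong());
    }
    return entries;
  }

  public static void main(String[] args) throws IOException {
    List<Long> logs = Arrays.asList(1L, 2L, 3L);
    System.out.println(deserialize(serialize(logs))); // [1, 2, 3]
  }
}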
- */ -package org.apache.iotdb.cluster.partition; - -import org.apache.iotdb.db.metadata.LocalSchemaProcessor; - -import org.powermock.reflect.Whitebox; - -import java.io.File; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; - -public class SchemaProcessorWhiteBox { - - public static LocalSchemaProcessor newSchemaProcessor(String logFilePath) { - Constructor constructor = getSchemaProcessorConstructor(); - constructor.setAccessible(true); - try { - LocalSchemaProcessor schemaProcessor = constructor.newInstance(); - new File(logFilePath).getParentFile().mkdirs(); - Whitebox.setInternalState(schemaProcessor, "logFilePath", logFilePath); - // schemaProcessor.initForMultiSchemaProcessorTest(); - return schemaProcessor; - } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) { - e.printStackTrace(); - } - return null; - } - - private static Constructor getSchemaProcessorConstructor() { - try { - return LocalSchemaProcessor.class.getDeclaredConstructor(); - } catch (NoSuchMethodException e) { - e.printStackTrace(); - } - return null; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/partition/SlotManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/partition/SlotManagerTest.java deleted file mode 100644 index 122d7503435e..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/partition/SlotManagerTest.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
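SchemaProcessorWhiteBox above instantiates LocalSchemaProcessor through its private constructor and then injects a field value with PowerMock's Whitebox. The same white-box trick can be done with nothing but java.lang.reflect; a sketch against a hypothetical class with a private constructor and a private field:

import java.lang.reflect.Constructor;
import java.lang.reflect.Field;

public class WhiteBoxSketch {
  // Hypothetical target with a private constructor and private state.
  static class Target {
    private String logFilePath = "default";
    private Target() {}
    String path() { return logFilePath; }
  }

  public static void main(String[] args) throws Exception {
    // Call the private no-arg constructor.
    Constructor<Target> ctor = Target.class.getDeclaredConstructor();
    ctor.setAccessible(true);
    Target target = ctor.newInstance();

    // Overwrite the private field, much as Whitebox.setInternalState would.
    Field field = Target.class.getDeclaredField("logFilePath");
    field.setAccessible(true);
    field.set(target, "target/schemas/mlog_test");

    System.out.println(target.path()); // target/schemas/mlog_test
  }
}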
- */ -package org.apache.iotdb.cluster.partition; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.partition.slot.SlotManager; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.junit.Before; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; - -import static org.apache.iotdb.cluster.partition.slot.SlotManager.SlotStatus.NULL; -import static org.apache.iotdb.cluster.partition.slot.SlotManager.SlotStatus.PULLING; -import static org.apache.iotdb.cluster.partition.slot.SlotManager.SlotStatus.PULLING_WRITABLE; -import static org.apache.iotdb.cluster.partition.slot.SlotManager.SlotStatus.SENDING; -import static org.apache.iotdb.cluster.partition.slot.SlotManager.SlotStatus.SENT; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -@SuppressWarnings({"java:S2699", "java:S2925"}) -public class SlotManagerTest { - - private SlotManager slotManager; - private boolean prevEnableLogPersistence; - private int prevReplicaNum; - - @Before - public void setUp() { - prevEnableLogPersistence = - ClusterDescriptor.getInstance().getConfig().isEnableRaftLogPersistence(); - prevReplicaNum = ClusterDescriptor.getInstance().getConfig().getReplicationNum(); - ClusterDescriptor.getInstance().getConfig().setEnableRaftLogPersistence(true); - ClusterDescriptor.getInstance().getConfig().setReplicationNum(2); - int testSlotNum = 100; - slotManager = new SlotManager(testSlotNum, null, ""); - } - - @Test - public void waitSlot() { - slotManager.waitSlot(0); - slotManager.setToPulling(0, null); - new Thread( - () -> { - try { - Thread.sleep(200); - slotManager.setToNull(0); - } catch (InterruptedException e) { - e.printStackTrace(); - } - }) - .start(); - slotManager.waitSlot(0); - ClusterDescriptor.getInstance() - .getConfig() - .setEnableRaftLogPersistence(prevEnableLogPersistence); - ClusterDescriptor.getInstance().getConfig().setReplicationNum(prevReplicaNum); - } - - @Test - public void waitSlotForWrite() throws StorageEngineException { - slotManager.waitSlot(0); - slotManager.setToPullingWritable(0); - slotManager.waitSlotForWrite(0); - slotManager.setToPulling(0, null); - new Thread( - () -> { - try { - Thread.sleep(200); - slotManager.setToNull(0); - } catch (InterruptedException e) { - e.printStackTrace(); - } - }) - .start(); - slotManager.waitSlotForWrite(0); - } - - @Test - public void getStatus() { - assertEquals(NULL, slotManager.getStatus(0)); - slotManager.setToPullingWritable(0); - assertEquals(PULLING_WRITABLE, slotManager.getStatus(0)); - slotManager.setToPulling(0, null); - assertEquals(PULLING, slotManager.getStatus(0)); - slotManager.setToNull(0); - assertEquals(NULL, slotManager.getStatus(0)); - } - - @Test - public void getSource() { - assertNull(slotManager.getSource(0)); - Node source = new Node(); - slotManager.setToPulling(0, source); - assertEquals(source, slotManager.getSource(0)); - slotManager.setToPullingWritable(0); - assertEquals(source, slotManager.getSource(0)); - slotManager.setToNull(0); - assertNull(slotManager.getSource(0)); - } - - @Test - public void testSerialize() throws IOException { - File dummyMemberDir = new File("test"); - dummyMemberDir.mkdirs(); - try { - slotManager = new SlotManager(5, dummyMemberDir.getPath(), ""); - slotManager.setToNull(0); - slotManager.setToPulling(1, 
TestUtils.getNode(1)); - slotManager.setToPulling(2, TestUtils.getNode(2)); - slotManager.setToPullingWritable(2); - slotManager.setToSending(3); - slotManager.sentOneReplication(3); - slotManager.setToSending(4); - for (int i = 0; i < ClusterDescriptor.getInstance().getConfig().getReplicationNum(); i++) { - slotManager.sentOneReplication(4); - } - - SlotManager recovered = new SlotManager(5, dummyMemberDir.getPath(), ""); - assertEquals(NULL, recovered.getStatus(0)); - assertEquals(PULLING, recovered.getStatus(1)); - assertEquals(PULLING_WRITABLE, recovered.getStatus(2)); - assertEquals(SENDING, recovered.getStatus(3)); - assertEquals(SENT, recovered.getStatus(4)); - } finally { - EnvironmentUtils.cleanDir(dummyMemberDir.getPath()); - } - } - // - // @After - // public void tearDown() throws Exception { - // EnvironmentUtils.cleanAllDir(); - // } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/partition/SlotPartitionTableTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/partition/SlotPartitionTableTest.java deleted file mode 100644 index 3e85a6651ed7..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/partition/SlotPartitionTableTest.java +++ /dev/null @@ -1,546 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
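The testSerialize case above relies on a persist-then-recover pattern: mutate one SlotManager that was given a member directory, build a second SlotManager from the same directory, and assert the recovered statuses. Stripped of IoTDB types, the pattern reduces to the following sketch, where StatusStore is a hypothetical stand-in that keeps its state in one file under the given directory:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;

public class PersistRecoverSketch {
  static class StatusStore {
    private final Path file;
    private final List<String> statuses;

    StatusStore(Path memberDir, int slotNum) throws IOException {
      this.file = memberDir.resolve("slot_status");
      if (Files.exists(file)) {
        statuses = new ArrayList<>(Files.readAllLines(file)); // recover persisted state
      } else {
        statuses = new ArrayList<>();
        for (int i = 0; i < slotNum; i++) {
          statuses.add("NULL");
        }
      }
    }

    void set(int slot, String status) throws IOException {
      statuses.set(slot, status);
      Files.write(file, statuses);                            // persist on every change
    }

    String get(int slot) {
      return statuses.get(slot);
    }
  }

  public static void main(String[] args) throws IOException {
    Path dir = Files.createTempDirectory("member");
    StatusStore first = new StatusStore(dir, 5);
    first.set(1, "PULLING");
    first.set(3, "SENDING");

    StatusStore recovered = new StatusStore(dir, 5);          // fresh instance, same directory
    System.out.println(recovered.get(1) + " " + recovered.get(3)); // PULLING SENDING
  }
}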
- */ -package org.apache.iotdb.cluster.partition; - -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.partition.slot.SlotNodeRemovalResult; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.query.ClusterPlanRouter; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.utils.Constants; -import org.apache.iotdb.cluster.utils.PartitionUtils; -import org.apache.iotdb.commons.auth.AuthException; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.LocalSchemaProcessor; -import org.apache.iotdb.db.qp.logical.Operator.OperatorType; -import org.apache.iotdb.db.qp.logical.sys.AuthorOperator.AuthorType; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.DeletePlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertTabletPlan; -import org.apache.iotdb.db.qp.physical.sys.AuthorPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.DataAuthPlan; -import org.apache.iotdb.db.qp.physical.sys.DeleteStorageGroupPlan; -import org.apache.iotdb.db.qp.physical.sys.LoadConfigurationPlan; -import org.apache.iotdb.db.qp.physical.sys.LoadConfigurationPlan.LoadConfigurationPlanType; -import org.apache.iotdb.db.qp.physical.sys.OperateFilePlan; -import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan; -import org.apache.iotdb.db.qp.physical.sys.SetTTLPlan; -import org.apache.iotdb.db.qp.physical.sys.ShowChildPathsPlan; -import org.apache.iotdb.db.qp.physical.sys.ShowPlan.ShowContentType; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.db.utils.EnvironmentUtils; -import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.powermock.reflect.Whitebox; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.file.Files; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Random; -import java.util.stream.IntStream; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -@Ignore // need maintenance -@SuppressWarnings({"java:S2699"}) -public class SlotPartitionTableTest { - - Logger logger = LoggerFactory.getLogger(SlotPartitionTableTest.class); - SlotPartitionTable localTable; - Node localNode; - int replica_size = 5; - int raftId = 0; - LocalSchemaProcessor[] 
schemaProcessors; - - SlotPartitionTable[] tables; // The PartitionTable on each node. - List nodes; - - private int prevReplicaNum; - private boolean prevEnablePartition; - private long prevPartitionInterval; - - @Before - public void setUp() throws MetadataException { - prevEnablePartition = StorageEngine.isEnablePartition(); - prevPartitionInterval = StorageEngine.getTimePartitionInterval(); - StorageEngine.setEnablePartition(true); - - IoTDB.configManager.init(); - StorageEngine.setTimePartitionInterval(7 * 24 * 3600 * 1000L); - nodes = new ArrayList<>(); - IntStream.range(0, 20).forEach(i -> nodes.add(getNode(i))); - localNode = nodes.get(3); - prevReplicaNum = ClusterDescriptor.getInstance().getConfig().getReplicationNum(); - ClusterDescriptor.getInstance().getConfig().setReplicationNum(replica_size); - tables = new SlotPartitionTable[20]; - schemaProcessors = new LocalSchemaProcessor[20]; - - // suppose there are 40 storage groups and each node maintains two of them. - String[] storageNames = new String[40]; - List[] nodeSGs = new ArrayList[20]; - for (int i = 0; i < 20; i++) { - nodeSGs[i] = new ArrayList<>(); - tables[i] = new SlotPartitionTable(nodes, nodes.get(i)); - } - localTable = tables[3]; - // thisNode hold No. 1500 to 1999 slot. - - for (int i = 0; i < 20; i++) { - storageNames[i] = String.format("root.sg.l2.l3.%d", i); - // determine which node the sg belongs to - RaftNode node = localTable.routeToHeaderByTime(storageNames[i], 0); - nodeSGs[node.getNode().getMetaPort() - 30000].add(storageNames[i]); - storageNames[i + 20] = String.format("root.sg.l2.l3.l4.%d", i + 20); - node = localTable.routeToHeaderByTime(storageNames[i + 20], 0); - nodeSGs[node.getNode().getMetaPort() - 30000].add(storageNames[i + 20]); - } - for (int i = 0; i < 20; i++) { - schemaProcessors[i] = SchemaProcessorWhiteBox.newSchemaProcessor("target/schemas/mlog_" + i); - initMockSchemaProcessor(i, schemaProcessors[i], storageNames, nodeSGs[i]); - Whitebox.setInternalState(tables[i], "schemaProcessor", schemaProcessors[i]); - } - } - - private void initMockSchemaProcessor( - int id, LocalSchemaProcessor schemaProcessor, String[] storageGroups, List ownedSGs) - throws MetadataException { - for (String sg : storageGroups) { - schemaProcessor.setStorageGroup(new PartialPath(sg)); - } - for (String sg : ownedSGs) { - // register 4 series; - for (int i = 0; i < 4; i++) { - try { - schemaProcessor.createTimeseries( - new PartialPath(String.format(sg + ".ld.l1.d%d.s%d", i / 2, i % 2)), - TSDataType.INT32, - TSEncoding.RLE, - CompressionType.SNAPPY, - Collections.EMPTY_MAP); - } catch (Exception e) { - e.printStackTrace(); - } - } - } - } - - @After - public void tearDown() throws IOException, StorageEngineException { - ClusterDescriptor.getInstance().getConfig().setReplicationNum(prevReplicaNum); - if (schemaProcessors != null) { - for (LocalSchemaProcessor schemaProcessor : schemaProcessors) { - // schemaProcessor.clear(); - } - } - EnvironmentUtils.cleanEnv(); - File[] files = new File("target/schemas").listFiles(); - if (files != null) { - for (File file : files) { - try { - Files.delete(file.toPath()); - } catch (IOException e) { - logger.error("{} can not be deleted.", file, e); - } - } - } - StorageEngine.setEnablePartition(prevEnablePartition); - StorageEngine.setTimePartitionInterval(prevPartitionInterval); - } - - @Test - public void testGetHeaderGroup() { - Arrays.stream(new int[] {10, 15, 19}) - .forEach( - i -> { - int last = (i + replica_size - 1) % 20; - assertGetHeaderGroup(i, last); - }); - } - 
- private void assertGetHeaderGroup(int start, int last) { - PartitionGroup group = - localTable.getPartitionGroup( - new RaftNode( - new Node( - "localhost", - 30000 + start, - start, - 40000 + start, - Constants.RPC_PORT + start, - "localhost"), - 0)); - assertEquals(replica_size, group.size()); - assertEquals( - new Node( - "localhost", - 30000 + start, - start, - 40000 + start, - Constants.RPC_PORT + start, - "localhost"), - group.getHeader().getNode()); - - assertEquals( - new Node( - "localhost", 30000 + last, last, 40000 + last, Constants.RPC_PORT + start, "localhost"), - group.get(replica_size - 1)); - } - - private void assertPartitionGroup(PartitionGroup group, int... nodeIds) { - for (int i = 0; i < nodeIds.length; i++) { - assertEquals(nodeIds[i], group.get(i).nodeIdentifier); - } - } - - @Test - public void testRoute() { - PartitionGroup group1 = localTable.route("root.sg1", 1); - PartitionGroup group2 = localTable.route("root.sg1", 2); - PartitionGroup group3 = localTable.route("root.sg2", 2); - PartitionGroup group4 = - localTable.route("root.sg1", 2 + StorageEngine.getTimePartitionInterval()); - - assertEquals(group1, group2); - assertNotEquals(group2, group3); - assertNotEquals(group3, group4); - - PartitionGroup group = localTable.route(ClusterConstant.SLOT_NUM + 1); - assertNull(group); - // thisNode hold No. 1500 to 1999 slot. - group1 = localTable.route(1501); - group2 = localTable.route(1502); - group3 = localTable.route(2501); - assertEquals(group1, group2); - assertNotEquals(group2, group3); - } - - @Test - public void routeToHeader() { - RaftNode node1 = localTable.routeToHeaderByTime("root.sg.l2.l3.l4.28", 0); - RaftNode node2 = localTable.routeToHeaderByTime("root.sg.l2.l3.l4.28", 1); - RaftNode node3 = - localTable.routeToHeaderByTime( - "root.sg.l2.l3.l4.28", 1 + StorageEngine.getTimePartitionInterval()); - assertEquals(node1, node2); - assertNotEquals(node2, node3); - } - - @Test - public void addNode() { - // TODO do it when delete node is finished. - } - - @Test - public void getLocalGroups() { - List groups = localTable.getLocalGroups(); - int[][] nodeIds = new int[replica_size][replica_size]; - // we write them clearly to help people understand how the replica is assigned. 
- nodeIds[0] = new int[] {3, 4, 5, 6, 7}; - nodeIds[1] = new int[] {2, 3, 4, 5, 6}; - nodeIds[2] = new int[] {1, 2, 3, 4, 5}; - nodeIds[3] = new int[] {0, 1, 2, 3, 4}; - nodeIds[4] = new int[] {19, 0, 1, 2, 3}; - for (int i = 0; i < nodeIds.length; i++) { - assertPartitionGroup(groups.get(i), nodeIds[i]); - } - } - - @Test - public void serializeAndDeserialize() { - ByteBuffer buffer = localTable.serialize(); - SlotPartitionTable tmpTable = new SlotPartitionTable(new Node()); - tmpTable.deserialize(buffer); - assertEquals(localTable, tmpTable); - } - - @Test - public void getAllNodes() { - assertEquals(20, localTable.getAllNodes().size()); - } - - @Test - public void getPreviousNodeMap() { - // before adding or deleting node, it should be null - assertNull(localTable.getPreviousNodeMap(new RaftNode(localNode, 0))); - // TODO after adding or deleting node, it has data - } - - @Test - public void getNodeSlots() { - // TODO only meaningful when nodelist changes - } - - @Test - public void getAllNodeSlots() { - // TODO only meaningful when nodelist changes - } - - @Test - public void getTotalSlotNumbers() { - assertEquals(ClusterConstant.SLOT_NUM, localTable.getTotalSlotNumbers()); - } - - @Test - public void testPhysicalPlan() throws QueryProcessException, IllegalPathException { - PhysicalPlan deletePlan = new DeletePlan(); - assertTrue(PartitionUtils.isGlobalDataPlan(deletePlan)); - - try { - PhysicalPlan authorPlan = - new AuthorPlan( - AuthorType.CREATE_ROLE, - "test", - "test", - "test", - "test", - new String[] {}, - new PartialPath("root.sg.l2.l3.l4.28.ld.l1.d0")); - assertTrue(PartitionUtils.isGlobalMetaPlan(authorPlan)); - } catch (AuthException | IllegalPathException e) { - e.printStackTrace(); - fail(e.getMessage()); - } - PhysicalPlan deleteStorageGroup = new DeleteStorageGroupPlan(Collections.emptyList()); - assertTrue(PartitionUtils.isGlobalMetaPlan(deleteStorageGroup)); - PhysicalPlan globalLoadConfigPlan = - new LoadConfigurationPlan(LoadConfigurationPlanType.GLOBAL, new Properties[2]); - assertTrue(PartitionUtils.isGlobalMetaPlan(globalLoadConfigPlan)); - PhysicalPlan localLoadConfigPlan = new LoadConfigurationPlan(LoadConfigurationPlanType.LOCAL); - assertFalse(PartitionUtils.isGlobalMetaPlan(localLoadConfigPlan)); - PhysicalPlan operateFilePlan = new OperateFilePlan(new File(""), OperatorType.LOAD_FILES); - assertTrue(PartitionUtils.isLocalNonQueryPlan(operateFilePlan)); - - PhysicalPlan setStorageGroupPlan = new SetStorageGroupPlan(); - assertTrue(PartitionUtils.isGlobalMetaPlan(setStorageGroupPlan)); - PhysicalPlan setTTLPlan = new SetTTLPlan(new PartialPath("root.group")); - assertTrue(PartitionUtils.isGlobalMetaPlan(setTTLPlan)); - } - - // @Test - public void testInsertPlan() throws IllegalPathException { - PhysicalPlan insertPlan1 = - new InsertRowPlan( - new PartialPath("root.sg.l2.l3.l4.28.ld.l1.d0"), - 1, - new String[] {"s0", "s1"}, - new String[] {"0", "1"}); - PhysicalPlan insertPlan2 = - new InsertRowPlan( - new PartialPath("root.sg.l2.l3.l4.28.ld.l1.d0"), - 1 + StorageEngine.getTimePartitionInterval(), - new String[] {"s0", "s1"}, - new String[] {"0", "1"}); - PartitionGroup group1, group2; - assertFalse(insertPlan1.canBeSplit()); - ClusterPlanRouter router = new ClusterPlanRouter(localTable); - try { - group1 = router.routePlan(insertPlan1); - group2 = router.routePlan(insertPlan2); - assertNotEquals(group1, group2); - } catch (Exception e) { - e.printStackTrace(); - fail(e.getMessage()); - } - } - - // @Test - public void testCreateTimeSeriesPlan() throws 
IllegalPathException { - PhysicalPlan createTimeSeriesPlan1 = - new CreateTimeSeriesPlan( - new PartialPath("root.sg.l2.l3.l4.28.ld" + ".l1.d1"), - TSDataType.BOOLEAN, - TSEncoding.RLE, - CompressionType.SNAPPY, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - null); - PhysicalPlan createTimeSeriesPlan2 = - new CreateTimeSeriesPlan( - new PartialPath("root.sg.l2.l3.l4.28.ld" + ".l1.d2"), - TSDataType.BOOLEAN, - TSEncoding.RLE, - CompressionType.SNAPPY, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - null); - PhysicalPlan createTimeSeriesPlan3 = - new CreateTimeSeriesPlan( - new PartialPath("root.sg.l2.l3.l4.29.ld" + ".l1.d2"), - TSDataType.BOOLEAN, - TSEncoding.RLE, - CompressionType.SNAPPY, - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - null); - assertFalse(createTimeSeriesPlan1.canBeSplit()); - ClusterPlanRouter router = new ClusterPlanRouter(localTable); - - try { - PartitionGroup group1 = router.routePlan(createTimeSeriesPlan1); - PartitionGroup group2 = router.routePlan(createTimeSeriesPlan2); - PartitionGroup group3 = router.routePlan(createTimeSeriesPlan3); - assertEquals(group1, group2); - assertNotEquals(group2, group3); - } catch (Exception e) { - e.printStackTrace(); - fail(e.getMessage()); - } - } - - // @Test - public void testInsertTabletPlan() throws IllegalPathException { - PhysicalPlan batchInertPlan = - new InsertTabletPlan( - new PartialPath("root.sg.l2.l3.l4.28.ld.l1" + ".d0"), - new String[] {"s0", "s1"}, - Arrays.asList(0, 1)); - assertTrue(batchInertPlan.canBeSplit()); - // (String deviceId, String[] measurements, List dataTypes) - long[] times = new long[9]; - Object[] values = new Object[2]; - values[0] = new boolean[9]; - values[1] = new int[9]; - - for (int i = 0; i < 3; i++) { - times[i] = Math.abs(new Random().nextLong()) % StorageEngine.getTimePartitionInterval(); - ((boolean[]) values[0])[i] = new Random().nextBoolean(); - ((int[]) values[1])[i] = new Random().nextInt(); - } - for (int i = 3; i < 6; i++) { - times[i] = - StorageEngine.getTimePartitionInterval() - + Math.abs(new Random().nextLong()) % StorageEngine.getTimePartitionInterval(); - ((boolean[]) values[0])[i] = new Random().nextBoolean(); - ((int[]) values[1])[i] = new Random().nextInt(); - } - for (int i = 6; i < 9; i++) { - times[i] = - StorageEngine.getTimePartitionInterval() * 10 - + Math.abs(new Random().nextLong()) % StorageEngine.getTimePartitionInterval(); - ((boolean[]) values[0])[i] = new Random().nextBoolean(); - ((int[]) values[1])[i] = new Random().nextInt(); - } - ((InsertTabletPlan) batchInertPlan).setTimes(times); - ((InsertTabletPlan) batchInertPlan).setColumns(values); - ((InsertTabletPlan) batchInertPlan).setRowCount(9); - try { - ClusterPlanRouter router = new ClusterPlanRouter(localTable); - Map result = router.splitAndRoutePlan(batchInertPlan); - assertEquals(3, result.size()); - result.forEach( - (key, value) -> { - assertEquals(3, ((InsertTabletPlan) key).getRowCount()); - long[] subtimes = ((InsertTabletPlan) key).getTimes(); - assertEquals(3, subtimes.length); - assertEquals( - subtimes[0] / StorageEngine.getTimePartitionInterval(), - subtimes[2] / StorageEngine.getTimePartitionInterval()); - }); - } catch (Exception e) { - e.printStackTrace(); - fail(e.getMessage()); - } - } - - // @Test - public void testShowChildPathsPlan() throws IllegalPathException { - PhysicalPlan showChildPathsPlan1 = - new ShowChildPathsPlan(ShowContentType.CHILD_PATH, new 
PartialPath("root.sg.l2.l3.l4.28")); - PhysicalPlan showChildPathsPlan2 = - new ShowChildPathsPlan(ShowContentType.CHILD_PATH, new PartialPath("root.sg.l2.l3.l4")); - try { - assertFalse(showChildPathsPlan1.canBeSplit()); - ClusterPlanRouter router = new ClusterPlanRouter(localTable); - PartitionGroup group1 = router.routePlan(showChildPathsPlan1); - PartitionGroup group2 = router.routePlan(showChildPathsPlan2); - assertNotEquals(group1, group2); - } catch (Exception e) { - e.printStackTrace(); - fail(e.getMessage()); - } - } - - @Test - public void testDataAuthPlan() { - List users = new ArrayList(Arrays.asList("user1", "user2")); - PhysicalPlan dataAuthPlan = new DataAuthPlan(OperatorType.GRANT_WATERMARK_EMBEDDING, users); - Assert.assertTrue(PartitionUtils.isGlobalMetaPlan(dataAuthPlan)); - } - - private Node getNode(int i) { - return new Node("localhost", 30000 + i, i, 40000 + i, Constants.RPC_PORT + i, "localhost"); - } - - private RaftNode getRaftNode(int i, int raftId) { - return new RaftNode(getNode(i), raftId); - } - - @Test - public void testRemoveNode() { - List nodeSlots = localTable.getNodeSlots(getRaftNode(0, raftId)); - localTable.removeNode(getNode(0)); - NodeRemovalResult nodeRemovalResult = localTable.getNodeRemovalResult(); - assertFalse(localTable.getAllNodes().contains(getNode(0))); - PartitionGroup removedGroup = nodeRemovalResult.getRemovedGroup(0); - for (int i = 0; i < 5; i++) { - assertTrue(removedGroup.contains(getNode(i))); - } - // the slots owned by the removed one should be redistributed to other nodes - Map> newSlotOwners = - ((SlotNodeRemovalResult) nodeRemovalResult).getNewSlotOwners(); - for (List slots : newSlotOwners.values()) { - assertTrue(nodeSlots.containsAll(slots)); - nodeSlots.removeAll(slots); - } - assertTrue(nodeSlots.isEmpty()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/BaseQueryTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/BaseQueryTest.java deleted file mode 100644 index 88f42eaae003..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/BaseQueryTest.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
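The testRemoveNode case above verifies redistribution by peeling the reassigned slots off the removed node's original slot list: every new owner's batch must be a subset of those slots, and after removing all batches nothing may be left over. The same containsAll/removeAll/isEmpty check on plain collections, with hypothetical owner names:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RedistributionCheckSketch {
  public static void main(String[] args) {
    // Slots the removed node used to own, and a hypothetical redistribution to two new owners.
    List<Integer> removedNodeSlots = new ArrayList<>(Arrays.asList(1, 2, 3, 4, 5, 6));
    Map<String, List<Integer>> newSlotOwners = new HashMap<>();
    newSlotOwners.put("node1", Arrays.asList(1, 3, 5));
    newSlotOwners.put("node2", Arrays.asList(2, 4, 6));

    boolean ok = true;
    for (List<Integer> slots : newSlotOwners.values()) {
      ok &= removedNodeSlots.containsAll(slots); // each batch must come from the removed node
      removedNodeSlots.removeAll(slots);
    }
    ok &= removedNodeSlots.isEmpty();            // and nothing may be left unassigned
    System.out.println("slots fully redistributed: " + ok); // true
  }
}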
- */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.server.member.BaseMember; -import org.apache.iotdb.cluster.server.monitor.NodeStatusManager; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.query.aggregation.AggregateResult; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.common.Field; -import org.apache.iotdb.tsfile.read.common.RowRecord; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import static junit.framework.TestCase.assertEquals; -import static junit.framework.TestCase.assertFalse; -import static junit.framework.TestCase.assertTrue; -import static org.junit.Assert.assertNull; - -/** - * allNodes: node0, node1... node9 localNode: node0 pathList: root.sg0.s0, root.sg0.s1... - * root.sg0.s9 (all double type) - */ -public class BaseQueryTest extends BaseMember { - - protected List pathList; - protected int defaultCompactionThread = - IoTDBDescriptor.getInstance().getConfig().getConcurrentCompactionThread(); - - protected static void checkAggregations( - List aggregationResults, Object[] answer) { - Assert.assertEquals(answer.length, aggregationResults.size()); - for (int i = 0; i < aggregationResults.size(); i++) { - AggregateResult aggregateResult = aggregationResults.get(i); - if (answer[i] != null) { - Assert.assertEquals( - (double) answer[i], - Double.parseDouble(aggregateResult.getResult().toString()), - 0.00001); - } else { - assertNull(aggregateResult.getResult()); - } - } - } - - @Override - @Before - public void setUp() throws Exception { - IoTDBDescriptor.getInstance().getConfig().setConcurrentCompactionThread(0); - super.setUp(); - pathList = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - MeasurementPath path = new MeasurementPath(TestUtils.getTestSeries(i, 0), TSDataType.DOUBLE); - pathList.add(path); - } - NodeStatusManager.getINSTANCE().setMetaGroupMember(testMetaMember); - TestUtils.prepareData(); - } - - @Override - @After - public void tearDown() throws Exception { - super.tearDown(); - NodeStatusManager.getINSTANCE().setMetaGroupMember(null); - IoTDBDescriptor.getInstance() - .getConfig() - .setConcurrentCompactionThread(defaultCompactionThread); - } - - void checkSequentialDataset(QueryDataSet dataSet, int offset, int size) throws IOException { - for (int i = offset; i < offset + size; i++) { - assertTrue(dataSet.hasNext()); - RowRecord record = dataSet.next(); - assertEquals(i, record.getTimestamp()); - assertEquals(10, record.getFields().size()); - for (int j = 0; j < 10; j++) { - assertEquals(i * 1.0, record.getFields().get(j).getDoubleV(), 0.00001); - } - } - assertFalse(dataSet.hasNext()); - } - - protected void checkDoubleDataset(QueryDataSet queryDataSet, Object[] answers) - throws IOException { - Assert.assertTrue(queryDataSet.hasNext()); - RowRecord record = queryDataSet.next(); - List fields = record.getFields(); - Assert.assertEquals(answers.length, fields.size()); - for (int i = 0; i < answers.length; i++) { - if (answers[i] != null) { - Assert.assertEquals( - (double) answers[i], Double.parseDouble(fields.get(i).getStringValue()), 0.000001); - } else { - assertNull(fields.get(i)); - } - } - } 
-} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterAggregateExecutorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterAggregateExecutorTest.java deleted file mode 100644 index 77aa4989a878..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterAggregateExecutorTest.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.query.aggregate.ClusterAggregateExecutor; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.qp.constant.SQLConstant; -import org.apache.iotdb.db.qp.physical.crud.AggregationPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.common.Field; -import org.apache.iotdb.tsfile.read.common.RowRecord; -import org.apache.iotdb.tsfile.read.expression.impl.BinaryExpression; -import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression; -import org.apache.iotdb.tsfile.read.filter.TimeFilter; -import org.apache.iotdb.tsfile.read.filter.ValueFilter; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; - -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class ClusterAggregateExecutorTest extends BaseQueryTest { - - private ClusterAggregateExecutor executor; - - @Test - public void testNoFilter() - throws QueryProcessException, StorageEngineException, IOException, IllegalPathException { - AggregationPlan plan = new AggregationPlan(); - List paths = new ArrayList<>(); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 0), TSDataType.DOUBLE)); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 1), TSDataType.DOUBLE)); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 2), TSDataType.DOUBLE)); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 3), TSDataType.DOUBLE)); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 4), TSDataType.DOUBLE)); - - List aggregations = - Arrays.asList( - SQLConstant.MIN_TIME, - SQLConstant.MAX_VALUE, - SQLConstant.AVG, - SQLConstant.COUNT, - 
SQLConstant.SUM); - plan.setPaths(paths); - plan.setDeduplicatedPathsAndUpdate(paths); - plan.setAggregations(aggregations); - plan.setDeduplicatedAggregations(aggregations); - - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - executor = new ClusterAggregateExecutor(context, plan, testMetaMember); - QueryDataSet queryDataSet = executor.executeWithoutValueFilter(plan); - assertTrue(queryDataSet.hasNext()); - RowRecord record = queryDataSet.next(); - List fields = record.getFields(); - assertEquals(5, fields.size()); - Object[] answers = new Object[] {0.0, 19.0, 9.5, 20.0, 190.0}; - for (int i = 0; i < 5; i++) { - assertEquals((double) answers[i], Double.parseDouble(fields.get(i).toString()), 0.00001); - } - assertFalse(queryDataSet.hasNext()); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testFilter() - throws StorageEngineException, IOException, QueryProcessException, IllegalPathException { - AggregationPlan plan = new AggregationPlan(); - List paths = new ArrayList<>(); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 0), TSDataType.DOUBLE)); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 1), TSDataType.DOUBLE)); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 2), TSDataType.DOUBLE)); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 3), TSDataType.DOUBLE)); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 4), TSDataType.DOUBLE)); - List aggregations = - Arrays.asList( - SQLConstant.MIN_TIME, - SQLConstant.MAX_VALUE, - SQLConstant.AVG, - SQLConstant.COUNT, - SQLConstant.SUM); - plan.setPaths(paths); - plan.setDeduplicatedPathsAndUpdate(paths); - plan.setAggregations(aggregations); - plan.setDeduplicatedAggregations(aggregations); - plan.setExpression( - BinaryExpression.and( - new SingleSeriesExpression( - new PartialPath(TestUtils.getTestSeries(0, 0)), ValueFilter.ltEq(8.0)), - new SingleSeriesExpression( - new PartialPath(TestUtils.getTestSeries(0, 0)), TimeFilter.gtEq(3)))); - - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - executor = new ClusterAggregateExecutor(context, plan, testMetaMember); - QueryDataSet queryDataSet = executor.executeWithValueFilter(plan); - assertTrue(queryDataSet.hasNext()); - RowRecord record = queryDataSet.next(); - List fields = record.getFields(); - assertEquals(5, fields.size()); - Object[] answers = new Object[] {3.0, 8.0, 5.5, 6.0, 33.0}; - for (int i = 0; i < 5; i++) { - assertEquals((double) answers[i], Double.parseDouble(fields.get(i).toString()), 0.00001); - } - assertFalse(queryDataSet.hasNext()); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterDataQueryExecutorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterDataQueryExecutorTest.java deleted file mode 100644 index 78e4295ca2d0..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterDataQueryExecutorTest.java +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
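Both aggregation tests above flatten the returned RowRecord into its fields and compare each one against a hard-coded answers array within a small delta, which is the usual way to assert on floating-point aggregates. A stand-alone version of that comparison, with the result fields represented as strings the way the tests parse them:

import java.util.Arrays;
import java.util.List;

public class DeltaCompareSketch {
  // Compare parsed field values against expected answers within a tolerance.
  static boolean matches(List<String> fields, double[] answers, double delta) {
    if (fields.size() != answers.length) {
      return false;
    }
    for (int i = 0; i < answers.length; i++) {
      if (Math.abs(Double.parseDouble(fields.get(i)) - answers[i]) > delta) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Hypothetical MIN_TIME, MAX_VALUE, AVG, COUNT, SUM results as strings.
    List<String> fields = Arrays.asList("0.0", "19.0", "9.5", "20.0", "190.0");
    double[] answers = {0.0, 19.0, 9.5, 20.0, 190.0};
    System.out.println(matches(fields, answers, 0.00001)); // true
  }
}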
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.mnode.IMeasurementMNode; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.qp.executor.PlanExecutor; -import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan; -import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan; -import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.common.Field; -import org.apache.iotdb.tsfile.read.common.RowRecord; -import org.apache.iotdb.tsfile.read.expression.IExpression; -import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression; -import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression; -import org.apache.iotdb.tsfile.read.filter.TimeFilter; -import org.apache.iotdb.tsfile.read.filter.ValueFilter; -import org.apache.iotdb.tsfile.read.filter.operator.AndFilter; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; -import org.apache.iotdb.tsfile.write.schema.MeasurementSchema; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; - -public class ClusterDataQueryExecutorTest extends BaseQueryTest { - - private ClusterDataQueryExecutor queryExecutor; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - } - - @Override - @After - public void tearDown() throws Exception { - super.tearDown(); - } - - @Test - public void testNoFilter() throws IOException, StorageEngineException { - RawDataQueryPlan plan = new RawDataQueryPlan(); - plan.setDeduplicatedPathsAndUpdate(pathList); - queryExecutor = new ClusterDataQueryExecutor(plan, testMetaMember); - RemoteQueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - QueryDataSet dataSet = queryExecutor.executeWithoutValueFilter(context); - checkSequentialDataset(dataSet, 0, 20); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testFilter() - throws IOException, StorageEngineException, QueryProcessException, IllegalPathException { - IExpression expression = - new SingleSeriesExpression( - new 
PartialPath(TestUtils.getTestSeries(0, 0)), ValueFilter.gtEq(5.0)); - RawDataQueryPlan plan = new RawDataQueryPlan(); - plan.setDeduplicatedPathsAndUpdate(pathList); - plan.setExpression(expression); - queryExecutor = new ClusterDataQueryExecutor(plan, testMetaMember); - RemoteQueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - QueryDataSet dataSet = queryExecutor.executeWithValueFilter(context); - checkSequentialDataset(dataSet, 5, 15); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testNoFilterWithRedirect() throws StorageEngineException { - RawDataQueryPlan plan = new RawDataQueryPlan(); - plan.setDeduplicatedPathsAndUpdate(pathList); - plan.setEnableRedirect(true); - queryExecutor = new ClusterDataQueryExecutor(plan, testMetaMember); - RemoteQueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - QueryDataSet dataSet = queryExecutor.executeWithoutValueFilter(context); - assertNull(dataSet.getEndPoint()); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testFilterWithValueFilterRedirect() - throws StorageEngineException, QueryProcessException, IllegalPathException { - IExpression expression = - new SingleSeriesExpression( - new PartialPath(TestUtils.getTestSeries(0, 0)), ValueFilter.gtEq(5.0)); - RawDataQueryPlan plan = new RawDataQueryPlan(); - plan.setDeduplicatedPathsAndUpdate(pathList); - plan.setExpression(expression); - plan.setEnableRedirect(true); - queryExecutor = new ClusterDataQueryExecutor(plan, testMetaMember); - RemoteQueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - QueryDataSet dataSet = queryExecutor.executeWithValueFilter(context); - assertNull(dataSet.getEndPoint()); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testFilterWithTimeFilterRedirect() - throws StorageEngineException, QueryProcessException { - IExpression expression = - new GlobalTimeExpression(new AndFilter(TimeFilter.gtEq(5), TimeFilter.ltEq(10))); - RawDataQueryPlan plan = new RawDataQueryPlan(); - plan.setDeduplicatedPathsAndUpdate(pathList.subList(0, 1)); - plan.setExpression(expression); - plan.setEnableRedirect(true); - queryExecutor = new ClusterDataQueryExecutor(plan, testMetaMember); - RemoteQueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - QueryDataSet dataSet = queryExecutor.executeWithoutValueFilter(context); - assertEquals("ip:port=0.0.0.0:6667", dataSet.getEndPoint().toString()); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test // IOTDB-2219 - public void testQueryInMemory() - throws IOException, StorageEngineException, IllegalPathException, QueryProcessException, - StorageGroupNotSetException { - PlanExecutor planExecutor = new PlanExecutor(); - MeasurementPath[] paths = - new MeasurementPath[] { - new MeasurementPath( - TestUtils.getTestSg(100), - TestUtils.getTestMeasurement(0), - new MeasurementSchema(TestUtils.getTestMeasurement(0), TSDataType.DOUBLE)), - new MeasurementPath( - TestUtils.getTestSg(100), - TestUtils.getTestMeasurement(1), - new MeasurementSchema(TestUtils.getTestMeasurement(1), TSDataType.DOUBLE)), - new MeasurementPath( - TestUtils.getTestSg(100), - 
TestUtils.getTestMeasurement(2), - new MeasurementSchema(TestUtils.getTestMeasurement(2), TSDataType.DOUBLE)), - }; - String[] measurements = - new String[] { - TestUtils.getTestMeasurement(0), - TestUtils.getTestMeasurement(1), - TestUtils.getTestMeasurement(2) - }; - IMeasurementMNode[] schemas = - new IMeasurementMNode[] { - TestUtils.getTestMeasurementMNode(0), - TestUtils.getTestMeasurementMNode(1), - TestUtils.getTestMeasurementMNode(2) - }; - TSDataType[] dataTypes = - new TSDataType[] {TSDataType.DOUBLE, TSDataType.DOUBLE, TSDataType.DOUBLE}; - Object[] values = new Object[] {1.0, 2.0, 3.0}; - - // set storage group - SetStorageGroupPlan setStorageGroupPlan = new SetStorageGroupPlan(); - setStorageGroupPlan.setPath(new PartialPath(TestUtils.getTestSg(100))); - planExecutor.setStorageGroup(setStorageGroupPlan); - - // insert data - InsertRowPlan insertPlan = new InsertRowPlan(); - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(100))); - insertPlan.setMeasurements(measurements); - insertPlan.setMeasurementMNodes(schemas); - insertPlan.setDataTypes(dataTypes); - insertPlan.setNeedInferType(true); - insertPlan.setTime(0); - insertPlan.setValues(values); - - planExecutor.processNonQuery(insertPlan); - - // query data - RawDataQueryPlan queryPlan = new RawDataQueryPlan(); - queryPlan.setDeduplicatedPaths(Arrays.asList(paths)); - queryExecutor = new ClusterDataQueryExecutor(queryPlan, testMetaMember); - RemoteQueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - QueryDataSet dataSet = queryExecutor.executeWithoutValueFilter(context); - RowRecord record = dataSet.next(); - List fields = record.getFields(); - Assert.assertEquals(values.length, fields.size()); - for (int i = 0; i < values.length; i++) { - Assert.assertEquals(String.valueOf(values[i]), fields.get(i).getStringValue()); - } - assertFalse(dataSet.hasNext()); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterPhysicalGeneratorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterPhysicalGeneratorTest.java deleted file mode 100644 index d0585de36b82..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterPhysicalGeneratorTest.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.logical.crud.FromComponent; -import org.apache.iotdb.db.qp.logical.crud.QueryOperator; -import org.apache.iotdb.db.qp.logical.crud.SelectComponent; -import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan; -import org.apache.iotdb.db.query.expression.ResultColumn; -import org.apache.iotdb.db.query.expression.leaf.TimeSeriesOperand; - -import org.junit.Before; -import org.junit.Test; - -import java.time.ZoneId; -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertEquals; - -public class ClusterPhysicalGeneratorTest extends BaseQueryTest { - - private ClusterPhysicalGenerator physicalGenerator; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - physicalGenerator = new ClusterPhysicalGenerator(); - } - - @Test - public void test() throws QueryProcessException, IllegalPathException { - QueryOperator operator = new QueryOperator(); - - SelectComponent selectComponent = new SelectComponent(ZoneId.systemDefault()); - List resultColumns = new ArrayList<>(); - for (PartialPath partialPath : pathList) { - resultColumns.add(new ResultColumn(new TimeSeriesOperand(partialPath))); - } - selectComponent.setResultColumns(resultColumns); - FromComponent fromComponent = new FromComponent(); - fromComponent.addPrefixTablePath(new PartialPath(TestUtils.getTestSg(0))); - - operator.setSelectComponent(selectComponent); - operator.setFromComponent(fromComponent); - RawDataQueryPlan plan = (RawDataQueryPlan) physicalGenerator.transformToPhysicalPlan(operator); - - assertEquals(pathList, plan.getDeduplicatedPaths()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterPlanExecutorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterPlanExecutorTest.java deleted file mode 100644 index d0151d41789c..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterPlanExecutorTest.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.mnode.IStorageGroupMNode; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan; -import org.apache.iotdb.db.qp.physical.sys.ShowTimeSeriesPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; - -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.List; - -import static org.junit.Assert.assertEquals; - -public class ClusterPlanExecutorTest extends BaseQueryTest { - - private ClusterPlanExecutor queryExecutor; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - queryExecutor = new ClusterPlanExecutor(testMetaMember); - } - - @Test - public void testQuery() - throws QueryProcessException, QueryFilterOptimizationException, StorageEngineException, - IOException, MetadataException, InterruptedException { - RawDataQueryPlan queryPlan = new RawDataQueryPlan(); - queryPlan.setDeduplicatedPathsAndUpdate(pathList); - queryPlan.setPaths(pathList); - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - - try { - QueryDataSet dataSet = queryExecutor.processQuery(queryPlan, context); - checkSequentialDataset(dataSet, 0, 20); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testMatchPaths() throws MetadataException { - List allMatchedPaths = - queryExecutor.getPathsName(new PartialPath("root.*.s0")); - allMatchedPaths.sort(null); - for (int i = 0; i < allMatchedPaths.size(); i++) { - assertEquals(pathList.get(i), allMatchedPaths.get(i)); - } - } - - @Test - public void testGetAllStorageGroupNodes() { - List allStorageGroupNodes = queryExecutor.getAllStorageGroupNodes(); - for (int i = 0; i < allStorageGroupNodes.size(); i++) { - assertEquals( - IoTDB.schemaProcessor.getAllStorageGroupNodes().get(i).getFullPath(), - allStorageGroupNodes.get(i).getFullPath()); - } - } - - @Test - public void testShowTimeseries() - throws StorageEngineException, QueryFilterOptimizationException, MetadataException, - IOException, InterruptedException, QueryProcessException { - ShowTimeSeriesPlan showTimeSeriesPlan = new ShowTimeSeriesPlan(pathList.get(0)); - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - QueryDataSet dataSet = queryExecutor.processQuery(showTimeSeriesPlan, context); - int count = 0; - while (dataSet.hasNext()) { - dataSet.next(); - count++; - } - assertEquals(count, 1); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterPlannerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterPlannerTest.java deleted file mode 100644 index 1bd4dee08490..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterPlannerTest.java +++ /dev/null @@ -1,55 +0,0 @@ -/* 
- * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan; - -import org.junit.Before; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertEquals; - -public class ClusterPlannerTest extends BaseQueryTest { - - private ClusterPlanner parser; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - parser = new ClusterPlanner(); - } - - @Test - public void test() throws QueryProcessException { - List sgs = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - sgs.add(TestUtils.getTestSg(i)); - } - String sql = String.format("SELECT s0 FROM %s", String.join(",", sgs)); - RawDataQueryPlan plan = (RawDataQueryPlan) parser.parseSQLToPhysicalPlan(sql); - assertEquals(pathList, plan.getDeduplicatedPaths()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryRouterTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryRouterTest.java deleted file mode 100644 index 7d7f7edc5a26..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterQueryRouterTest.java +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.qp.constant.SQLConstant; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.AggregationPlan; -import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan; -import org.apache.iotdb.db.qp.physical.crud.GroupByTimePlan; -import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan; -import org.apache.iotdb.db.qp.physical.crud.UDTFPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.db.query.executor.fill.IFill; -import org.apache.iotdb.db.query.executor.fill.LinearFill; -import org.apache.iotdb.db.query.executor.fill.PreviousFill; -import org.apache.iotdb.tsfile.exception.filter.QueryFilterOptimizationException; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.common.RowRecord; -import org.apache.iotdb.tsfile.read.expression.IExpression; -import org.apache.iotdb.tsfile.read.expression.impl.BinaryExpression; -import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression; -import org.apache.iotdb.tsfile.read.filter.TimeFilter; -import org.apache.iotdb.tsfile.read.filter.ValueFilter; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; - -import junit.framework.TestCase; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import static junit.framework.TestCase.assertEquals; -import static org.apache.iotdb.db.qp.physical.crud.GroupByTimePlan.getTimeExpression; -import static org.junit.Assert.assertFalse; - -public class ClusterQueryRouterTest extends BaseQueryTest { - - private ClusterQueryRouter clusterQueryRouter; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - clusterQueryRouter = new ClusterQueryRouter(testMetaMember); - } - - @Test - public void test() throws StorageEngineException, IOException, QueryProcessException { - RawDataQueryPlan queryPlan = new RawDataQueryPlan(); - queryPlan.setDeduplicatedPathsAndUpdate(pathList); - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - - try { - QueryDataSet dataSet = clusterQueryRouter.rawDataQuery(queryPlan, context); - checkSequentialDataset(dataSet, 0, 20); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testAggregation() - throws StorageEngineException, IOException, QueryProcessException, - QueryFilterOptimizationException, IllegalPathException { - AggregationPlan plan = new AggregationPlan(); - List paths = new ArrayList<>(); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 0), TSDataType.DOUBLE)); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 1), TSDataType.DOUBLE)); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 2), TSDataType.DOUBLE)); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 3), 
TSDataType.DOUBLE)); - paths.add(new MeasurementPath(TestUtils.getTestSeries(0, 4), TSDataType.DOUBLE)); - List aggregations = - Arrays.asList( - SQLConstant.MIN_TIME, - SQLConstant.MAX_VALUE, - SQLConstant.AVG, - SQLConstant.COUNT, - SQLConstant.SUM); - plan.setPaths(paths); - plan.setDeduplicatedPathsAndUpdate(paths); - plan.setAggregations(aggregations); - plan.setDeduplicatedAggregations(aggregations); - - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - QueryDataSet queryDataSet = clusterQueryRouter.aggregate(plan, context); - checkDoubleDataset(queryDataSet, new Object[] {0.0, 19.0, 9.5, 20.0, 190.0}); - assertFalse(queryDataSet.hasNext()); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testPreviousFill() - throws QueryProcessException, StorageEngineException, IOException, IllegalPathException { - FillQueryPlan plan = new FillQueryPlan(); - plan.setDeduplicatedPathsAndUpdate( - Collections.singletonList( - new MeasurementPath(TestUtils.getTestSeries(0, 10), TSDataType.DOUBLE))); - plan.setPaths(plan.getDeduplicatedPaths()); - long defaultFillInterval = IoTDBDescriptor.getInstance().getConfig().getDefaultFillInterval(); - Map tsDataTypeIFillMap = - Collections.singletonMap( - TSDataType.DOUBLE, new PreviousFill(TSDataType.DOUBLE, 0, defaultFillInterval)); - plan.setFillType(tsDataTypeIFillMap); - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - - try { - QueryDataSet queryDataSet; - long[] queryTimes = new long[] {-1, 0, 5, 10, 20}; - Object[][] answers = - new Object[][] { - new Object[] {null}, - new Object[] {0.0}, - new Object[] {0.0}, - new Object[] {10.0}, - new Object[] {10.0}, - }; - for (int i = 0; i < queryTimes.length; i++) { - plan.setQueryTime(queryTimes[i]); - queryDataSet = clusterQueryRouter.fill(plan, context); - checkDoubleDataset(queryDataSet, answers[i]); - assertFalse(queryDataSet.hasNext()); - } - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testLinearFill() - throws QueryProcessException, StorageEngineException, IOException, IllegalPathException { - FillQueryPlan plan = new FillQueryPlan(); - plan.setDeduplicatedPathsAndUpdate( - Collections.singletonList( - new MeasurementPath(TestUtils.getTestSeries(0, 10), TSDataType.DOUBLE))); - plan.setPaths(plan.getDeduplicatedPaths()); - long defaultFillInterval = IoTDBDescriptor.getInstance().getConfig().getDefaultFillInterval(); - Map tsDataTypeIFillMap = - Collections.singletonMap( - TSDataType.DOUBLE, - new LinearFill(TSDataType.DOUBLE, 0, defaultFillInterval, defaultFillInterval)); - plan.setFillType(tsDataTypeIFillMap); - - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - - try { - QueryDataSet queryDataSet; - long[] queryTimes = new long[] {-1, 0, 5, 10, 20}; - Object[][] answers = - new Object[][] { - new Object[] {null}, - new Object[] {0.0}, - new Object[] {5.0}, - new Object[] {10.0}, - new Object[] {null}, - }; - for (int i = 0; i < queryTimes.length; i++) { - plan.setQueryTime(queryTimes[i]); - queryDataSet = clusterQueryRouter.fill(plan, context); - checkDoubleDataset(queryDataSet, answers[i]); - assertFalse(queryDataSet.hasNext()); - } - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testVFilterGroupBy() - throws IOException, 
StorageEngineException, QueryFilterOptimizationException, - QueryProcessException, IllegalPathException { - QueryContext queryContext = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - GroupByTimePlan groupByPlan = new GroupByTimePlan(); - List pathList = new ArrayList<>(); - List aggregations = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - pathList.add(new MeasurementPath(TestUtils.getTestSeries(i, 0), TSDataType.DOUBLE)); - aggregations.add(SQLConstant.COUNT); - } - groupByPlan.setPaths(pathList); - groupByPlan.setDeduplicatedPathsAndUpdate(pathList); - groupByPlan.setAggregations(aggregations); - groupByPlan.setDeduplicatedAggregations(aggregations); - - groupByPlan.setStartTime(0); - groupByPlan.setEndTime(20); - groupByPlan.setSlidingStep(5); - groupByPlan.setInterval(5); - - IExpression expression = - BinaryExpression.and( - new SingleSeriesExpression( - new MeasurementPath(TestUtils.getTestSeries(0, 0), TSDataType.DOUBLE), - ValueFilter.gtEq(5.0)), - new SingleSeriesExpression( - new MeasurementPath(TestUtils.getTestSeries(5, 0), TSDataType.DOUBLE), - TimeFilter.ltEq(15))); - groupByPlan.setExpression(expression); - QueryDataSet queryDataSet = clusterQueryRouter.groupBy(groupByPlan, queryContext); - - Object[][] answers = - new Object[][] { - new Object[] {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}, - new Object[] {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0}, - new Object[] {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0}, - new Object[] {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}, - }; - for (Object[] answer : answers) { - checkDoubleDataset(queryDataSet, answer); - } - assertFalse(queryDataSet.hasNext()); - } finally { - QueryResourceManager.getInstance().endQuery(queryContext.getQueryId()); - } - } - - @Test - public void testNoVFilterGroupBy() - throws StorageEngineException, IOException, QueryFilterOptimizationException, - QueryProcessException, IllegalPathException { - QueryContext queryContext = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - GroupByTimePlan groupByPlan = new GroupByTimePlan(); - List pathList = new ArrayList<>(); - List aggregations = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - pathList.add(new MeasurementPath(TestUtils.getTestSeries(i, 0), TSDataType.DOUBLE)); - aggregations.add(SQLConstant.COUNT); - } - groupByPlan.setPaths(pathList); - groupByPlan.setDeduplicatedPathsAndUpdate(pathList); - groupByPlan.setAggregations(aggregations); - groupByPlan.setDeduplicatedAggregations(aggregations); - - groupByPlan.setStartTime(0); - groupByPlan.setEndTime(20); - groupByPlan.setSlidingStep(5); - groupByPlan.setInterval(5); - - groupByPlan.setExpression(getTimeExpression(groupByPlan)); - QueryDataSet dataSet = clusterQueryRouter.groupBy(groupByPlan, queryContext); - - Object[][] answers = - new Object[][] { - new Object[] {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0}, - new Object[] {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0}, - new Object[] {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0}, - new Object[] {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0}, - }; - for (Object[] answer : answers) { - checkDoubleDataset(dataSet, answer); - } - assertFalse(dataSet.hasNext()); - } finally { - QueryResourceManager.getInstance().endQuery(queryContext.getQueryId()); - } - } - - @Test - public void testUDTFQuery() throws QueryProcessException, StorageEngineException { - ClusterPlanner processor = new ClusterPlanner(); - String sqlStr = 
"select sin(s0) from root.*"; - PhysicalPlan plan = processor.parseSQLToPhysicalPlan(sqlStr); - UDTFPlan udtfPlan = (UDTFPlan) plan; - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - QueryDataSet queryDataSet = clusterQueryRouter.udtfQuery(udtfPlan, context); - for (int i = 0; i < 20; i++) { - TestCase.assertTrue(queryDataSet.hasNext()); - RowRecord record = queryDataSet.next(); - assertEquals(i, record.getTimestamp()); - assertEquals(10, record.getFields().size()); - for (int j = 0; j < 10; j++) { - assertEquals(Math.sin(i * 1.0), record.getFields().get(j).getDoubleV(), 0.00001); - } - } - TestCase.assertFalse(queryDataSet.hasNext()); - } catch (StorageEngineException | IOException | InterruptedException e) { - e.printStackTrace(); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterUDTFQueryExecutorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterUDTFQueryExecutorTest.java deleted file mode 100644 index 5de21b277349..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/ClusterUDTFQueryExecutorTest.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.UDTFPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.tsfile.read.common.RowRecord; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; - -import junit.framework.TestCase; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.time.ZoneId; - -import static junit.framework.TestCase.assertEquals; - -public class ClusterUDTFQueryExecutorTest extends BaseQueryTest { - - private ClusterUDTFQueryExecutor executor; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - } - - @Override - @After - public void tearDown() throws Exception { - super.tearDown(); - } - - @Test - public void testWithoutValueFilterAlignByTime() - throws QueryProcessException, StorageEngineException { - ClusterPlanner processor = new ClusterPlanner(); - String sqlStr = "select sin(s0) from root.*"; - PhysicalPlan plan = - processor.parseSQLToPhysicalPlan( - sqlStr, ZoneId.systemDefault(), IoTDBConstant.ClientVersion.V_0_13); - UDTFPlan udtfPlan = (UDTFPlan) plan; - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - executor = new ClusterUDTFQueryExecutor(udtfPlan, testMetaMember); - QueryDataSet queryDataSet = executor.executeWithoutValueFilterAlignByTime(context); - checkSequentialDatasetWithMathFunction(queryDataSet, 0, 20, Math::sin); - } catch (StorageEngineException | IOException | InterruptedException e) { - e.printStackTrace(); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testWithValueFilterAlignByTime() - throws IOException, StorageEngineException, QueryProcessException { - ClusterPlanner processor = new ClusterPlanner(); - String sqlStr = "select sin(s0) from root.* where time >= 5"; - PhysicalPlan plan = - processor.parseSQLToPhysicalPlan( - sqlStr, ZoneId.systemDefault(), IoTDBConstant.ClientVersion.V_0_13); - UDTFPlan udtfPlan = (UDTFPlan) plan; - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - executor = new ClusterUDTFQueryExecutor(udtfPlan, testMetaMember); - QueryDataSet queryDataSet = executor.executeWithoutValueFilterAlignByTime(context); - checkSequentialDatasetWithMathFunction(queryDataSet, 5, 15, Math::sin); - } catch (QueryProcessException | InterruptedException e) { - e.printStackTrace(); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - private interface MathFunctionProxy { - double invoke(double x); - } - - void checkSequentialDatasetWithMathFunction( - QueryDataSet dataSet, int offset, int size, MathFunctionProxy functionProxy) - throws IOException { - for (int i = offset; i < offset + size; i++) { - TestCase.assertTrue(dataSet.hasNext()); - RowRecord record = dataSet.next(); - assertEquals(i, record.getTimestamp()); - assertEquals(10, record.getFields().size()); - for (int j = 0; j < 10; j++) { - assertEquals( - functionProxy.invoke(i * 1.0), record.getFields().get(j).getDoubleV(), 0.00001); - } - } - 
TestCase.assertFalse(dataSet.hasNext()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/LoadConfigurationTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/LoadConfigurationTest.java deleted file mode 100644 index 48d6653f0ef5..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/LoadConfigurationTest.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ - -package org.apache.iotdb.cluster.query; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.constant.TestConstant; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.logical.sys.LoadConfigurationOperator; -import org.apache.iotdb.db.qp.physical.sys.LoadConfigurationPlan; -import org.apache.iotdb.db.qp.strategy.PhysicalGenerator; -import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer; -import org.apache.iotdb.tsfile.fileSystem.fsFactory.FSFactory; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.powermock.api.mockito.PowerMockito; -import org.powermock.core.classloader.annotations.PowerMockIgnore; -import org.powermock.core.classloader.annotations.PrepareForTest; -import org.powermock.modules.junit4.PowerMockRunner; - -import java.io.File; -import java.io.FileWriter; -import java.net.URL; - -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.when; - -@PowerMockIgnore({"com.sun.org.apache.xerces.*", "javax.xml.*", "org.xml.*", "javax.management.*"}) -@RunWith(PowerMockRunner.class) -@PrepareForTest({IoTDBDescriptor.class, ClusterDescriptor.class}) -public class LoadConfigurationTest { - - private static FSFactory fsFactory = FSFactoryProducer.getFSFactory(); - - private static final String ENGINE_PROPERTIES_FILE = - TestConstant.BASE_OUTPUT_PATH.concat("LoadConfigurationTestEngineProperties"); - private static final String CLUSTER_PROPERTIES_FILE = - TestConstant.BASE_OUTPUT_PATH.concat("LoadConfigurationTestClusterProperties"); - - @Mock private IoTDBDescriptor ioTDBDescriptor; - - @Mock private ClusterDescriptor clusterDescriptor; - - @Before - public void setUp() throws Exception { - // init engine properties - File engineFile = fsFactory.getFile(ENGINE_PROPERTIES_FILE); - if (engineFile.exists()) { - Assert.assertTrue(engineFile.delete()); - } - try (FileWriter fw = new FileWriter(engineFile)) { - fw.write("enable_metric_service=false"); - } - ioTDBDescriptor = PowerMockito.mock(IoTDBDescriptor.class); - PowerMockito.mockStatic(IoTDBDescriptor.class); - 
PowerMockito.doReturn(ioTDBDescriptor).when(IoTDBDescriptor.class, "getInstance"); - when(ioTDBDescriptor.getPropsUrl()).thenReturn(new URL("file:" + ENGINE_PROPERTIES_FILE)); - - // init cluster properties - File clusterFile = fsFactory.getFile(CLUSTER_PROPERTIES_FILE); - if (clusterFile.exists()) { - Assert.assertTrue(clusterFile.delete()); - } - try (FileWriter fw = new FileWriter(clusterFile)) { - fw.write("cluster_rpc_ip=127.0.0.1"); - } - clusterDescriptor = PowerMockito.mock(ClusterDescriptor.class); - PowerMockito.mockStatic(ClusterDescriptor.class); - PowerMockito.doReturn(clusterDescriptor).when(ClusterDescriptor.class, "getInstance"); - when(clusterDescriptor.getPropsUrl()).thenReturn(CLUSTER_PROPERTIES_FILE); - } - - @After - public void tearDown() { - File engineFile = fsFactory.getFile(ENGINE_PROPERTIES_FILE); - if (engineFile.exists()) { - Assert.assertTrue(engineFile.delete()); - } - File clusterFile = fsFactory.getFile(CLUSTER_PROPERTIES_FILE); - if (clusterFile.exists()) { - Assert.assertTrue(clusterFile.delete()); - } - } - - @Test - public void testLoadConfigurationGlobal() throws QueryProcessException { - PhysicalGenerator physicalGenerator = new ClusterPhysicalGenerator(); - LoadConfigurationOperator loadConfigurationOperator = - new LoadConfigurationOperator( - LoadConfigurationOperator.LoadConfigurationOperatorType.GLOBAL); - - LoadConfigurationPlan loadConfigurationPlan = - (LoadConfigurationPlan) - physicalGenerator.transformToPhysicalPlan(loadConfigurationOperator); - String metricProperties = - (String) loadConfigurationPlan.getIoTDBProperties().get("enable_metric_service"); - assertEquals("false", metricProperties); - String clusterIp = (String) loadConfigurationPlan.getClusterProperties().get("cluster_rpc_ip"); - assertEquals("127.0.0.1", clusterIp); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/fill/ClusterFillExecutorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/fill/ClusterFillExecutorTest.java deleted file mode 100644 index fe6c873475ad..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/fill/ClusterFillExecutorTest.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.fill; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.query.BaseQueryTest; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.qp.physical.crud.FillQueryPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.db.query.executor.fill.IFill; -import org.apache.iotdb.db.query.executor.fill.LinearFill; -import org.apache.iotdb.db.query.executor.fill.PreviousFill; -import org.apache.iotdb.db.query.executor.fill.ValueFill; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; - -import org.junit.Test; - -import java.io.IOException; -import java.util.Collections; -import java.util.Map; - -import static org.junit.Assert.assertFalse; - -public class ClusterFillExecutorTest extends BaseQueryTest { - - @Test - public void testPreviousFill() - throws QueryProcessException, StorageEngineException, IOException, IllegalPathException { - FillQueryPlan plan = new FillQueryPlan(); - plan.setDeduplicatedPathsAndUpdate( - Collections.singletonList( - new MeasurementPath(TestUtils.getTestSeries(0, 10), TSDataType.DOUBLE))); - plan.setPaths(plan.getDeduplicatedPaths()); - long defaultFillInterval = IoTDBDescriptor.getInstance().getConfig().getDefaultFillInterval(); - Map tsDataTypeIFillMap = - Collections.singletonMap( - TSDataType.DOUBLE, new PreviousFill(TSDataType.DOUBLE, 0, defaultFillInterval)); - plan.setFillType(tsDataTypeIFillMap); - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - - try { - ClusterFillExecutor fillExecutor; - QueryDataSet queryDataSet; - long[] queryTimes = new long[] {-1, 0, 5, 10, 20}; - Object[][] answers = - new Object[][] { - new Object[] {null}, - new Object[] {0.0}, - new Object[] {0.0}, - new Object[] {10.0}, - new Object[] {10.0}, - }; - for (int i = 0; i < queryTimes.length; i++) { - plan.setQueryTime(queryTimes[i]); - fillExecutor = new ClusterFillExecutor(plan, testMetaMember); - queryDataSet = fillExecutor.execute(context); - checkDoubleDataset(queryDataSet, answers[i]); - assertFalse(queryDataSet.hasNext()); - } - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testLinearFill() - throws QueryProcessException, StorageEngineException, IOException, IllegalPathException { - FillQueryPlan plan = new FillQueryPlan(); - plan.setDeduplicatedPathsAndUpdate( - Collections.singletonList( - new MeasurementPath(TestUtils.getTestSeries(0, 10), TSDataType.DOUBLE))); - plan.setPaths(plan.getDeduplicatedPaths()); - long defaultFillInterval = IoTDBDescriptor.getInstance().getConfig().getDefaultFillInterval(); - Map tsDataTypeIFillMap = - Collections.singletonMap( - TSDataType.DOUBLE, - new LinearFill(TSDataType.DOUBLE, 0, defaultFillInterval, defaultFillInterval)); - plan.setFillType(tsDataTypeIFillMap); - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - - try { - ClusterFillExecutor fillExecutor; - QueryDataSet queryDataSet; - 
long[] queryTimes = new long[] {-1, 0, 5, 10, 20}; - Object[][] answers = - new Object[][] { - new Object[] {null}, - new Object[] {0.0}, - new Object[] {5.0}, - new Object[] {10.0}, - new Object[] {null}, - }; - for (int i = 0; i < queryTimes.length; i++) { - plan.setQueryTime(queryTimes[i]); - fillExecutor = new ClusterFillExecutor(plan, testMetaMember); - queryDataSet = fillExecutor.execute(context); - checkDoubleDataset(queryDataSet, answers[i]); - assertFalse(queryDataSet.hasNext()); - } - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testValueFill() - throws QueryProcessException, StorageEngineException, IOException, IllegalPathException { - FillQueryPlan plan = new FillQueryPlan(); - plan.setDeduplicatedPathsAndUpdate( - Collections.singletonList( - new MeasurementPath(TestUtils.getTestSeries(0, 10), TSDataType.DOUBLE))); - plan.setPaths(plan.getDeduplicatedPaths()); - double fillValue = 1.0D; - Map tsDataTypeIFillMap = - Collections.singletonMap( - TSDataType.DOUBLE, new ValueFill(Double.toString(fillValue), TSDataType.DOUBLE)); - plan.setFillType(tsDataTypeIFillMap); - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - - try { - ClusterFillExecutor fillExecutor; - QueryDataSet queryDataSet; - long[] queryTimes = new long[] {-1, 0, 5, 10, 20}; - Object[][] answers = - new Object[][] { - new Object[] {1.0D}, - new Object[] {0.0D}, - new Object[] {1.0D}, - new Object[] {10.0D}, - new Object[] {1.0D}, - }; - for (int i = 0; i < queryTimes.length; i++) { - plan.setQueryTime(queryTimes[i]); - fillExecutor = new ClusterFillExecutor(plan, testMetaMember); - queryDataSet = fillExecutor.execute(context); - checkDoubleDataset(queryDataSet, answers[i]); - assertFalse(queryDataSet.hasNext()); - } - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/groupby/ClusterGroupByNoVFilterDataSetTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/groupby/ClusterGroupByNoVFilterDataSetTest.java deleted file mode 100644 index 39b53f829f57..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/groupby/ClusterGroupByNoVFilterDataSetTest.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.groupby; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.query.BaseQueryTest; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.qp.constant.SQLConstant; -import org.apache.iotdb.db.qp.physical.crud.GroupByTimePlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression; -import org.apache.iotdb.tsfile.read.filter.GroupByFilter; - -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertFalse; - -public class ClusterGroupByNoVFilterDataSetTest extends BaseQueryTest { - - @Test - public void test() - throws StorageEngineException, IOException, QueryProcessException, IllegalPathException { - QueryContext queryContext = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - GroupByTimePlan groupByPlan = new GroupByTimePlan(); - List pathList = new ArrayList<>(); - List aggregations = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - pathList.add(new MeasurementPath(TestUtils.getTestSeries(i, 0), TSDataType.DOUBLE)); - aggregations.add(SQLConstant.COUNT); - } - groupByPlan.setPaths(pathList); - groupByPlan.setDeduplicatedPathsAndUpdate(pathList); - groupByPlan.setAggregations(aggregations); - groupByPlan.setDeduplicatedAggregations(aggregations); - - groupByPlan.setStartTime(0); - groupByPlan.setEndTime(20); - groupByPlan.setSlidingStep(5); - groupByPlan.setInterval(5); - groupByPlan.setExpression(new GlobalTimeExpression(new GroupByFilter(5, 5, 0, 20))); - - ClusterGroupByNoVFilterDataSet dataSet = - new ClusterGroupByNoVFilterDataSet(queryContext, groupByPlan, testMetaMember); - dataSet.initGroupBy(queryContext, groupByPlan); - Object[][] answers = - new Object[][] { - new Object[] {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0}, - new Object[] {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0}, - new Object[] {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0}, - new Object[] {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0}, - }; - for (Object[] answer : answers) { - checkDoubleDataset(dataSet, answer); - } - assertFalse(dataSet.hasNext()); - } finally { - QueryResourceManager.getInstance().endQuery(queryContext.getQueryId()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/groupby/ClusterGroupByVFilterDataSetTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/groupby/ClusterGroupByVFilterDataSetTest.java deleted file mode 100644 index 61b3bc5476b5..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/groupby/ClusterGroupByVFilterDataSetTest.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.groupby; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.query.BaseQueryTest; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.qp.constant.SQLConstant; -import org.apache.iotdb.db.qp.physical.crud.GroupByTimePlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.expression.IExpression; -import org.apache.iotdb.tsfile.read.expression.impl.BinaryExpression; -import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression; -import org.apache.iotdb.tsfile.read.filter.TimeFilter; -import org.apache.iotdb.tsfile.read.filter.ValueFilter; - -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertFalse; - -public class ClusterGroupByVFilterDataSetTest extends BaseQueryTest { - - @Test - public void test() - throws IOException, StorageEngineException, QueryProcessException, IllegalPathException { - QueryContext queryContext = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - GroupByTimePlan groupByPlan = new GroupByTimePlan(); - List pathList = new ArrayList<>(); - List aggregations = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - pathList.add(new MeasurementPath(TestUtils.getTestSeries(i, 0), TSDataType.DOUBLE)); - aggregations.add(SQLConstant.COUNT); - } - groupByPlan.setPaths(pathList); - groupByPlan.setDeduplicatedPathsAndUpdate(pathList); - groupByPlan.setAggregations(aggregations); - groupByPlan.setDeduplicatedAggregations(aggregations); - - groupByPlan.setStartTime(0); - groupByPlan.setEndTime(20); - groupByPlan.setSlidingStep(5); - groupByPlan.setInterval(5); - - IExpression expression = - BinaryExpression.and( - new SingleSeriesExpression( - new MeasurementPath(TestUtils.getTestSeries(0, 0), TSDataType.DOUBLE), - ValueFilter.gtEq(5.0)), - new SingleSeriesExpression( - new MeasurementPath(TestUtils.getTestSeries(5, 0), TSDataType.DOUBLE), - TimeFilter.ltEq(15))); - groupByPlan.setExpression(expression); - - ClusterGroupByVFilterDataSet dataSet = - new ClusterGroupByVFilterDataSet(queryContext, groupByPlan, testMetaMember); - dataSet.initGroupBy(queryContext, groupByPlan); - Object[][] answers = - new Object[][] { - new Object[] {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}, - new Object[] {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0}, - new Object[] {5.0, 5.0, 5.0, 
5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0}, - new Object[] {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}, - }; - for (Object[] answer : answers) { - checkDoubleDataset(dataSet, answer); - } - assertFalse(dataSet.hasNext()); - } finally { - QueryResourceManager.getInstance().endQuery(queryContext.getQueryId()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/groupby/MergeGroupByExecutorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/groupby/MergeGroupByExecutorTest.java deleted file mode 100644 index 7cc7e565f8ac..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/groupby/MergeGroupByExecutorTest.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.groupby; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.query.BaseQueryTest; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.query.aggregation.AggregateResult; -import org.apache.iotdb.db.query.aggregation.AggregationType; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.db.query.factory.AggregateResultFactory; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.filter.TimeFilter; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; - -import org.junit.Test; - -import java.io.IOException; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -public class MergeGroupByExecutorTest extends BaseQueryTest { - - @Test - public void testNoTimeFilter() - throws QueryProcessException, IOException, IllegalPathException, StorageEngineException { - PartialPath path = new MeasurementPath(TestUtils.getTestSeries(0, 0), TSDataType.DOUBLE); - TSDataType dataType = TSDataType.DOUBLE; - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - Filter timeFilter = null; - Set deviceMeasurements = new HashSet<>(); - deviceMeasurements.add(path.getMeasurement()); - - MergeGroupByExecutor groupByExecutor = - new MergeGroupByExecutor( - path, deviceMeasurements, context, timeFilter, testMetaMember, true); - AggregationType[] types = AggregationType.values(); - for (AggregationType type : types) { - groupByExecutor.addAggregateResult( - 
AggregateResultFactory.getAggrResultByType(type, TSDataType.DOUBLE, true)); - } - Object[] answers; - List aggregateResults; - - answers = new Object[] {5.0, 2.0, 10.0, 0.0, 4.0, 4.0, 0.0, 4.0, 0.0, 4.0}; - aggregateResults = groupByExecutor.calcResult(0, 5); - checkAggregations(aggregateResults, answers); - - answers = new Object[] {5.0, 7.0, 35.0, 5.0, 9.0, 9.0, 5.0, 9.0, 5.0, 9.0}; - aggregateResults = groupByExecutor.calcResult(5, 10); - checkAggregations(aggregateResults, answers); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testTimeFilter() - throws QueryProcessException, IOException, IllegalPathException, StorageEngineException { - PartialPath path = new MeasurementPath(TestUtils.getTestSeries(0, 0), TSDataType.DOUBLE); - TSDataType dataType = TSDataType.DOUBLE; - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - Filter timeFilter = TimeFilter.gtEq(3); - Set deviceMeasurements = new HashSet<>(); - deviceMeasurements.add(path.getMeasurement()); - - MergeGroupByExecutor groupByExecutor = - new MergeGroupByExecutor( - path, deviceMeasurements, context, timeFilter, testMetaMember, true); - AggregationType[] types = AggregationType.values(); - for (AggregationType type : types) { - groupByExecutor.addAggregateResult( - AggregateResultFactory.getAggrResultByType(type, TSDataType.DOUBLE, true)); - } - - Object[] answers; - List aggregateResults; - answers = new Object[] {2.0, 3.5, 7.0, 3.0, 4.0, 4.0, 3.0, 4.0, 3.0, 4.0}; - aggregateResults = groupByExecutor.calcResult(0, 5); - checkAggregations(aggregateResults, answers); - - answers = new Object[] {5.0, 7.0, 35.0, 5.0, 9.0, 9.0, 5.0, 9.0, 5.0, 9.0}; - aggregateResults = groupByExecutor.calcResult(5, 10); - checkAggregations(aggregateResults, answers); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/groupby/RemoteGroupByExecutorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/groupby/RemoteGroupByExecutorTest.java deleted file mode 100644 index e4f0bf6516de..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/groupby/RemoteGroupByExecutorTest.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.groupby; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.query.BaseQueryTest; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.query.reader.ClusterReaderFactory; -import org.apache.iotdb.cluster.query.reader.EmptyReader; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.query.aggregation.AggregateResult; -import org.apache.iotdb.db.query.aggregation.AggregationType; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.db.query.executor.groupby.GroupByExecutor; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.filter.TimeFilter; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; - -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import static org.junit.Assert.assertTrue; - -public class RemoteGroupByExecutorTest extends BaseQueryTest { - - @Test - public void testNoTimeFilter() - throws QueryProcessException, IOException, StorageEngineException, IllegalPathException { - PartialPath path = new MeasurementPath(TestUtils.getTestSeries(0, 0), TSDataType.DOUBLE); - TSDataType dataType = TSDataType.DOUBLE; - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - Filter timeFilter = null; - List aggregationTypes = new ArrayList<>(); - for (int i = 0; i < AggregationType.values().length; i++) { - aggregationTypes.add(i); - } - Set deviceMeasurements = new HashSet<>(); - deviceMeasurements.add(path.getMeasurement()); - - ClusterReaderFactory readerFactory = new ClusterReaderFactory(testMetaMember); - List groupByExecutors = - readerFactory.getGroupByExecutors( - path, deviceMeasurements, dataType, context, timeFilter, aggregationTypes, true); - - for (int i = 0; i < groupByExecutors.size(); i++) { - GroupByExecutor groupByExecutor = groupByExecutors.get(i); - Object[] answers; - if (groupByExecutors.size() == 1) { - // a series is only managed by one group - List aggregateResults; - answers = new Object[] {5.0, 2.0, 10.0, 0.0, 4.0, 4.0, 0.0, 4.0, 0.0, 4.0}; - aggregateResults = groupByExecutor.calcResult(0, 5); - checkAggregations(aggregateResults, answers); - - answers = new Object[] {5.0, 7.0, 35.0, 5.0, 9.0, 9.0, 5.0, 9.0, 5.0, 9.0}; - aggregateResults = groupByExecutor.calcResult(5, 10); - checkAggregations(aggregateResults, answers); - } else { - List aggregateResults; - answers = new Object[] {0.0, null, 0.0, null, null, null, null, null, null}; - aggregateResults = groupByExecutor.calcResult(0, 5); - if (!(groupByExecutor instanceof EmptyReader)) { - checkAggregations(aggregateResults, answers); - } else { - assertTrue(aggregateResults.isEmpty()); - } - - answers = new Object[] {0.0, null, 0.0, null, null, null, null, null, null}; - aggregateResults = groupByExecutor.calcResult(5, 10); - if (!(groupByExecutor instanceof EmptyReader)) { - checkAggregations(aggregateResults, answers); - } else { - assertTrue(aggregateResults.isEmpty()); - } - } - } - } finally { - 
QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testTimeFilter() - throws QueryProcessException, IOException, StorageEngineException, IllegalPathException { - PartialPath path = new MeasurementPath(TestUtils.getTestSeries(0, 0), TSDataType.DOUBLE); - TSDataType dataType = TSDataType.DOUBLE; - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - Filter timeFilter = TimeFilter.gtEq(3); - List aggregationTypes = new ArrayList<>(); - for (int i = 0; i < AggregationType.values().length; i++) { - aggregationTypes.add(i); - } - Set deviceMeasurements = new HashSet<>(); - deviceMeasurements.add(path.getMeasurement()); - - ClusterReaderFactory readerFactory = new ClusterReaderFactory(testMetaMember); - List groupByExecutors = - readerFactory.getGroupByExecutors( - path, deviceMeasurements, dataType, context, timeFilter, aggregationTypes, true); - - for (int i = 0; i < groupByExecutors.size(); i++) { - GroupByExecutor groupByExecutor = groupByExecutors.get(i); - Object[] answers; - if (groupByExecutors.size() == 1) { - // a series is only managed by one group - List aggregateResults; - answers = - new Object[] { - 2.0, 3.5, 7.0, 3.0, 4.0, 4.0, 3.0, 4.0, 3.0, 4.0, - }; - aggregateResults = groupByExecutor.calcResult(0, 5); - checkAggregations(aggregateResults, answers); - - answers = new Object[] {5.0, 7.0, 35.0, 5.0, 9.0, 9.0, 5.0, 9.0, 5.0, 9.0}; - aggregateResults = groupByExecutor.calcResult(5, 10); - checkAggregations(aggregateResults, answers); - } else { - List aggregateResults; - answers = new Object[] {0.0, null, 0.0, null, null, null, null, null, null}; - aggregateResults = groupByExecutor.calcResult(0, 5); - if (!(groupByExecutor instanceof EmptyReader)) { - checkAggregations(aggregateResults, answers); - } else { - assertTrue(aggregateResults.isEmpty()); - } - - answers = new Object[] {0.0, null, 0.0, null, null, null, null, null, null}; - aggregateResults = groupByExecutor.calcResult(5, 10); - if (!(groupByExecutor instanceof EmptyReader)) { - checkAggregations(aggregateResults, answers); - } else { - assertTrue(aggregateResults.isEmpty()); - } - } - } - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/last/ClusterLastQueryExecutorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/last/ClusterLastQueryExecutorTest.java deleted file mode 100644 index 0cbd269cfa77..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/last/ClusterLastQueryExecutorTest.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.last; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.query.BaseQueryTest; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.qp.physical.crud.LastQueryPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.db.query.expression.ResultColumn; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.expression.IExpression; -import org.apache.iotdb.tsfile.read.expression.impl.GlobalTimeExpression; -import org.apache.iotdb.tsfile.read.filter.TimeFilter; -import org.apache.iotdb.tsfile.read.query.dataset.QueryDataSet; - -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class ClusterLastQueryExecutorTest extends BaseQueryTest { - - @Test - public void testLastQueryTimeFilter() - throws QueryProcessException, StorageEngineException, IOException, IllegalPathException { - LastQueryPlan plan = new LastQueryPlan(); - plan.setDeduplicatedPathsAndUpdate( - Collections.singletonList( - new MeasurementPath(TestUtils.getTestSeries(0, 10), TSDataType.DOUBLE))); - plan.setPaths(plan.getDeduplicatedPaths()); - IExpression expression = new GlobalTimeExpression(TimeFilter.gtEq(Long.MAX_VALUE)); - plan.setExpression(expression); - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - ClusterLastQueryExecutor lastQueryExecutor = - new ClusterLastQueryExecutor(plan, testMetaMember); - QueryDataSet queryDataSet = lastQueryExecutor.execute(context, plan); - assertFalse(queryDataSet.hasNext()); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testLastQueryNoTimeFilter() - throws QueryProcessException, StorageEngineException, IOException, IllegalPathException { - LastQueryPlan plan = new LastQueryPlan(); - plan.setDeduplicatedPathsAndUpdate( - Collections.singletonList( - new MeasurementPath(TestUtils.getTestSeries(0, 10), TSDataType.DOUBLE))); - plan.setPaths(plan.getDeduplicatedPaths()); - List resultColumnList = new ArrayList<>(); - resultColumnList.add(new ResultColumn(null, "a")); - plan.setResultColumns(resultColumnList); - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - ClusterLastQueryExecutor lastQueryExecutor = - new ClusterLastQueryExecutor(plan, testMetaMember); - QueryDataSet queryDataSet = lastQueryExecutor.execute(context, plan); - assertTrue(queryDataSet.hasNext()); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/manage/ClusterQueryManagerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/manage/ClusterQueryManagerTest.java deleted file mode 100644 index a50b03758c55..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/manage/ClusterQueryManagerTest.java +++ /dev/null @@ -1,182 
+0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.manage; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.query.reader.series.IAggregateReader; -import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp; -import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics; -import org.apache.iotdb.tsfile.read.common.BatchData; -import org.apache.iotdb.tsfile.read.reader.IBatchReader; - -import org.junit.Before; -import org.junit.Test; - -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; - -public class ClusterQueryManagerTest { - - private ClusterQueryManager queryManager; - - @Before - public void setUp() { - queryManager = new ClusterQueryManager(); - } - - @Test - public void testContext() { - RemoteQueryContext queryContext1 = queryManager.getQueryContext(TestUtils.getNode(0), 1); - RemoteQueryContext queryContext2 = queryManager.getQueryContext(TestUtils.getNode(0), 1); - RemoteQueryContext queryContext3 = queryManager.getQueryContext(TestUtils.getNode(1), 1); - assertSame(queryContext1, queryContext2); - assertNotEquals(queryContext2, queryContext3); - } - - @Test - public void testRegisterReader() { - IBatchReader reader = - new IBatchReader() { - @Override - public boolean hasNextBatch() { - return false; - } - - @Override - public BatchData nextBatch() { - return null; - } - - @Override - public void close() {} - }; - long id = queryManager.registerReader(reader); - assertSame(reader, queryManager.getReader(id)); - } - - @Test - public void testRegisterReaderByTime() { - IReaderByTimestamp reader = (timestamp, length) -> null; - long id = queryManager.registerReaderByTime(reader); - assertSame(reader, queryManager.getReaderByTimestamp(id)); - } - - @Test - public void testRegisterAggregateReader() { - IAggregateReader reader = - new IAggregateReader() { - @Override - public boolean hasNextFile() { - return false; - } - - @Override - public boolean canUseCurrentFileStatistics() { - return false; - } - - @Override - public Statistics currentFileStatistics() { - return null; - } - - @Override - public void skipCurrentFile() {} - - @Override - public boolean hasNextChunk() { - return false; - } - - @Override - public boolean canUseCurrentChunkStatistics() { - return false; - } - - @Override - public Statistics currentChunkStatistics() { - return null; - } - - @Override - public void skipCurrentChunk() {} - - @Override - public boolean hasNextPage() { - return false; - } - - @Override - public boolean canUseCurrentPageStatistics() { - 
return false; - } - - @Override - public Statistics currentPageStatistics() { - return null; - } - - @Override - public void skipCurrentPage() {} - - @Override - public BatchData nextPage() { - return null; - } - - @Override - public boolean isAscending() { - return false; - } - }; - long id = queryManager.registerAggrReader(reader); - assertSame(reader, queryManager.getAggrReader(id)); - } - - @Test - public void testEndQuery() throws StorageEngineException { - RemoteQueryContext queryContext = queryManager.getQueryContext(TestUtils.getNode(0), 1); - for (int i = 0; i < 10; i++) { - IBatchReader reader = - new IBatchReader() { - @Override - public boolean hasNextBatch() { - return false; - } - - @Override - public BatchData nextBatch() { - return null; - } - - @Override - public void close() {} - }; - queryContext.registerLocalReader(queryManager.registerReader(reader)); - } - queryManager.endQuery(TestUtils.getNode(0), 1); - for (int i = 0; i < 10; i++) { - assertNull(queryManager.getReader(i)); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/manage/QueryCoordinatorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/manage/QueryCoordinatorTest.java deleted file mode 100644 index 23752a7fcd8b..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/manage/QueryCoordinatorTest.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.manage; - -import org.apache.iotdb.cluster.common.TestAsyncMetaClient; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.TNodeStatus; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.server.monitor.NodeStatus; -import org.apache.iotdb.cluster.server.monitor.NodeStatusManager; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.protocol.TBinaryProtocol.Factory; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - -@SuppressWarnings({"java:S2925"}) -public class QueryCoordinatorTest { - - private Map nodeStatusMap; - private Map nodeLatencyMap; - private QueryCoordinator coordinator = QueryCoordinator.getINSTANCE(); - private boolean prevUseAsyncServer; - - @Before - public void setUp() { - prevUseAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(true); - nodeStatusMap = new HashMap<>(); - nodeLatencyMap = new HashMap<>(); - for (int i = 0; i < 5; i++) { - NodeStatus status = new NodeStatus(); - TNodeStatus nodeStatus = new TNodeStatus(); - status.setStatus(nodeStatus); - status.setLastResponseLatency(i); - Node node = TestUtils.getNode(i); - nodeStatusMap.put(node, status); - // nodes with smaller num have lower latency - nodeLatencyMap.put(node, i * 200L); - } - - MetaGroupMember metaGroupMember = - new MetaGroupMember() { - @Override - public AsyncClient getAsyncClient(Node node) { - try { - return new TestAsyncMetaClient(new Factory(), null, node) { - @Override - public void queryNodeStatus(AsyncMethodCallback resultHandler) { - new Thread( - () -> { - try { - Thread.sleep(nodeLatencyMap.get(getNode())); - } catch (InterruptedException e) { - // ignored - } - - resultHandler.onComplete(nodeStatusMap.get(getNode()).getStatus()); - }) - .start(); - } - }; - } catch (IOException e) { - fail(e.getMessage()); - return null; - } - } - }; - NodeStatusManager.getINSTANCE().setMetaGroupMember(metaGroupMember); - NodeStatusManager.getINSTANCE().clear(); - } - - @After - public void tearDown() { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(prevUseAsyncServer); - } - - @Test - public void test() { - List orderedNodes = new ArrayList<>(); - for (int i = 0; i < 5; i++) { - orderedNodes.add(TestUtils.getNode(i)); - } - List unorderedNodes = new ArrayList<>(orderedNodes); - Collections.shuffle(unorderedNodes); - - List reorderedNodes = coordinator.reorderNodes(unorderedNodes); - assertEquals(orderedNodes, reorderedNodes); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/ClusterReaderFactoryTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/ClusterReaderFactoryTest.java deleted file mode 100644 index 59428ab7777f..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/ClusterReaderFactoryTest.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.reader; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.query.BaseQueryTest; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.db.query.reader.series.SeriesRawDataBatchReader; - -import org.junit.Test; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashSet; - -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; - -public class ClusterReaderFactoryTest extends BaseQueryTest { - - @Test - public void testTTL() - throws StorageEngineException, MetadataException, QueryProcessException, IOException { - - ClusterReaderFactory readerFactory = new ClusterReaderFactory(testMetaMember); - RemoteQueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - - try { - SeriesRawDataBatchReader seriesReader = - (SeriesRawDataBatchReader) - readerFactory.getSeriesBatchReader( - pathList.get(0), - new HashSet<>(Collections.singletonList(pathList.get(0).getMeasurement())), - pathList.get(0).getSeriesType(), - null, - null, - context, - dataGroupMemberMap.get(TestUtils.getRaftNode(10, 0)), - true, - null); - assertNotNull(seriesReader); - StorageEngine.getInstance().setTTL(new PartialPath(TestUtils.getTestSg(0)), 100); - seriesReader = - (SeriesRawDataBatchReader) - readerFactory.getSeriesBatchReader( - pathList.get(0), - new HashSet<>(Collections.singletonList(pathList.get(0).getMeasurement())), - pathList.get(0).getSeriesType(), - null, - null, - context, - dataGroupMemberMap.get(TestUtils.getRaftNode(10, 0)), - true, - null); - assertNull(seriesReader); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - StorageEngine.getInstance().setTTL(new PartialPath(TestUtils.getTestSg(0)), Long.MAX_VALUE); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/ClusterTimeGeneratorTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/ClusterTimeGeneratorTest.java deleted file mode 100644 index 18b7167d1a76..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/ClusterTimeGeneratorTest.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.reader; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.exception.EmptyIntervalException; -import org.apache.iotdb.cluster.query.BaseQueryTest; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.qp.physical.crud.RawDataQueryPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.expression.IExpression; -import org.apache.iotdb.tsfile.read.expression.impl.BinaryExpression; -import org.apache.iotdb.tsfile.read.expression.impl.SingleSeriesExpression; -import org.apache.iotdb.tsfile.read.filter.TimeFilter; -import org.apache.iotdb.tsfile.read.filter.ValueFilter; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; -import org.apache.iotdb.tsfile.read.filter.operator.AndFilter; - -import org.junit.Test; - -import java.io.IOException; -import java.util.Set; -import java.util.concurrent.atomic.AtomicReference; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class ClusterTimeGeneratorTest extends BaseQueryTest { - - @Test - public void test() - throws StorageEngineException, IOException, IllegalPathException, QueryProcessException { - RawDataQueryPlan dataQueryPlan = new RawDataQueryPlan(); - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - IExpression expression = - BinaryExpression.and( - new SingleSeriesExpression( - new PartialPath(TestUtils.getTestSeries(0, 0)), ValueFilter.gtEq(3.0)), - new SingleSeriesExpression( - new PartialPath(TestUtils.getTestSeries(1, 1)), ValueFilter.ltEq(8.0))); - dataQueryPlan.setExpression(expression); - dataQueryPlan.addDeduplicatedPaths(new MeasurementPath(TestUtils.getTestSeries(0, 0))); - dataQueryPlan.addDeduplicatedPaths(new MeasurementPath(TestUtils.getTestSeries(1, 1))); - - ClusterTimeGenerator timeGenerator = - new ClusterTimeGenerator(context, testMetaMember, dataQueryPlan, false); - for (int i = 3; i <= 8; i++) { - assertTrue(timeGenerator.hasNext()); - assertEquals(i, timeGenerator.next()); - } - assertFalse(timeGenerator.hasNext()); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public 
void testTimeFilter() - throws StorageEngineException, IOException, IllegalPathException, QueryProcessException { - RawDataQueryPlan dataQueryPlan = new RawDataQueryPlan(); - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - Filter valueFilter = ValueFilter.gtEq(3.0); - Filter timeFilter = TimeFilter.ltEq(8); - try { - IExpression expression = - new SingleSeriesExpression( - new PartialPath(TestUtils.getTestSeries(0, 0)), - new AndFilter(valueFilter, timeFilter)); - dataQueryPlan.setExpression(expression); - dataQueryPlan.addDeduplicatedPaths(new MeasurementPath(TestUtils.getTestSeries(0, 0))); - - // capture the time filter used to create a reader - AtomicReference timeFilterRef = new AtomicReference<>(null); - ClusterReaderFactory clusterReaderFactory = - new ClusterReaderFactory(testMetaMember) { - @Override - public ManagedSeriesReader getSeriesReader( - PartialPath path, - Set deviceMeasurements, - TSDataType dataType, - Filter timeFilter, - Filter valueFilter, - QueryContext context, - boolean ascending) - throws StorageEngineException, EmptyIntervalException { - timeFilterRef.set(timeFilter); - return super.getSeriesReader( - path, deviceMeasurements, dataType, timeFilter, valueFilter, context, ascending); - } - }; - ClusterTimeGenerator timeGenerator = - new ClusterTimeGenerator( - context, testMetaMember, clusterReaderFactory, dataQueryPlan, false); - - for (int i = 3; i <= 8; i++) { - assertTrue(timeGenerator.hasNext()); - assertEquals(i, timeGenerator.next()); - } - assertFalse(timeGenerator.hasNext()); - assertEquals(timeFilter, timeFilterRef.get()); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/DatasourceInfoTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/DatasourceInfoTest.java deleted file mode 100644 index b066a21b24bd..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/DatasourceInfoTest.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.reader; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.IClientManager; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; -import org.apache.iotdb.cluster.rpc.thrift.SingleSeriesQueryRequest; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; - -import static org.junit.Assert.assertFalse; - -public class DatasourceInfoTest { - private MetaGroupMember metaGroupMember; - private IClientManager clientManager; - - @Before - public void setUp() { - metaGroupMember = new TestMetaGroupMember(); - clientManager = ClusterIoTDB.getInstance().getClientManager(); - ClusterIoTDB.getInstance() - .setClientManager( - new IClientManager() { - @Override - public RaftService.AsyncClient borrowAsyncClient(Node node, ClientCategory category) - throws IOException { - return new AsyncDataClient(null, null, TestUtils.getNode(0), null) { - @Override - public void querySingleSeries( - SingleSeriesQueryRequest request, AsyncMethodCallback resultHandler) - throws TException { - throw new TException("Don't worry, this is the exception I constructed."); - } - }; - } - - @Override - public RaftService.Client borrowSyncClient(Node node, ClientCategory category) { - return null; - } - - @Override - public void returnAsyncClient( - RaftService.AsyncClient client, Node node, ClientCategory category) {} - - @Override - public void returnSyncClient( - RaftService.Client client, Node node, ClientCategory category) {} - }); - } - - @After - public void tearDown() { - ClusterIoTDB.getInstance().setClientManager(clientManager); - } - - @Test - public void testFailedAll() throws StorageEngineException { - PartitionGroup group = new PartitionGroup(); - group.add(TestUtils.getNode(0)); - group.add(TestUtils.getNode(1)); - group.add(TestUtils.getNode(2)); - - SingleSeriesQueryRequest request = new SingleSeriesQueryRequest(); - RemoteQueryContext context = new RemoteQueryContext(1); - - try { - DataSourceInfo sourceInfo = - new DataSourceInfo(group, TSDataType.DOUBLE, request, context, group); - boolean hasClient = sourceInfo.hasNextDataClient(false, Long.MIN_VALUE); - - assertFalse(hasClient); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/RemoteSeriesReaderByTimestampTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/RemoteSeriesReaderByTimestampTest.java deleted file mode 100644 index a4a4eae39725..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/RemoteSeriesReaderByTimestampTest.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - 
* or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.query.reader; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.IClientManager; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; -import org.apache.iotdb.cluster.rpc.thrift.SingleSeriesQueryRequest; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.common.BatchData; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Set; -import java.util.concurrent.ConcurrentSkipListSet; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - -public class RemoteSeriesReaderByTimestampTest { - - private BatchData batchData = TestUtils.genBatchData(TSDataType.DOUBLE, 0, 100); - private Set failedNodes = new ConcurrentSkipListSet<>(); - private boolean prevUseAsyncServer; - - @Before - public void setUp() { - prevUseAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(true); - ClusterIoTDB.getInstance() - .setClientManager( - new IClientManager() { - @Override - public RaftService.AsyncClient borrowAsyncClient(Node node, ClientCategory category) - throws IOException { - return new AsyncDataClient(null, null, node, ClientCategory.DATA) { - - @Override - public void querySingleSeriesByTimestamp( - SingleSeriesQueryRequest request, - org.apache.thrift.async.AsyncMethodCallback resultHandler) - throws TException { - if (failedNodes.contains(node)) { - throw new TException("Node down."); - } - - new Thread(() -> resultHandler.onComplete(1L)).start(); - } - - @Override - public void fetchSingleSeriesByTimestamps( - RaftNode header, - long readerId, - List timestamps, - AsyncMethodCallback resultHandler) - throws TException { - if (failedNodes.contains(node)) 
{ - throw new TException("Node down."); - } - - new Thread( - () -> { - ByteArrayOutputStream byteArrayOutputStream = - new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = - new DataOutputStream(byteArrayOutputStream); - Object[] results = new Object[timestamps.size()]; - for (int i = 0; i < timestamps.size(); i++) { - while (batchData.hasCurrent()) { - long currentTime = batchData.currentTime(); - if (currentTime == timestamps.get(i)) { - results[i] = batchData.currentValue(); - batchData.next(); - break; - } else if (currentTime > timestamps.get(i)) { - results[i] = null; - break; - } - // time < timestamp, continue - batchData.next(); - } - } - SerializeUtils.serializeObjects(results, dataOutputStream); - - resultHandler.onComplete( - ByteBuffer.wrap(byteArrayOutputStream.toByteArray())); - }) - .start(); - } - }; - } - - @Override - public RaftService.Client borrowSyncClient(Node node, ClientCategory category) { - return null; - } - - @Override - public void returnAsyncClient( - RaftService.AsyncClient client, Node node, ClientCategory category) {} - - @Override - public void returnSyncClient( - RaftService.Client client, Node node, ClientCategory category) {} - }); - } - - @After - public void tearDown() { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(prevUseAsyncServer); - } - - private MetaGroupMember metaGroupMember = new MetaGroupMember(); - - @Test - public void test() throws IOException, StorageEngineException { - PartitionGroup group = new PartitionGroup(); - group.add(TestUtils.getNode(0)); - group.add(TestUtils.getNode(1)); - group.add(TestUtils.getNode(2)); - - SingleSeriesQueryRequest request = new SingleSeriesQueryRequest(); - RemoteQueryContext context = new RemoteQueryContext(1); - - try { - DataSourceInfo sourceInfo = - new DataSourceInfo(group, TSDataType.DOUBLE, request, context, group); - sourceInfo.hasNextDataClient(true, Long.MIN_VALUE); - - RemoteSeriesReaderByTimestamp reader = new RemoteSeriesReaderByTimestamp(sourceInfo); - - long[] times = new long[100]; - for (int i = 0; i < 100; i++) { - times[i] = i; - } - Object[] results = reader.getValuesInTimestamps(times, times.length); - for (int i = 0; i < 100; i++) { - assertEquals(i * 1.0, results[i]); - } - times[0] = 101; - assertEquals(null, reader.getValuesInTimestamps(times, 1)[0]); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testFailedNode() throws IOException, StorageEngineException { - batchData = TestUtils.genBatchData(TSDataType.DOUBLE, 0, 100); - PartitionGroup group = new PartitionGroup(); - group.add(TestUtils.getNode(0)); - group.add(TestUtils.getNode(1)); - group.add(TestUtils.getNode(2)); - - SingleSeriesQueryRequest request = new SingleSeriesQueryRequest(); - RemoteQueryContext context = new RemoteQueryContext(1); - - try { - DataSourceInfo sourceInfo = - new DataSourceInfo(group, TSDataType.DOUBLE, request, context, group); - long startTime = System.currentTimeMillis(); - sourceInfo.hasNextDataClient(true, Long.MIN_VALUE); - RemoteSeriesReaderByTimestamp reader = new RemoteSeriesReaderByTimestamp(sourceInfo); - - long endTime = System.currentTimeMillis(); - System.out.println( - Thread.currentThread().getStackTrace()[1].getLineNumber() - + " begin: " - + (endTime - startTime)); - // normal read - assertEquals(TestUtils.getNode(0), sourceInfo.getCurrentNode()); - long[] times = new long[50]; - for (int i = 0; i < 50; i++) { - times[i] = i; - } - Object[] results = 
reader.getValuesInTimestamps(times, 50); - for (int i = 0; i < 50; i++) { - assertEquals(i * 1.0, results[i]); - } - - endTime = System.currentTimeMillis(); - System.out.println( - Thread.currentThread().getStackTrace()[1].getLineNumber() - + " begin: " - + (endTime - startTime)); - failedNodes.add(TestUtils.getNode(0)); - for (int i = 50; i < 80; i++) { - times[i - 50] = i; - } - results = reader.getValuesInTimestamps(times, 30); - for (int i = 50; i < 80; i++) { - assertEquals(i * 1.0, results[i - 50]); - } - assertEquals(TestUtils.getNode(1), sourceInfo.getCurrentNode()); - - // a bad client, change to another node again - failedNodes.add(TestUtils.getNode(1)); - for (int i = 80; i < 90; i++) { - times[i - 80] = i; - } - results = reader.getValuesInTimestamps(times, 10); - for (int i = 80; i < 90; i++) { - assertEquals(i * 1.0, results[i - 80]); - } - assertEquals(TestUtils.getNode(2), sourceInfo.getCurrentNode()); - - endTime = System.currentTimeMillis(); - System.out.println( - Thread.currentThread().getStackTrace()[1].getLineNumber() - + " begin: " - + (endTime - startTime)); - // all node failed - failedNodes.add(TestUtils.getNode(2)); - - try { - times[0] = 90; - reader.getValuesInTimestamps(times, 1); - fail(); - } catch (IOException e) { - assertEquals("no available client.", e.getMessage()); - } - endTime = System.currentTimeMillis(); - System.out.println( - Thread.currentThread().getStackTrace()[1].getLineNumber() - + " begin: " - + (endTime - startTime)); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/RemoteSimpleSeriesReaderTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/RemoteSimpleSeriesReaderTest.java deleted file mode 100644 index 7c23cb3ec39b..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/RemoteSimpleSeriesReaderTest.java +++ /dev/null @@ -1,243 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.query.reader; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.IClientManager; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; -import org.apache.iotdb.cluster.rpc.thrift.SingleSeriesQueryRequest; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.common.BatchData; - -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Set; -import java.util.concurrent.ConcurrentSkipListSet; - -import static junit.framework.TestCase.assertEquals; -import static junit.framework.TestCase.assertFalse; -import static junit.framework.TestCase.assertTrue; -import static org.junit.Assert.fail; - -public class RemoteSimpleSeriesReaderTest { - - private RemoteSimpleSeriesReader reader; - private BatchData batchData; - private boolean batchUsed; - private MetaGroupMember metaGroupMember; - private Set failedNodes = new ConcurrentSkipListSet<>(); - private boolean prevUseAsyncServer; - - @Before - public void setUp() { - prevUseAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(true); - batchData = TestUtils.genBatchData(TSDataType.DOUBLE, 0, 100); - batchUsed = false; - metaGroupMember = new TestMetaGroupMember(); - // TODO fixme : restore normal provider - ClusterIoTDB.getInstance() - .setClientManager( - new IClientManager() { - @Override - public RaftService.AsyncClient borrowAsyncClient(Node node, ClientCategory category) - throws IOException { - return new AsyncDataClient(null, null, node, ClientCategory.DATA) { - @Override - public void fetchSingleSeries( - RaftNode header, long readerId, AsyncMethodCallback resultHandler) - throws TException { - if (failedNodes.contains(node)) { - throw new TException("Node down."); - } - - new Thread( - () -> { - if (batchUsed) { - resultHandler.onComplete(ByteBuffer.allocate(0)); - } else { - ByteArrayOutputStream byteArrayOutputStream = - new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = - new DataOutputStream(byteArrayOutputStream); - SerializeUtils.serializeBatchData(batchData, dataOutputStream); - batchUsed = true; - resultHandler.onComplete( - ByteBuffer.wrap(byteArrayOutputStream.toByteArray())); - } - }) - .start(); - } - - @Override - public void querySingleSeries( - SingleSeriesQueryRequest request, AsyncMethodCallback resultHandler) - throws 
TException { - if (failedNodes.contains(node)) { - throw new TException("Node down."); - } - - new Thread(() -> resultHandler.onComplete(1L)).start(); - } - }; - } - - @Override - public RaftService.Client borrowSyncClient(Node node, ClientCategory category) { - return null; - } - - @Override - public void returnAsyncClient( - RaftService.AsyncClient client, Node node, ClientCategory category) {} - - @Override - public void returnSyncClient( - RaftService.Client client, Node node, ClientCategory category) {} - }); - } - - @After - public void tearDown() { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(prevUseAsyncServer); - } - - @Test - public void testSingle() throws IOException, StorageEngineException { - PartitionGroup group = new PartitionGroup(); - group.add(TestUtils.getNode(0)); - group.add(TestUtils.getNode(1)); - group.add(TestUtils.getNode(2)); - - SingleSeriesQueryRequest request = new SingleSeriesQueryRequest(); - RemoteQueryContext context = new RemoteQueryContext(1); - - try { - DataSourceInfo sourceInfo = - new DataSourceInfo(group, TSDataType.DOUBLE, request, context, group); - sourceInfo.hasNextDataClient(false, Long.MIN_VALUE); - - reader = new RemoteSimpleSeriesReader(sourceInfo); - - for (int i = 0; i < 100; i++) { - assertTrue(reader.hasNextTimeValuePair()); - TimeValuePair curr = reader.currentTimeValuePair(); - TimeValuePair pair = reader.nextTimeValuePair(); - assertEquals(pair, curr); - assertEquals(i, pair.getTimestamp()); - assertEquals(i * 1.0, pair.getValue().getDouble(), 0.00001); - } - assertFalse(reader.hasNextTimeValuePair()); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testFailedNode() throws IOException, StorageEngineException { - System.out.println("Start testFailedNode()"); - - batchData = TestUtils.genBatchData(TSDataType.DOUBLE, 0, 100); - PartitionGroup group = new PartitionGroup(); - group.add(TestUtils.getNode(0)); - group.add(TestUtils.getNode(1)); - group.add(TestUtils.getNode(2)); - - SingleSeriesQueryRequest request = new SingleSeriesQueryRequest(); - RemoteQueryContext context = new RemoteQueryContext(1); - - try { - DataSourceInfo sourceInfo = - new DataSourceInfo(group, TSDataType.DOUBLE, request, context, group); - sourceInfo.hasNextDataClient(false, Long.MIN_VALUE); - reader = new RemoteSimpleSeriesReader(sourceInfo); - - // normal read - Assert.assertEquals(TestUtils.getNode(0), sourceInfo.getCurrentNode()); - for (int i = 0; i < 50; i++) { - assertTrue(reader.hasNextTimeValuePair()); - TimeValuePair curr = reader.currentTimeValuePair(); - TimeValuePair pair = reader.nextTimeValuePair(); - assertEquals(pair, curr); - assertEquals(i, pair.getTimestamp()); - assertEquals(i * 1.0, pair.getValue().getDouble(), 0.00001); - } - - this.batchUsed = false; - this.batchData = TestUtils.genBatchData(TSDataType.DOUBLE, 0, 100); - // a bad client, change to another node - failedNodes.add(TestUtils.getNode(0)); - reader.clearCurDataForTest(); - for (int i = 50; i < 80; i++) { - TimeValuePair pair = reader.nextTimeValuePair(); - assertEquals(i - 50, pair.getTimestamp()); - assertEquals((i - 50) * 1.0, pair.getValue().getDouble(), 0.00001); - } - Assert.assertEquals(TestUtils.getNode(1), sourceInfo.getCurrentNode()); - - this.batchUsed = false; - this.batchData = TestUtils.genBatchData(TSDataType.DOUBLE, 0, 100); - // a bad client, change to another node again - failedNodes.add(TestUtils.getNode(1)); - reader.clearCurDataForTest(); - for (int i = 80; i < 90; i++) { - 
TimeValuePair pair = reader.nextTimeValuePair(); - assertEquals(i - 80, pair.getTimestamp()); - assertEquals((i - 80) * 1.0, pair.getValue().getDouble(), 0.00001); - } - assertEquals(TestUtils.getNode(2), sourceInfo.getCurrentNode()); - - // all node failed - failedNodes.add(TestUtils.getNode(2)); - reader.clearCurDataForTest(); - try { - reader.nextTimeValuePair(); - fail(); - } catch (IOException e) { - assertEquals(e.getMessage(), "no available client."); - } - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathManagedMergeReaderTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathManagedMergeReaderTest.java deleted file mode 100644 index de0d8ea762ab..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/mult/AssignPathManagedMergeReaderTest.java +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.IClientManager; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.rpc.thrift.MultSeriesQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.common.BatchData; - -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentSkipListSet; - -import static junit.framework.TestCase.assertEquals; - -public class AssignPathManagedMergeReaderTest { - - private AssignPathManagedMergeReader assignPathManagedMergeReader; - private RemoteMultSeriesReader reader; - private List batchData; - private boolean batchUsed; - private MetaGroupMember metaGroupMember; - private Set failedNodes = new ConcurrentSkipListSet<>(); - private boolean prevUseAsyncServer; - private List paths; - private List dataTypes; - - @Before - public void setUp() throws IllegalPathException { - paths = Lists.newArrayList(); - dataTypes = Lists.newArrayList(); - PartialPath partialPath = new PartialPath("root.a.b"); - paths.add(partialPath); - partialPath = new PartialPath("root.a.c"); - paths.add(partialPath); - dataTypes.add(TSDataType.DOUBLE); - dataTypes.add(TSDataType.INT32); - prevUseAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - batchData = Lists.newArrayList(); - batchData.add(TestUtils.genBatchData(TSDataType.DOUBLE, 0, 100)); - batchData.add(TestUtils.genBatchData(TSDataType.INT32, 0, 100)); - batchUsed = false; - metaGroupMember = new TestMetaGroupMember(); - assignPathManagedMergeReader = - new AssignPathManagedMergeReader("root.a.b", TSDataType.DOUBLE, true); - } - - @After - public void tearDown() { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(prevUseAsyncServer); - } - - @Test - public void testMultManagerMergeRemoteSeriesReader() throws IOException, StorageEngineException { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(true); - PartitionGroup group = new PartitionGroup(); - setAsyncDataClient(); - group.add(TestUtils.getNode(0)); - group.add(TestUtils.getNode(1)); - 
group.add(TestUtils.getNode(2)); - - MultSeriesQueryRequest request = new MultSeriesQueryRequest(); - RemoteQueryContext context = new RemoteQueryContext(1); - - try { - MultDataSourceInfo sourceInfo = - new MultDataSourceInfo(group, paths, dataTypes, request, context, group); - sourceInfo.hasNextDataClient(Long.MIN_VALUE); - - reader = new RemoteMultSeriesReader(sourceInfo); - assignPathManagedMergeReader.addReader(reader, 0); - - for (int i = 0; i < 100; i++) { - assertEquals(true, assignPathManagedMergeReader.hasNextTimeValuePair()); - TimeValuePair pair = assignPathManagedMergeReader.nextTimeValuePair(); - assertEquals(i, pair.getTimestamp()); - assertEquals(i * 1.0, pair.getValue().getDouble(), 0.00001); - } - assertEquals(false, assignPathManagedMergeReader.hasNextTimeValuePair()); - - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - private void setAsyncDataClient() { - ClusterIoTDB.getInstance() - .setClientManager( - new IClientManager() { - @Override - public RaftService.AsyncClient borrowAsyncClient(Node node, ClientCategory category) - throws IOException { - return new AsyncDataClient(null, null, node, ClientCategory.DATA) { - @Override - public void fetchMultSeries( - RaftNode header, - long readerId, - List paths, - AsyncMethodCallback> resultHandler) - throws TException { - if (failedNodes.contains(node)) { - throw new TException("Node down."); - } - - new Thread( - () -> { - Map stringByteBufferMap = Maps.newHashMap(); - if (batchUsed) { - paths.forEach( - path -> { - stringByteBufferMap.put(path, ByteBuffer.allocate(0)); - }); - } else { - batchUsed = true; - - for (int i = 0; i < batchData.size(); i++) { - stringByteBufferMap.put( - paths.get(i), generateByteBuffer(batchData.get(i))); - } - } - resultHandler.onComplete(stringByteBufferMap); - }) - .start(); - } - - @Override - public void queryMultSeries( - MultSeriesQueryRequest request, AsyncMethodCallback resultHandler) - throws TException { - if (failedNodes.contains(node)) { - throw new TException("Node down."); - } - - new Thread(() -> resultHandler.onComplete(1L)).start(); - } - }; - } - - @Override - public RaftService.Client borrowSyncClient(Node node, ClientCategory category) { - return null; - } - - @Override - public void returnAsyncClient( - RaftService.AsyncClient client, Node node, ClientCategory category) {} - - @Override - public void returnSyncClient( - RaftService.Client client, Node node, ClientCategory category) {} - }); - } - - private ByteBuffer generateByteBuffer(BatchData batchData) { - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - SerializeUtils.serializeBatchData(batchData, dataOutputStream); - ByteBuffer byteBuffer = ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - return byteBuffer; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/mult/MultSeriesRawDataPointReaderTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/mult/MultSeriesRawDataPointReaderTest.java deleted file mode 100644 index 86299ac10972..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/mult/MultSeriesRawDataPointReaderTest.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.query.reader.series.SeriesRawDataPointReader; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.common.BatchData; -import org.apache.iotdb.tsfile.read.reader.IPointReader; - -import com.google.common.collect.Maps; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.IOException; -import java.util.Map; - -import static junit.framework.TestCase.assertEquals; - -public class MultSeriesRawDataPointReaderTest { - - private MultSeriesRawDataPointReader reader; - - @Before - public void setUp() throws IllegalPathException, IOException { - BatchData batchData = TestUtils.genBatchData(TSDataType.DOUBLE, 0, 100); - Map pointReaderMap = Maps.newHashMap(); - SeriesRawDataPointReader seriesRawDataBatchReader = - Mockito.mock(SeriesRawDataPointReader.class); - Mockito.when(seriesRawDataBatchReader.hasNextTimeValuePair()).thenReturn(true); - TimeValuePair timeValuePair = - new TimeValuePair(batchData.currentTime(), batchData.currentTsPrimitiveType()); - Mockito.when(seriesRawDataBatchReader.nextTimeValuePair()).thenReturn(timeValuePair); - pointReaderMap.put("root.a.b", seriesRawDataBatchReader); - pointReaderMap.put("root.a.c", seriesRawDataBatchReader); - reader = new MultSeriesRawDataPointReader(pointReaderMap); - } - - @Test - public void testMultSeriesReader() throws IOException, StorageEngineException { - boolean hasNext = this.reader.hasNextTimeValuePair("root.a.b"); - assertEquals(true, hasNext); - TimeValuePair timeValuePair = this.reader.nextTimeValuePair("root.a.b"); - assertEquals(0, timeValuePair.getTimestamp()); - assertEquals(0 * 1.0, timeValuePair.getValue().getDouble(), 0.0001); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/mult/RemoteMultSeriesReaderTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/mult/RemoteMultSeriesReaderTest.java deleted file mode 100644 index 79dde210a77e..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/query/reader/mult/RemoteMultSeriesReaderTest.java +++ /dev/null @@ -1,346 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.query.reader.mult; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.IClientManager; -import org.apache.iotdb.cluster.client.async.AsyncDataClient; -import org.apache.iotdb.cluster.client.sync.SyncDataClient; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.rpc.thrift.MultSeriesQueryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.utils.ClientUtils; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.rpc.RpcTransportFactory; -import org.apache.iotdb.rpc.TConfigurationConst; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.TimeValuePair; -import org.apache.iotdb.tsfile.read.common.BatchData; - -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TCompactProtocol; -import org.apache.thrift.protocol.TProtocolFactory; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransportException; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.ConcurrentSkipListSet; - -import static junit.framework.TestCase.assertEquals; -import static junit.framework.TestCase.assertFalse; -import static junit.framework.TestCase.assertTrue; - -public class RemoteMultSeriesReaderTest { - - private RemoteMultSeriesReader reader; - private List batchData; - private boolean batchUsed; - private MetaGroupMember metaGroupMember; - private Set failedNodes = new ConcurrentSkipListSet<>(); - private boolean prevUseAsyncServer; - private List paths; - private List dataTypes; - private TProtocolFactory protocolFactory; - - @Before - public void setUp() throws IllegalPathException { - paths = Lists.newArrayList(); - dataTypes = Lists.newArrayList(); - PartialPath 
partialPath = new PartialPath("root.a.b"); - paths.add(partialPath); - partialPath = new PartialPath("root.a.c"); - paths.add(partialPath); - dataTypes.add(TSDataType.DOUBLE); - dataTypes.add(TSDataType.INT32); - prevUseAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - batchData = Lists.newArrayList(); - batchData.add(TestUtils.genBatchData(TSDataType.DOUBLE, 0, 100)); - batchData.add(TestUtils.genBatchData(TSDataType.INT32, 0, 100)); - batchUsed = false; - metaGroupMember = new TestMetaGroupMember(); - protocolFactory = - ClusterDescriptor.getInstance().getConfig().isRpcThriftCompressionEnabled() - ? new TCompactProtocol.Factory() - : new TBinaryProtocol.Factory(); - } - - @After - public void tearDown() { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(prevUseAsyncServer); - } - - @Test - public void testAsyncMultSeriesReader() throws IOException, StorageEngineException { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(true); - PartitionGroup group = new PartitionGroup(); - setAsyncDataClient(); - group.add(TestUtils.getNode(0)); - group.add(TestUtils.getNode(1)); - group.add(TestUtils.getNode(2)); - - MultSeriesQueryRequest request = new MultSeriesQueryRequest(); - RemoteQueryContext context = new RemoteQueryContext(1); - - try { - MultDataSourceInfo sourceInfo = - new MultDataSourceInfo(group, paths, dataTypes, request, context, group); - sourceInfo.hasNextDataClient(Long.MIN_VALUE); - - reader = new RemoteMultSeriesReader(sourceInfo); - - for (int i = 0; i < 100; i++) { - assertTrue(reader.hasNextTimeValuePair(paths.get(0).getFullPath())); - TimeValuePair pair = reader.nextTimeValuePair(paths.get(0).getFullPath()); - assertEquals(i, pair.getTimestamp()); - assertEquals(i * 1.0, pair.getValue().getDouble(), 0.00001); - } - assertFalse(reader.hasNextTimeValuePair(paths.get(0).getFullPath())); - - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testSyncMultSeriesReader() throws IOException, StorageEngineException { - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(false); - setSyncDataClient(); - PartitionGroup group = new PartitionGroup(); - group.add(TestUtils.getNode(0)); - group.add(TestUtils.getNode(1)); - group.add(TestUtils.getNode(2)); - - MultSeriesQueryRequest request = new MultSeriesQueryRequest(); - RemoteQueryContext context = new RemoteQueryContext(1); - - try { - MultDataSourceInfo sourceInfo = - new MultDataSourceInfo(group, paths, dataTypes, request, context, group); - sourceInfo.hasNextDataClient(Long.MIN_VALUE); - - reader = new RemoteMultSeriesReader(sourceInfo); - - for (int i = 0; i < 100; i++) { - assertTrue(reader.hasNextTimeValuePair(paths.get(0).getFullPath())); - TimeValuePair pair = reader.nextTimeValuePair(paths.get(0).getFullPath()); - assertEquals(i, pair.getTimestamp()); - assertEquals(i * 1.0, pair.getValue().getDouble(), 0.00001); - } - assertFalse(reader.hasNextTimeValuePair(paths.get(0).getFullPath())); - - for (int i = 0; i < 100; i++) { - assertTrue(reader.hasNextTimeValuePair(paths.get(1).getFullPath())); - TimeValuePair pair = reader.nextTimeValuePair(paths.get(1).getFullPath()); - assertEquals(i, pair.getTimestamp()); - assertEquals(i * 1.0, pair.getValue().getInt(), 0.00001); - } - assertFalse(reader.hasNextTimeValuePair(paths.get(1).getFullPath())); - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testDefaultBatchStrategySelect() { - 
RemoteMultSeriesReader.DefaultBatchStrategy defaultBatchStrategy = - new RemoteMultSeriesReader.DefaultBatchStrategy(); - Map cachedBatches = Maps.newHashMap(); - Queue queue = new ConcurrentLinkedQueue(); - batchData.forEach( - data -> { - queue.add(data); - }); - cachedBatches.put("root.a.b", queue); - assertEquals(1, defaultBatchStrategy.selectBatchPaths(cachedBatches).size()); - } - - private void setAsyncDataClient() { - ClusterIoTDB.getInstance() - .setClientManager( - new IClientManager() { - @Override - public RaftService.AsyncClient borrowAsyncClient(Node node, ClientCategory category) - throws IOException { - return new AsyncDataClient(null, null, node, ClientCategory.DATA) { - @Override - public void fetchMultSeries( - RaftNode header, - long readerId, - List paths, - AsyncMethodCallback> resultHandler) - throws TException { - if (failedNodes.contains(node)) { - throw new TException("Node down."); - } - - new Thread( - () -> { - Map stringByteBufferMap = Maps.newHashMap(); - if (batchUsed) { - paths.forEach( - path -> { - stringByteBufferMap.put(path, ByteBuffer.allocate(0)); - }); - } else { - batchUsed = true; - - for (int i = 0; i < batchData.size(); i++) { - stringByteBufferMap.put( - paths.get(i), generateByteBuffer(batchData.get(i))); - } - } - resultHandler.onComplete(stringByteBufferMap); - }) - .start(); - } - - @Override - public void queryMultSeries( - MultSeriesQueryRequest request, AsyncMethodCallback resultHandler) - throws TException { - if (failedNodes.contains(node)) { - throw new TException("Node down."); - } - - new Thread(() -> resultHandler.onComplete(1L)).start(); - } - }; - } - - @Override - public RaftService.Client borrowSyncClient(Node node, ClientCategory category) { - return null; - } - - @Override - public void returnAsyncClient( - RaftService.AsyncClient client, Node node, ClientCategory category) {} - - @Override - public void returnSyncClient( - RaftService.Client client, Node node, ClientCategory category) {} - }); - } - - private void setSyncDataClient() { - ClusterIoTDB.getInstance() - .setClientManager( - new IClientManager() { - @Override - public RaftService.AsyncClient borrowAsyncClient(Node node, ClientCategory category) - throws IOException { - return null; - } - - @Override - public RaftService.Client borrowSyncClient(Node node, ClientCategory category) - throws IOException { - try { - TSocket socket = - new TSocket( - TConfigurationConst.defaultTConfiguration, - node.getInternalIp(), - ClientUtils.getPort(node, category), - ClusterConstant.getConnectionTimeoutInMS()); - return new SyncDataClient( - protocolFactory.getProtocol( - RpcTransportFactory.INSTANCE.getTransport(socket))) { - @Override - public Map fetchMultSeries( - RaftNode header, long readerId, List paths) throws TException { - if (failedNodes.contains(node)) { - throw new TException("Node down."); - } - - Map stringByteBufferMap = Maps.newHashMap(); - if (batchUsed) { - paths.forEach( - path -> { - stringByteBufferMap.put(path, ByteBuffer.allocate(0)); - }); - } else { - batchUsed = true; - for (int i = 0; i < batchData.size(); i++) { - stringByteBufferMap.put( - paths.get(i), generateByteBuffer(batchData.get(i))); - } - } - return stringByteBufferMap; - } - - @Override - public long queryMultSeries(MultSeriesQueryRequest request) throws TException { - if (failedNodes.contains(node)) { - throw new TException("Node down."); - } - - return 1L; - } - }; - } catch (TTransportException e) { - throw new IOException(e); - } - } - - @Override - public void returnAsyncClient( - 
RaftService.AsyncClient client, Node node, ClientCategory category) {} - - @Override - public void returnSyncClient( - RaftService.Client client, Node node, ClientCategory category) {} - }); - } - - private ByteBuffer generateByteBuffer(BatchData batchData) { - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - SerializeUtils.serializeBatchData(batchData, dataOutputStream); - ByteBuffer byteBuffer = ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - return byteBuffer; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServerTest.java deleted file mode 100644 index 6342ab720035..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServerTest.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.server.clusterinfo; - -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.rpc.thrift.ClusterInfoService; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.rpc.RpcTransportFactory; - -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TTransportException; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; - -public class ClusterInfoServerTest { - - ClusterInfoServiceImplTest test; - ClusterInfoServer service; - - @Before - public void setUp() throws Exception { - test = new ClusterInfoServiceImplTest(); - test.setUp(); - service = new ClusterInfoServer(); - service.start(); - } - - @After - public void tearDown() throws MetadataException, IOException, StorageEngineException { - test.tearDown(); - service.stop(); - } - - @Test - public void testConnect() { - TTransport transport = null; - try { - transport = - RpcTransportFactory.INSTANCE.getTransport( - new TSocket( - IoTDBDescriptor.getInstance().getConfig().getRpcAddress(), - ClusterDescriptor.getInstance().getConfig().getClusterInfoRpcPort())); - } catch (TTransportException e) { - Assert.fail(e.getMessage()); - } - - try { - transport.open(); - } catch (TTransportException e) { - Assert.fail(e.getMessage()); - } - // connection success means OK. 
- ClusterInfoService.Client client = - new ClusterInfoService.Client(new TBinaryProtocol(transport)); - Assert.assertNotNull(client); - // client's methods have been tested on ClusterInfoServiceImplTest - transport.close(); - try { - transport = - RpcTransportFactory.INSTANCE.getTransport( - new TSocket( - IoTDBDescriptor.getInstance().getConfig().getRpcAddress(), - ClusterDescriptor.getInstance().getConfig().getClusterInfoRpcPort())); - transport.open(); - - // connection success means OK. - client = new ClusterInfoService.Client(new TBinaryProtocol(transport)); - Assert.assertNotNull(client); - // client's methods have been tested on ClusterInfoServiceImplTest - transport.close(); - } catch (TTransportException e) { - Assert.fail(e.getMessage()); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServiceImplTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServiceImplTest.java deleted file mode 100644 index 93e35c12da92..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/clusterinfo/ClusterInfoServiceImplTest.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.clusterinfo; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.rpc.thrift.DataPartitionEntry; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMemberTest; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.apache.thrift.TException; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; - -public class ClusterInfoServiceImplTest { - - ClusterInfoServiceImpl impl; - - @Before - public void setUp() throws Exception { - MetaGroupMemberTest metaGroupMemberTest = new MetaGroupMemberTest(); - // will create a cluster with 10 nodes, ip: 0,10,20,...100 - metaGroupMemberTest.setUp(); - MetaGroupMember metaGroupMember = metaGroupMemberTest.getTestMetaGroupMember(); - - ClusterIoTDB.getInstance().setMetaGroupMember(metaGroupMember); - - ClusterIoTDB.getInstance() - .getIotdb() - .schemaProcessor - .setStorageGroup(new PartialPath("root", "sg")); - // metaClusterServer.getMember() - impl = new ClusterInfoServiceImpl(); - } - - @After - public void tearDown() throws MetadataException, IOException, StorageEngineException { - ClusterIoTDB.getInstance() - .getIotdb() - .schemaProcessor - .deleteStorageGroups(Collections.singletonList(new PartialPath("root", "sg"))); - ClusterIoTDB.getInstance().getMetaGroupMember().stop(); - EnvironmentUtils.cleanEnv(); - } - - @Test - public void getRing() throws TException { - List nodes = impl.getRing(); - Assert.assertEquals(10, nodes.size()); - } - - @Test - public void getDataPartition() { - List entries = impl.getDataPartition("root.sg", 0, 100); - Assert.assertEquals(1, entries.size()); - List nodes = entries.get(0).getNodes(); - Assert.assertEquals(50, nodes.get(0).getNodeIdentifier()); - Assert.assertEquals(60, nodes.get(1).getNodeIdentifier()); - } - - @Test - public void getMetaPartition() throws TException { - List nodes = impl.getMetaPartition("root.sg"); - Assert.assertEquals(50, nodes.get(0).getNodeIdentifier()); - Assert.assertEquals(60, nodes.get(1).getNodeIdentifier()); - } - - @Test - public void getInstrumentingInfo() throws TException { - // hard to test the content of the instrumentInfo. - Assert.assertNotNull(impl.getInstrumentingInfo()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/AppendGroupEntryHandlerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/AppendGroupEntryHandlerTest.java deleted file mode 100644 index e468560c6e43..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/AppendGroupEntryHandlerTest.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.common.TestException; -import org.apache.iotdb.cluster.common.TestLog; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class AppendGroupEntryHandlerTest { - - private int REPLICATION_NUM; - private int prevReplicationNum; - private RaftMember member; - - @Before - public void setUp() { - prevReplicationNum = ClusterDescriptor.getInstance().getConfig().getReplicationNum(); - ClusterDescriptor.getInstance().getConfig().setReplicationNum(2); - REPLICATION_NUM = ClusterDescriptor.getInstance().getConfig().getReplicationNum(); - member = new TestMetaGroupMember(); - } - - @After - public void tearDown() throws IOException, StorageEngineException { - ClusterDescriptor.getInstance().getConfig().setReplicationNum(prevReplicationNum); - member.stop(); - member.closeLogManager(); - EnvironmentUtils.cleanAllDir(); - } - - @Test - public void testAgreement() throws InterruptedException { - int[] groupReceivedCounter = new int[10]; - for (int i = 0; i < 10; i++) { - groupReceivedCounter[i] = REPLICATION_NUM / 2; - } - AtomicBoolean leadershipStale = new AtomicBoolean(false); - AtomicLong newLeaderTerm = new AtomicLong(-1); - Log testLog = new TestLog(); - synchronized (groupReceivedCounter) { - for (int i = 0; i < 10; i += 2) { - AppendGroupEntryHandler handler = - new AppendGroupEntryHandler( - groupReceivedCounter, - i, - TestUtils.getNode(i), - leadershipStale, - testLog, - newLeaderTerm, - member); - new Thread(() -> handler.onComplete(Response.RESPONSE_AGREE)).start(); - } - groupReceivedCounter.wait(); - } - for (int i = 0; i < 10; i++) { - assertEquals(0, groupReceivedCounter[i]); - } - assertFalse(leadershipStale.get()); - assertEquals(-1, newLeaderTerm.get()); - } - - @Test - public void testNoAgreement() throws InterruptedException { - int[] groupReceivedCounter = new int[10]; - for (int i = 0; i < 10; i++) { - groupReceivedCounter[i] = REPLICATION_NUM; - } - AtomicBoolean leadershipStale = new AtomicBoolean(false); - AtomicLong newLeaderTerm = new AtomicLong(-1); - Log testLog = new TestLog(); - synchronized (groupReceivedCounter) { - for (int i = 0; i < 5; i++) { - AppendGroupEntryHandler handler = - new AppendGroupEntryHandler( - groupReceivedCounter, - i, - TestUtils.getNode(i), - leadershipStale, - testLog, - newLeaderTerm, - member); - 
handler.onComplete(Response.RESPONSE_AGREE); - } - } - for (int i = 0; i < 10; i++) { - if (i < 5) { - assertEquals(Math.max(0, REPLICATION_NUM - (5 - i)), groupReceivedCounter[i]); - } else { - assertEquals(Math.min(10 - i, REPLICATION_NUM), groupReceivedCounter[i]); - } - } - assertFalse(leadershipStale.get()); - assertEquals(-1, newLeaderTerm.get()); - } - - @Test - public void testLeadershipStale() throws InterruptedException { - int[] groupReceivedCounter = new int[10]; - for (int i = 0; i < 10; i++) { - groupReceivedCounter[i] = REPLICATION_NUM / 2; - } - AtomicBoolean leadershipStale = new AtomicBoolean(false); - AtomicLong newLeaderTerm = new AtomicLong(-1); - Log testLog = new TestLog(); - synchronized (groupReceivedCounter) { - AppendGroupEntryHandler handler = - new AppendGroupEntryHandler( - groupReceivedCounter, - 0, - TestUtils.getNode(0), - leadershipStale, - testLog, - newLeaderTerm, - member); - new Thread(() -> handler.onComplete(100L)).start(); - groupReceivedCounter.wait(); - } - for (int i = 0; i < 10; i++) { - assertEquals(REPLICATION_NUM / 2, groupReceivedCounter[i]); - } - assertTrue(leadershipStale.get()); - assertEquals(100, newLeaderTerm.get()); - } - - @Test - public void testError() throws InterruptedException { - int[] groupReceivedCounter = new int[10]; - for (int i = 0; i < 10; i++) { - groupReceivedCounter[i] = REPLICATION_NUM / 2; - } - AtomicBoolean leadershipStale = new AtomicBoolean(false); - AtomicLong newLeaderTerm = new AtomicLong(-1); - Log testLog = new TestLog(); - - AppendGroupEntryHandler handler = - new AppendGroupEntryHandler( - groupReceivedCounter, - 0, - TestUtils.getNode(0), - leadershipStale, - testLog, - newLeaderTerm, - member); - handler.onError(new TestException()); - - for (int i = 0; i < 10; i++) { - assertEquals(REPLICATION_NUM / 2, groupReceivedCounter[i]); - } - assertFalse(leadershipStale.get()); - assertEquals(-1, newLeaderTerm.get()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/AppendNodeEntryHandlerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/AppendNodeEntryHandlerTest.java deleted file mode 100644 index 6ee92f50d1ed..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/AppendNodeEntryHandlerTest.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.common.TestException; -import org.apache.iotdb.cluster.common.TestLog; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.cluster.server.monitor.Peer; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class AppendNodeEntryHandlerTest { - - private RaftMember member; - - @Before - public void setUp() { - this.member = new TestMetaGroupMember(); - } - - @After - public void tearDown() throws IOException, StorageEngineException { - member.closeLogManager(); - member.stop(); - EnvironmentUtils.cleanAllDir(); - } - - @Test - public void testAgreement() throws InterruptedException { - AtomicLong receiverTerm = new AtomicLong(-1); - AtomicBoolean leadershipStale = new AtomicBoolean(false); - Log log = new TestLog(); - - int replicationNum = ClusterDescriptor.getInstance().getConfig().getReplicationNum(); - try { - ClusterDescriptor.getInstance().getConfig().setReplicationNum(10); - AtomicInteger quorum = new AtomicInteger(5); - Peer peer = new Peer(1); - synchronized (quorum) { - for (int i = 0; i < 10; i++) { - AppendNodeEntryHandler handler = new AppendNodeEntryHandler(); - handler.setLeaderShipStale(leadershipStale); - handler.setVoteCounter(quorum); - handler.setLog(log); - handler.setMember(member); - handler.setReceiverTerm(receiverTerm); - handler.setReceiver(TestUtils.getNode(i)); - handler.setPeer(peer); - long resp = i >= 5 ? 
Response.RESPONSE_AGREE : Response.RESPONSE_LOG_MISMATCH; - new Thread(() -> handler.onComplete(resp)).start(); - } - quorum.wait(); - } - assertEquals(-1, receiverTerm.get()); - assertFalse(leadershipStale.get()); - assertEquals(0, quorum.get()); - } finally { - ClusterDescriptor.getInstance().getConfig().setReplicationNum(replicationNum); - } - } - - @Test - public void testNoAgreement() { - AtomicLong receiverTerm = new AtomicLong(-1); - AtomicBoolean leadershipStale = new AtomicBoolean(false); - Log log = new TestLog(); - AtomicInteger quorum = new AtomicInteger(5); - Peer peer = new Peer(1); - - for (int i = 0; i < 3; i++) { - AppendNodeEntryHandler handler = new AppendNodeEntryHandler(); - handler.setLeaderShipStale(leadershipStale); - handler.setVoteCounter(quorum); - handler.setLog(log); - handler.setMember(member); - handler.setReceiverTerm(receiverTerm); - handler.setReceiver(TestUtils.getNode(i)); - handler.setPeer(peer); - handler.onComplete(Response.RESPONSE_AGREE); - } - - assertEquals(-1, receiverTerm.get()); - assertFalse(leadershipStale.get()); - assertEquals(2, quorum.get()); - } - - @Test - public void testLeadershipStale() throws InterruptedException { - AtomicLong receiverTerm = new AtomicLong(-1); - AtomicBoolean leadershipStale = new AtomicBoolean(false); - Log log = new TestLog(); - AtomicInteger quorum = new AtomicInteger(5); - Peer peer = new Peer(1); - - synchronized (quorum) { - AppendNodeEntryHandler handler = new AppendNodeEntryHandler(); - handler.setLeaderShipStale(leadershipStale); - handler.setVoteCounter(quorum); - handler.setLog(log); - handler.setMember(member); - handler.setReceiverTerm(receiverTerm); - handler.setReceiver(TestUtils.getNode(0)); - handler.setPeer(peer); - new Thread(() -> handler.onComplete(100L)).start(); - quorum.wait(); - } - assertEquals(100, receiverTerm.get()); - assertTrue(leadershipStale.get()); - assertEquals(5, quorum.get()); - } - - @Test - public void testError() { - AtomicLong receiverTerm = new AtomicLong(-1); - AtomicBoolean leadershipStale = new AtomicBoolean(false); - Log log = new TestLog(); - int replicationNum = ClusterDescriptor.getInstance().getConfig().getReplicationNum(); - ClusterDescriptor.getInstance().getConfig().setReplicationNum(10); - try { - AtomicInteger quorum = new AtomicInteger(5); - Peer peer = new Peer(1); - - AppendNodeEntryHandler handler = new AppendNodeEntryHandler(); - handler.setLeaderShipStale(leadershipStale); - handler.setVoteCounter(quorum); - handler.setLog(log); - handler.setMember(member); - handler.setReceiverTerm(receiverTerm); - handler.setReceiver(TestUtils.getNode(0)); - handler.setPeer(peer); - handler.onError(new TestException()); - - assertEquals(-1, receiverTerm.get()); - assertFalse(leadershipStale.get()); - assertEquals(5, quorum.get()); - } finally { - ClusterDescriptor.getInstance().getConfig().setReplicationNum(replicationNum); - } - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/ElectionHandlerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/ElectionHandlerTest.java deleted file mode 100644 index 184c98c96c37..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/ElectionHandlerTest.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.common.TestException; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class ElectionHandlerTest { - - private RaftMember member; - - @Before - public void setUp() { - member = new TestMetaGroupMember(); - } - - @After - public void tearDown() throws IOException, StorageEngineException { - member.closeLogManager(); - member.stop(); - EnvironmentUtils.cleanAllDir(); - } - - @Test - public void testAgreement() throws InterruptedException { - AtomicBoolean terminated = new AtomicBoolean(false); - AtomicBoolean electionValid = new AtomicBoolean(false); - AtomicInteger failingVoteCounter = new AtomicInteger(5); - long electorTerm = 10; - AtomicInteger quorum = new AtomicInteger(5); - synchronized (member.getTerm()) { - for (int i = 0; i < 5; i++) { - ElectionHandler handler = - new ElectionHandler( - member, - TestUtils.getNode(i), - electorTerm, - quorum, - terminated, - electionValid, - failingVoteCounter); - new Thread(() -> handler.onComplete(Response.RESPONSE_AGREE)).start(); - } - member.getTerm().wait(); - } - assertEquals(0, quorum.get()); - assertTrue(electionValid.get()); - assertTrue(terminated.get()); - } - - @Test - public void testLogMismatch() { - AtomicBoolean terminated = new AtomicBoolean(false); - AtomicBoolean electionValid = new AtomicBoolean(false); - long electorTerm = 10; - AtomicInteger quorum = new AtomicInteger(5); - for (int i = 0; i < 3; i++) { - ElectionHandler handler = - new ElectionHandler( - member, - TestUtils.getNode(i), - electorTerm, - quorum, - terminated, - electionValid, - new AtomicInteger(5)); - handler.onComplete(Response.RESPONSE_AGREE); - } - for (int i = 6; i < 10; i++) { - ElectionHandler handler = - new ElectionHandler( - member, - TestUtils.getNode(i), - electorTerm, - quorum, - terminated, - electionValid, - new AtomicInteger(5)); - handler.onComplete(electorTerm - 3); - } - assertFalse(electionValid.get()); - } - - @Test - public void testTermTooSmall() throws InterruptedException { - AtomicBoolean terminated = new AtomicBoolean(false); - AtomicBoolean electionValid = new AtomicBoolean(false); - long electorTerm = 10; - AtomicInteger quorum = new AtomicInteger(5); - synchronized (member.getTerm()) { - for (int i = 0; i < 3; i++) { - 
ElectionHandler handler = - new ElectionHandler( - member, - TestUtils.getNode(i), - electorTerm, - quorum, - terminated, - electionValid, - new AtomicInteger(5)); - new Thread(() -> handler.onComplete(Response.RESPONSE_AGREE)).start(); - } - for (int i = 3; i < 6; i++) { - ElectionHandler handler = - new ElectionHandler( - member, - TestUtils.getNode(i), - electorTerm, - quorum, - terminated, - electionValid, - new AtomicInteger(5)); - new Thread(() -> handler.onComplete(electorTerm + 3)).start(); - } - member.getTerm().wait(); - } - assertFalse(electionValid.get()); - assertTrue(terminated.get()); - assertEquals(electorTerm + 3, member.getTerm().get()); - } - - @Test - public void testError() { - AtomicBoolean terminated = new AtomicBoolean(false); - AtomicBoolean electionValid = new AtomicBoolean(false); - long electorTerm = 10; - AtomicInteger quorum = new AtomicInteger(5); - - ElectionHandler handler = - new ElectionHandler( - member, - TestUtils.getNode(0), - electorTerm, - quorum, - terminated, - electionValid, - new AtomicInteger(5)); - handler.onError(new TestException()); - - assertEquals(5, quorum.get()); - assertFalse(electionValid.get()); - assertFalse(terminated.get()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/GenericHandlerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/GenericHandlerTest.java deleted file mode 100644 index 0e64d7c8c109..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/GenericHandlerTest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.common.TestException; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import org.junit.Test; - -import java.util.concurrent.atomic.AtomicReference; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -public class GenericHandlerTest { - - @Test - public void testComplete() throws InterruptedException { - Node node = TestUtils.getNode(1); - AtomicReference result = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(node, result); - synchronized (result) { - new Thread(() -> handler.onComplete("Hello world!")).start(); - result.wait(); - } - assertEquals("Hello world!", result.get()); - } - - @Test - public void testError() throws InterruptedException { - Node node = TestUtils.getNode(1); - AtomicReference result = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(node, result); - synchronized (result) { - new Thread(() -> handler.onError(new TestException())).start(); - result.wait(); - } - assertNull(result.get()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/HeartbeatHandlerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/HeartbeatHandlerTest.java deleted file mode 100644 index 8c4f7f1ed7a4..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/HeartbeatHandlerTest.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.common.TestException; -import org.apache.iotdb.cluster.common.TestLogManager; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatResponse; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.utils.Constants; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; - -import static junit.framework.TestCase.assertEquals; -import static junit.framework.TestCase.assertFalse; -import static junit.framework.TestCase.assertTrue; - -public class HeartbeatHandlerTest { - - private MetaGroupMember metaGroupMember; - private boolean catchUpFlag; - private long looseInconsistentNum = 5; - - @Before - public void setUp() { - metaGroupMember = - new TestMetaGroupMember() { - @Override - public void catchUp(Node follower, long lastLogIdx) { - synchronized (metaGroupMember) { - catchUpFlag = true; - metaGroupMember.notifyAll(); - } - } - }; - metaGroupMember.initPeerMap(); - metaGroupMember.setLogManager(new TestLogManager(1)); - } - - @After - public void tearDown() throws IOException, StorageEngineException { - metaGroupMember.closeLogManager(); - metaGroupMember.stop(); - EnvironmentUtils.cleanAllDir(); - } - - @Test - public void testComplete() { - HeartbeatHandler handler = new HeartbeatHandler(metaGroupMember, TestUtils.getNode(1)); - HeartBeatResponse response = new HeartBeatResponse(); - response.setTerm(Response.RESPONSE_AGREE); - response.setLastLogTerm(-2); - response.setFollower( - new Node("192.168.0.6", 9003, 6, 40010, Constants.RPC_PORT, "192.168.0.6")); - response.setInstallingSnapshot(false); - catchUpFlag = false; - for (int i = 0; i < looseInconsistentNum; i++) { - handler.onComplete(response); - } - assertTrue(catchUpFlag); - } - - @Test - public void testSnapshotRequestOmitted() { - HeartbeatHandler handler = new HeartbeatHandler(metaGroupMember, TestUtils.getNode(1)); - HeartBeatResponse response = new HeartBeatResponse(); - response.setTerm(Response.RESPONSE_AGREE); - response.setLastLogTerm(-2); - response.setFollower( - new Node("192.168.0.6", 9003, 6, 40010, Constants.RPC_PORT, "192.168.0.6")); - response.setInstallingSnapshot(true); - catchUpFlag = false; - for (int i = 0; i < looseInconsistentNum; i++) { - handler.onComplete(response); - } - assertFalse(catchUpFlag); - } - - @Test - public void testLeaderShipStale() { - HeartbeatHandler handler = new HeartbeatHandler(metaGroupMember, TestUtils.getNode(1)); - HeartBeatResponse response = new HeartBeatResponse(); - response.setTerm(10); - handler.onComplete(response); - assertEquals(10, metaGroupMember.getTerm().get()); - } - - @Test - public void testError() { - HeartbeatHandler handler = new HeartbeatHandler(metaGroupMember, TestUtils.getNode(1)); - catchUpFlag = false; - handler.onError(new TestException()); - assertFalse(catchUpFlag); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/JoinClusterHandlerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/JoinClusterHandlerTest.java deleted file mode 100644 index 5503852ab1f3..000000000000 --- 
a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/JoinClusterHandlerTest.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.common.TestException; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.rpc.thrift.AddNodeResponse; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.Response; - -import org.junit.Test; - -import java.util.concurrent.atomic.AtomicReference; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -public class JoinClusterHandlerTest { - - @Test - public void testComplete() throws InterruptedException { - Node contact = TestUtils.getNode(0); - AtomicReference result = new AtomicReference<>(); - AddNodeResponse response = new AddNodeResponse(); - response.setRespNum((int) Response.RESPONSE_AGREE); - response.setPartitionTableBytes(new byte[4096]); - JoinClusterHandler handler = new JoinClusterHandler(); - handler.setContact(contact); - handler.setResponse(result); - synchronized (result) { - new Thread(() -> handler.onComplete(response)).start(); - result.wait(); - } - assertEquals(response, result.get()); - } - - @Test - public void testError() throws InterruptedException { - Node contact = TestUtils.getNode(0); - AtomicReference result = new AtomicReference<>(); - JoinClusterHandler handler = new JoinClusterHandler(); - handler.setContact(contact); - handler.setResponse(result); - synchronized (result) { - new Thread(() -> handler.onError(new TestException())).start(); - result.wait(); - } - assertNull(result.get()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/LogCatchUpHandlerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/LogCatchUpHandlerTest.java deleted file mode 100644 index bf4c12bebd0e..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/LogCatchUpHandlerTest.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.common.TestException; -import org.apache.iotdb.cluster.common.TestLog; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.concurrent.atomic.AtomicBoolean; - -import static junit.framework.TestCase.assertEquals; -import static junit.framework.TestCase.assertFalse; -import static junit.framework.TestCase.assertTrue; - -public class LogCatchUpHandlerTest { - - private RaftMember member; - - @Before - public void setUp() { - member = new TestMetaGroupMember(); - } - - @After - public void tearDown() throws IOException, StorageEngineException { - member.closeLogManager(); - member.stop(); - EnvironmentUtils.cleanAllDir(); - } - - @Test - public void testComplete() throws InterruptedException { - Node follower = TestUtils.getNode(1); - Log log = new TestLog(); - AtomicBoolean appendSucceed = new AtomicBoolean(); - LogCatchUpHandler handler = new LogCatchUpHandler(); - handler.setAppendSucceed(appendSucceed); - handler.setFollower(follower); - handler.setLog(log); - handler.setRaftMember(member); - synchronized (appendSucceed) { - new Thread(() -> handler.onComplete(Response.RESPONSE_AGREE)).start(); - appendSucceed.wait(); - } - assertTrue(appendSucceed.get()); - } - - @Test - public void testLogMismatch() throws InterruptedException { - Node follower = TestUtils.getNode(1); - Log log = new TestLog(); - AtomicBoolean appendSucceed = new AtomicBoolean(); - LogCatchUpHandler handler = new LogCatchUpHandler(); - handler.setAppendSucceed(appendSucceed); - handler.setFollower(follower); - handler.setLog(log); - handler.setRaftMember(member); - synchronized (appendSucceed) { - new Thread(() -> handler.onComplete(Response.RESPONSE_LOG_MISMATCH)).start(); - appendSucceed.wait(); - } - assertTrue(appendSucceed.get()); - } - - @Test - public void testLeadershipStale() throws InterruptedException { - Node follower = TestUtils.getNode(1); - Log log = new TestLog(); - AtomicBoolean appendSucceed = new AtomicBoolean(); - LogCatchUpHandler handler = new LogCatchUpHandler(); - handler.setAppendSucceed(appendSucceed); - handler.setFollower(follower); - handler.setLog(log); - handler.setRaftMember(member); - synchronized (appendSucceed) { - new Thread(() -> handler.onComplete(100L)).start(); - appendSucceed.wait(); - } - assertFalse(appendSucceed.get()); - assertEquals(100, member.getTerm().get()); - } - - @Test - public void testError() throws InterruptedException { - Node follower = TestUtils.getNode(1); - Log log = new TestLog(); - AtomicBoolean appendSucceed = new AtomicBoolean(); - LogCatchUpHandler 
handler = new LogCatchUpHandler(); - handler.setAppendSucceed(appendSucceed); - handler.setFollower(follower); - handler.setLog(log); - handler.setRaftMember(member); - synchronized (appendSucceed) { - new Thread(() -> handler.onError(new TestException())).start(); - appendSucceed.wait(); - } - assertFalse(appendSucceed.get()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/PullMeasurementSchemaHandlerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/PullMeasurementSchemaHandlerTest.java deleted file mode 100644 index cb8129c579a3..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/PullMeasurementSchemaHandlerTest.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.common.TestException; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaResp; -import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema; - -import org.junit.Test; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.atomic.AtomicReference; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -public class PullMeasurementSchemaHandlerTest { - - @Test - public void testComplete() throws InterruptedException { - Node owner = TestUtils.getNode(1); - String prefixPath = "root"; - AtomicReference> result = new AtomicReference<>(); - List measurementSchemas = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - measurementSchemas.add(TestUtils.getTestMeasurementSchema(i)); - } - - PullMeasurementSchemaHandler handler = - new PullMeasurementSchemaHandler(owner, Collections.singletonList(prefixPath), result); - synchronized (result) { - new Thread( - () -> { - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(outputStream); - try { - dataOutputStream.writeInt(measurementSchemas.size()); - for (IMeasurementSchema measurementSchema : measurementSchemas) { - measurementSchema.partialSerializeTo(dataOutputStream); - } - } catch (IOException e) { - // ignore - } - PullSchemaResp resp = new PullSchemaResp(); - resp.setSchemaBytes(outputStream.toByteArray()); - handler.onComplete(resp); - }) - .start(); - result.wait(); - } - assertEquals(measurementSchemas, result.get()); - } - - @Test - public void testError() throws InterruptedException { - Node owner = 
TestUtils.getNode(1); - String prefixPath = "root"; - AtomicReference> result = new AtomicReference<>(); - - PullMeasurementSchemaHandler handler = - new PullMeasurementSchemaHandler(owner, Collections.singletonList(prefixPath), result); - synchronized (result) { - new Thread(() -> handler.onError(new TestException())).start(); - result.wait(); - } - assertNull(result.get()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/PullSnapshotHandlerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/PullSnapshotHandlerTest.java deleted file mode 100644 index a8c04fe47cec..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/PullSnapshotHandlerTest.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.common.TestException; -import org.apache.iotdb.cluster.common.TestSnapshot; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotResp; - -import org.junit.Test; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicReference; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -public class PullSnapshotHandlerTest { - - @Test - public void testSnapshot() throws InterruptedException { - AtomicReference> result = new AtomicReference<>(); - Node owner = TestUtils.getNode(1); - List slots = new ArrayList<>(); - Map snapshotMap = new HashMap<>(); - Map snapshotBufferMap = new HashMap<>(); - for (int i = 0; i < 10; i++) { - slots.add(i); - TestSnapshot snapshot = new TestSnapshot(i); - snapshotMap.put(i, snapshot); - snapshotBufferMap.put(i, snapshot.serialize()); - } - - PullSnapshotHandler handler = - new PullSnapshotHandler<>(result, owner, slots, TestSnapshot.Factory.INSTANCE); - synchronized (result) { - new Thread( - () -> { - PullSnapshotResp resp = new PullSnapshotResp(); - resp.setSnapshotBytes(snapshotBufferMap); - handler.onComplete(resp); - }) - .start(); - result.wait(); - } - assertEquals(snapshotMap, result.get()); - } - - @Test - public void testError() throws InterruptedException { - AtomicReference> result = new AtomicReference<>(); - Node owner = TestUtils.getNode(1); - List slots = new ArrayList<>(); - PullSnapshotHandler handler = - new PullSnapshotHandler<>(result, owner, slots, TestSnapshot.Factory.INSTANCE); - synchronized (result) { - new Thread(() -> handler.onError(new TestException())).start(); - result.wait(); - } - 
assertNull(result.get()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/SnapshotCatchUpHandlerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/SnapshotCatchUpHandlerTest.java deleted file mode 100644 index 418724412136..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/caller/SnapshotCatchUpHandlerTest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.caller; - -import org.apache.iotdb.cluster.common.TestException; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.rpc.thrift.Node; - -import org.junit.Test; - -import java.util.concurrent.atomic.AtomicBoolean; - -import static junit.framework.TestCase.assertFalse; -import static junit.framework.TestCase.assertTrue; - -public class SnapshotCatchUpHandlerTest { - - @Test - public void testComplete() throws InterruptedException { - AtomicBoolean succeeded = new AtomicBoolean(false); - Node receiver = TestUtils.getNode(0); - SnapshotCatchUpHandler handler = new SnapshotCatchUpHandler(succeeded, receiver, null); - synchronized (succeeded) { - new Thread(() -> handler.onComplete(null)).start(); - succeeded.wait(); - } - assertTrue(succeeded.get()); - } - - @Test - public void testError() throws InterruptedException { - AtomicBoolean succeeded = new AtomicBoolean(false); - Node receiver = TestUtils.getNode(0); - SnapshotCatchUpHandler handler = new SnapshotCatchUpHandler(succeeded, receiver, null); - synchronized (succeeded) { - new Thread(() -> handler.onError(new TestException())).start(); - succeeded.wait(); - } - assertFalse(succeeded.get()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/forwarder/ForwardPlanHandlerTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/forwarder/ForwardPlanHandlerTest.java deleted file mode 100644 index 4edf4317b2f5..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/handlers/forwarder/ForwardPlanHandlerTest.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.handlers.forwarder; - -import org.apache.iotdb.cluster.common.TestException; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan; - -import org.junit.Test; - -import java.util.concurrent.atomic.AtomicReference; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertSame; - -public class ForwardPlanHandlerTest { - - @Test - public void testComplete() throws IllegalPathException { - AtomicReference result = new AtomicReference<>(); - PhysicalPlan plan = new SetStorageGroupPlan(new PartialPath("root.test")); - ForwardPlanHandler handler = new ForwardPlanHandler(result, plan, TestUtils.getNode(0)); - - TSStatus status = new TSStatus(); - handler.onComplete(status); - assertSame(status, result.get()); - } - - @Test - public void testError() throws IllegalPathException { - AtomicReference result = new AtomicReference<>(); - PhysicalPlan plan = new SetStorageGroupPlan(new PartialPath("root.test")); - ForwardPlanHandler handler = new ForwardPlanHandler(result, plan, TestUtils.getNode(0)); - - handler.onError(new TestException()); - assertEquals("Don't worry, this exception is faked", result.get().getMessage()); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/heartbeat/DataHeartbeatThreadTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/heartbeat/DataHeartbeatThreadTest.java deleted file mode 100644 index adb8a54e3483..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/heartbeat/DataHeartbeatThreadTest.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.heartbeat; - -import org.apache.iotdb.cluster.common.TestAsyncClient; -import org.apache.iotdb.cluster.common.TestDataGroupMember; -import org.apache.iotdb.cluster.common.TestLogManager; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.manage.RaftLogManager; -import org.apache.iotdb.cluster.log.manage.serializable.SyncLogDequeSerializer; -import org.apache.iotdb.cluster.rpc.thrift.ElectionRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatResponse; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.member.DataGroupMember; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.db.exception.StorageEngineException; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.After; -import org.junit.Before; - -import java.io.File; -import java.io.IOException; -import java.util.List; - -import static org.junit.Assert.assertEquals; - -public class DataHeartbeatThreadTest extends HeartbeatThreadTest { - - private TestLogManager dataLogManager; - private MetaGroupMember metaGroupMember = (MetaGroupMember) super.getMember(); - - @Override - RaftMember getMember() { - return new TestDataGroupMember() { - @Override - public RaftLogManager getLogManager() { - return dataLogManager; - } - - @Override - public void updateHardState(long currentTerm, Node leader) {} - - @Override - public AsyncClient getAsyncClient(Node node) { - return getClient(node); - } - - @Override - public AsyncClient getAsyncHeartbeatClient(Node node) { - return getClient(node); - } - - @Override - public MetaGroupMember getMetaGroupMember() { - return metaGroupMember; - } - }; - } - - @Override - AsyncClient getClient(Node node) { - return new TestAsyncClient(node.nodeIdentifier) { - @Override - public void sendHeartbeat( - HeartBeatRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - if (testHeartbeat) { - assertEquals(TestUtils.getNode(0), request.getLeader()); - assertEquals(13, request.getCommitLogIndex()); - assertEquals(10, request.getTerm()); - assertEquals(TestUtils.getRaftNode(0, 0), request.getHeader()); - synchronized (receivedNodes) { - receivedNodes.add(getSerialNum()); - for (int i = 1; i < 10; i++) { - if (!receivedNodes.contains(i)) { - return; - } - } - testThread.interrupt(); - } - } else if (respondToElection) { - synchronized (testThread) { - testThread.notifyAll(); - } - } - }) - .start(); - } - - @Override - public void startElection(ElectionRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - assertEquals(TestUtils.getNode(0), request.getElector()); - assertEquals(11, request.getTerm()); - assertEquals(13, request.getLastLogIndex()); - assertEquals(13, request.getLastLogTerm()); - if (respondToElection) { - resultHandler.onComplete(Response.RESPONSE_AGREE); - } - }) - .start(); - } - }; - } - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - dataLogManager = new TestLogManager(2); - List logs = TestUtils.prepareTestLogs(14); - dataLogManager.append(logs); - dataLogManager.commitTo(13); - } - - @Override - @After - public void tearDown() throws InterruptedException, 
IOException, StorageEngineException { - dataLogManager.close(); - dataLogManager = null; - metaGroupMember.closeLogManager(); - metaGroupMember = null; - File dir = new File(SyncLogDequeSerializer.getLogDir(2)); - for (File file : dir.listFiles()) { - file.delete(); - } - dir.delete(); - super.tearDown(); - } - - @Override - HeartbeatThread getHeartbeatThread(RaftMember member) { - return new DataHeartbeatThread((DataGroupMember) member); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/heartbeat/HeartbeatThreadTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/heartbeat/HeartbeatThreadTest.java deleted file mode 100644 index d6ebad3aa5f8..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/heartbeat/HeartbeatThreadTest.java +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.heartbeat; - -import org.apache.iotdb.cluster.common.TestAsyncClient; -import org.apache.iotdb.cluster.common.TestLogManager; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.manage.RaftLogManager; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.rpc.thrift.ElectionRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatResponse; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.member.RaftMember; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.utils.EnvironmentUtils; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.List; -import java.util.Set; -import java.util.concurrent.ConcurrentSkipListSet; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -@SuppressWarnings("java:S2699") -public class HeartbeatThreadTest { - - RaftMember member; - TestLogManager logManager; - Thread testThread; - boolean respondToElection; - boolean testHeartbeat; - - Set receivedNodes = new ConcurrentSkipListSet<>(); - PartitionGroup partitionGroup; - private boolean prevUseAsyncServer; - - RaftMember getMember() { - return new TestMetaGroupMember() { - @Override - public 
RaftLogManager getLogManager() { - return HeartbeatThreadTest.this.logManager; - } - - @Override - public void updateHardState(long currentTerm, Node leader) {} - - @Override - public AsyncClient getAsyncClient(Node node) { - return getClient(node); - } - - @Override - public AsyncClient getAsyncHeartbeatClient(Node node) { - return getClient(node); - } - }; - } - - AsyncClient getClient(Node node) { - return new TestAsyncClient(node.nodeIdentifier) { - @Override - public void sendHeartbeat( - HeartBeatRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - if (testHeartbeat) { - assertEquals(TestUtils.getNode(0), request.getLeader()); - assertEquals(6, request.getCommitLogIndex()); - assertEquals(10, request.getTerm()); - assertNull(request.getHeader()); - synchronized (receivedNodes) { - receivedNodes.add(getSerialNum()); - for (int i = 1; i < 10; i++) { - if (!receivedNodes.contains(i)) { - return; - } - } - testThread.interrupt(); - } - } else if (respondToElection) { - synchronized (testThread) { - testThread.notifyAll(); - } - } - }) - .start(); - } - - @Override - public void startElection(ElectionRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - assertEquals(TestUtils.getNode(0), request.getElector()); - assertEquals(11, request.getTerm()); - assertEquals(6, request.getLastLogIndex()); - assertEquals(6, request.getLastLogTerm()); - if (respondToElection) { - resultHandler.onComplete(Response.RESPONSE_AGREE); - } - }) - .start(); - } - }; - } - - HeartbeatThread getHeartbeatThread(RaftMember member) { - return new HeartbeatThread(member); - } - - @Before - public void setUp() throws Exception { - ClusterConstant.setElectionMaxWaitMs(50L); - ClusterConstant.setHeartbeatIntervalMs(100L); - ClusterConstant.setElectionTimeoutMs(1000L); - prevUseAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(true); - logManager = new TestLogManager(1); - member = getMember(); - - HeartbeatThread heartBeatThread = getHeartbeatThread(member); - testThread = new Thread(heartBeatThread); - member.getTerm().set(10); - List logs = TestUtils.prepareTestLogs(7); - logManager.append(logs); - logManager.commitTo(6); - - respondToElection = false; - testHeartbeat = false; - partitionGroup = new PartitionGroup(); - for (int i = 0; i < 10; i++) { - partitionGroup.add(TestUtils.getNode(i)); - } - member.setAllNodes(partitionGroup); - member.setThisNode(TestUtils.getNode(0)); - member.setSkipElection(false); - receivedNodes.clear(); - } - - @After - public void tearDown() throws InterruptedException, IOException, StorageEngineException { - logManager.close(); - member.closeLogManager(); - member.stop(); - logManager = null; - member = null; - testThread.interrupt(); - testThread.join(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(prevUseAsyncServer); - EnvironmentUtils.cleanAllDir(); - } - - @Test - public void testAsLeader() throws InterruptedException { - testHeartbeat = true; - member.setCharacter(NodeCharacter.LEADER); - member.setLeader(member.getThisNode()); - synchronized (receivedNodes) { - testThread.start(); - } - begin: - while (true) { - for (int i = 1; i < 10; i++) { - if (!receivedNodes.contains(i)) { - continue begin; - } - } - break; - } - testThread.interrupt(); - testThread.join(); - } - - @Test - public void testAsFollower() throws InterruptedException { - int prevTimeOut = ClusterConstant.getConnectionTimeoutInMS(); - 
ClusterConstant.setConnectionTimeoutInMS(500); - member.setCharacter(NodeCharacter.FOLLOWER); - member.setLastHeartbeatReceivedTime(System.currentTimeMillis()); - respondToElection = false; - try { - testThread.start(); - while (!NodeCharacter.ELECTOR.equals(member.getCharacter())) {} - - testThread.interrupt(); - testThread.join(); - } finally { - ClusterConstant.setConnectionTimeoutInMS(prevTimeOut); - } - } - - @Test - public void testAsElector() throws InterruptedException { - member.setCharacter(NodeCharacter.ELECTOR); - respondToElection = true; - testThread.start(); - while (!NodeCharacter.LEADER.equals(member.getCharacter())) {} - - testThread.interrupt(); - testThread.join(); - } - - @Test - public void testSingleNode() throws InterruptedException { - member.getAllNodes().clear(); - member.getAllNodes().add(TestUtils.getNode(0)); - member.setCharacter(NodeCharacter.ELECTOR); - testThread.start(); - while (!NodeCharacter.LEADER.equals(member.getCharacter())) {} - - testThread.interrupt(); - testThread.join(); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/heartbeat/MetaHeartbeatThreadTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/heartbeat/MetaHeartbeatThreadTest.java deleted file mode 100644 index 00b3f83838fb..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/heartbeat/MetaHeartbeatThreadTest.java +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.heartbeat; - -import org.apache.iotdb.cluster.common.TestAsyncClient; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.log.manage.RaftLogManager; -import org.apache.iotdb.cluster.partition.NodeAdditionResult; -import org.apache.iotdb.cluster.partition.NodeRemovalResult; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.ElectionRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatResponse; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.member.MetaGroupMember; -import org.apache.iotdb.cluster.server.member.RaftMember; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.Before; - -import java.nio.ByteBuffer; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -public class MetaHeartbeatThreadTest extends HeartbeatThreadTest { - - private Set idConflictNodes = new HashSet<>(); - private ByteBuffer partitionTableBuffer; - private PartitionTable partitionTable = - new PartitionTable() { - @Override - public PartitionGroup route(String storageGroupName, long timestamp) { - return null; - } - - @Override - public RaftNode routeToHeaderByTime(String storageGroupName, long timestamp) { - return null; - } - - @Override - public void addNode(Node node) { - return; - } - - @Override - public NodeAdditionResult getNodeAdditionResult(Node node) { - return null; - } - - @Override - public void removeNode(Node node) { - return; - } - - @Override - public NodeRemovalResult getNodeRemovalResult() { - return null; - } - - @Override - public List getLocalGroups() { - return null; - } - - @Override - public PartitionGroup getPartitionGroup(RaftNode headerNode) { - return null; - } - - @Override - public ByteBuffer serialize() { - return partitionTableBuffer; - } - - @Override - public boolean deserialize(ByteBuffer buffer) { - return true; - } - - @Override - public List getAllNodes() { - return null; - } - - @Override - public List getGlobalGroups() { - return null; - } - - @Override - public List calculateGlobalGroups(List nodeRing) { - return null; - } - - @Override - public long getLastMetaLogIndex() { - return 0; - } - - @Override - public void setLastMetaLogIndex(long index) {} - }; - - @Override - RaftMember getMember() { - return new TestMetaGroupMember() { - - @Override - public RaftLogManager getLogManager() { - return MetaHeartbeatThreadTest.this.logManager; - } - - @Override - public AsyncClient getAsyncClient(Node node) { - return getClient(node); - } - - @Override - public AsyncClient getAsyncHeartbeatClient(Node node) { - return getClient(node); - } - - @Override - public Set getIdConflictNodes() { - return MetaHeartbeatThreadTest.this.idConflictNodes; - } - - @Override - public boolean isNodeBlind(Node node) { - return 6 <= node.getNodeIdentifier() && node.getNodeIdentifier() < 9; - } - - @Override - public PartitionTable getPartitionTable() { - return 
MetaHeartbeatThreadTest.this.partitionTable; - } - }; - } - - @Override - AsyncClient getClient(Node node) { - return new TestAsyncClient(node.nodeIdentifier) { - @Override - public void sendHeartbeat( - HeartBeatRequest request, AsyncMethodCallback resultHandler) { - HeartBeatRequest requestCopy = new HeartBeatRequest(request); - new Thread( - () -> { - if (testHeartbeat) { - assertEquals(TestUtils.getNode(0), requestCopy.getLeader()); - assertEquals(6, requestCopy.getCommitLogIndex()); - assertEquals(10, requestCopy.getTerm()); - assertNull(requestCopy.getHeader()); - if (node.getNodeIdentifier() < 3) { - assertTrue(requestCopy.isRegenerateIdentifier()); - } else if (node.getNodeIdentifier() < 6) { - assertTrue(requestCopy.isRequireIdentifier()); - } else if (node.getNodeIdentifier() < 9) { - assertEquals(partitionTableBuffer, requestCopy.partitionTableBytes); - } - synchronized (receivedNodes) { - receivedNodes.add(getSerialNum()); - for (int i = 1; i < 10; i++) { - if (!receivedNodes.contains(i)) { - return; - } - } - testThread.interrupt(); - } - } else if (respondToElection) { - synchronized (testThread) { - testThread.notifyAll(); - } - } - }) - .start(); - } - - @Override - public void startElection(ElectionRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - assertEquals(TestUtils.getNode(0), request.getElector()); - assertEquals(11, request.getTerm()); - assertEquals(6, request.getLastLogIndex()); - assertEquals(6, request.getLastLogTerm()); - if (respondToElection) { - resultHandler.onComplete(Response.RESPONSE_AGREE); - } - }) - .start(); - } - }; - } - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - for (int i = 0; i < 3; i++) { - idConflictNodes.add(TestUtils.getNode(i)); - } - for (Node node : member.getAllNodes()) { - if (3 <= node.getNodeIdentifier() && node.getNodeIdentifier() < 6) { - node.unsetNodeIdentifier(); - } - } - partitionTableBuffer = ByteBuffer.allocate(1024); - partitionTableBuffer.put("Just a partition table".getBytes()); - } - - @Override - HeartbeatThread getHeartbeatThread(RaftMember member) { - return new MetaHeartbeatThread((MetaGroupMember) member); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/member/BaseMember.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/member/BaseMember.java deleted file mode 100644 index 88f5b8d0ad29..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/member/BaseMember.java +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.server.member; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.ClientManager; -import org.apache.iotdb.cluster.client.ClientManager.Type; -import org.apache.iotdb.cluster.common.TestAsyncDataClient; -import org.apache.iotdb.cluster.common.TestAsyncMetaClient; -import org.apache.iotdb.cluster.common.TestDataGroupMember; -import org.apache.iotdb.cluster.common.TestLogManager; -import org.apache.iotdb.cluster.common.TestMetaGroupMember; -import org.apache.iotdb.cluster.common.TestPartitionedLogManager; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.coordinator.Coordinator; -import org.apache.iotdb.cluster.log.applier.DataLogApplier; -import org.apache.iotdb.cluster.log.manage.PartitionedSnapshotLogManager; -import org.apache.iotdb.cluster.log.manage.RaftLogManager; -import org.apache.iotdb.cluster.log.snapshot.FileSnapshot; -import org.apache.iotdb.cluster.metadata.CSchemaProcessor; -import org.apache.iotdb.cluster.metadata.MetaPuller; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.TNodeStatus; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.service.RegisterManager; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.qp.executor.PlanExecutor; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.db.utils.EnvironmentUtils; -import org.apache.iotdb.db.utils.SchemaUtils; -import org.apache.iotdb.db.wal.utils.WALMode; -import org.apache.iotdb.metrics.config.MetricConfigDescriptor; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.junit.After; -import org.junit.Before; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicLong; - -public class BaseMember { - - public static AtomicLong dummyResponse = new AtomicLong(Response.RESPONSE_AGREE); - - protected Map dataGroupMemberMap; - private Map metaGroupMemberMap; - PartitionGroup allNodes; - protected MetaGroupMember testMetaMember; - protected Coordinator coordinator; - RaftLogManager metaLogManager; - PartitionTable partitionTable; - PlanExecutor planExecutor; - protected ExecutorService testThreadPool; - - private List prevUrls; - private long prevLeaderWait; - private boolean prevUseAsyncServer; - private int preLogBufferSize; - private boolean prevUseAsyncApplier; - private WALMode prevWALMode; - - private int syncLeaderMaxWait; - private long 
heartBeatInterval; - private long electionTimeout; - - @Before - public void setUp() throws Exception, QueryProcessException { - prevUseAsyncApplier = ClusterDescriptor.getInstance().getConfig().isUseAsyncApplier(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncApplier(false); - prevUseAsyncServer = ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(); - preLogBufferSize = ClusterDescriptor.getInstance().getConfig().getRaftLogBufferSize(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(true); - ClusterDescriptor.getInstance().getConfig().setRaftLogBufferSize(409600); - testThreadPool = Executors.newFixedThreadPool(4); - prevLeaderWait = RaftMember.getWaitLeaderTimeMs(); - prevWALMode = IoTDBDescriptor.getInstance().getConfig().getWalMode(); - IoTDBDescriptor.getInstance().getConfig().setWalMode(WALMode.DISABLE); - MetricConfigDescriptor.getInstance().getMetricConfig().setEnableMetric(false); - RaftMember.setWaitLeaderTimeMs(10); - - syncLeaderMaxWait = ClusterConstant.getSyncLeaderMaxWaitMs(); - heartBeatInterval = ClusterConstant.getHeartbeatIntervalMs(); - electionTimeout = ClusterConstant.getElectionTimeoutMs(); - - ClusterConstant.setSyncLeaderMaxWaitMs(100); - ClusterConstant.setHeartbeatIntervalMs(100); - ClusterConstant.setElectionTimeoutMs(1000); - - electionTimeout = ClusterConstant.getElectionTimeoutMs(); - - ClusterConstant.setElectionTimeoutMs(1000); - - allNodes = new PartitionGroup(); - for (int i = 0; i < 100; i += 10) { - allNodes.add(TestUtils.getNode(i)); - } - - partitionTable = new SlotPartitionTable(allNodes, TestUtils.getNode(0)); - - dataGroupMemberMap = new HashMap<>(); - metaGroupMemberMap = new HashMap<>(); - metaLogManager = new TestLogManager(1); - testMetaMember = getMetaGroupMember(TestUtils.getNode(0)); - - coordinator = new Coordinator(testMetaMember); - testMetaMember.setCoordinator(coordinator); - - for (Node node : allNodes) { - // pre-create data members - getDataGroupMember(node); - } - - IoTDB.setSchemaProcessor(CSchemaProcessor.getInstance()); - CSchemaProcessor.getInstance().setMetaGroupMember(testMetaMember); - CSchemaProcessor.getInstance().setCoordinator(coordinator); - - EnvironmentUtils.envSetUp(); - prevUrls = ClusterDescriptor.getInstance().getConfig().getSeedNodeUrls(); - List testUrls = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - Node node = TestUtils.getNode(i); - testUrls.add(node.getInternalIp() + ":" + node.getMetaPort()); - } - ClusterDescriptor.getInstance().getConfig().setSeedNodeUrls(testUrls); - - for (int i = 0; i < 10; i++) { - try { - IoTDB.schemaProcessor.setStorageGroup(new PartialPath(TestUtils.getTestSg(i))); - for (int j = 0; j < 20; j++) { - SchemaUtils.registerTimeseries(TestUtils.getTestTimeSeriesSchema(i, j)); - } - } catch (MetadataException e) { - // ignore - } - } - planExecutor = new PlanExecutor(); - - testMetaMember.setPartitionTable(partitionTable); - MetaPuller.getInstance().init(testMetaMember); - } - - @After - public void tearDown() throws Exception { - testMetaMember.stop(); - metaLogManager.close(); - for (DataGroupMember member : dataGroupMemberMap.values()) { - member.stop(); - member.closeLogManager(); - } - dataGroupMemberMap.clear(); - for (MetaGroupMember member : metaGroupMemberMap.values()) { - member.stop(); - member.closeLogManager(); - } - metaGroupMemberMap.clear(); - RegisterManager.setDeregisterTimeOut(100); - EnvironmentUtils.cleanEnv(); - ClusterDescriptor.getInstance().getConfig().setSeedNodeUrls(prevUrls); - new 
File(MetaGroupMember.PARTITION_FILE_NAME).delete(); - new File(MetaGroupMember.NODE_IDENTIFIER_FILE_NAME).delete(); - RaftMember.setWaitLeaderTimeMs(prevLeaderWait); - testThreadPool.shutdownNow(); - ClusterDescriptor.getInstance().getConfig().setUseAsyncServer(prevUseAsyncServer); - ClusterDescriptor.getInstance().getConfig().setRaftLogBufferSize(preLogBufferSize); - ClusterDescriptor.getInstance().getConfig().setUseAsyncApplier(prevUseAsyncApplier); - IoTDBDescriptor.getInstance().getConfig().setWalMode(prevWALMode); - - ClusterConstant.setSyncLeaderMaxWaitMs(syncLeaderMaxWait); - ClusterConstant.setHeartbeatIntervalMs(heartBeatInterval); - ClusterConstant.setElectionTimeoutMs(electionTimeout); - } - - DataGroupMember getDataGroupMember(Node node) { - return getDataGroupMember(new RaftNode(node, 0)); - } - - DataGroupMember getDataGroupMember(RaftNode node) { - return dataGroupMemberMap.computeIfAbsent(node, this::newDataGroupMember); - } - - private DataGroupMember newDataGroupMember(RaftNode raftNode) { - DataGroupMember newMember = - new TestDataGroupMember(raftNode.getNode(), partitionTable.getPartitionGroup(raftNode)) { - - @Override - public boolean syncLeader(RaftMember.CheckConsistency checkConsistency) { - return true; - } - - @Override - public long appendEntry(AppendEntryRequest request) { - return Response.RESPONSE_AGREE; - } - - @Override - public AsyncClient getAsyncClient(Node node) { - try { - return new TestAsyncDataClient(node, dataGroupMemberMap); - } catch (IOException e) { - return null; - } - } - - @Override - public AsyncClient getSendLogAsyncClient(Node node) { - return getAsyncClient(node); - } - }; - newMember.setThisNode(raftNode.getNode()); - newMember.setMetaGroupMember(testMetaMember); - newMember.setLeader(raftNode.getNode()); - newMember.setCharacter(NodeCharacter.LEADER); - newMember.setLogManager( - getLogManager( - partitionTable.getPartitionGroup(new RaftNode(TestUtils.getNode(0), 0)), newMember)); - - newMember.setAppendLogThreadPool(testThreadPool); - return newMember; - } - - private PartitionedSnapshotLogManager getLogManager( - PartitionGroup partitionGroup, DataGroupMember dataGroupMember) { - return new TestPartitionedLogManager( - new DataLogApplier(testMetaMember, dataGroupMember), - testMetaMember.getPartitionTable(), - partitionGroup.getHeader().getNode(), - FileSnapshot.Factory.INSTANCE) { - @Override - public void takeSnapshot() {} - }; - } - - protected MetaGroupMember getMetaGroupMember(Node node) throws QueryProcessException { - return metaGroupMemberMap.computeIfAbsent(node, this::newMetaGroupMember); - } - - private MetaGroupMember newMetaGroupMember(Node node) { - MetaGroupMember ret = - new TestMetaGroupMember() { - - @Override - public DataGroupMember getLocalDataMember(RaftNode header, Object request) { - return getDataGroupMember(header); - } - - @Override - public DataGroupMember getLocalDataMember(RaftNode header) { - return getDataGroupMember(header); - } - - @Override - public AsyncClient getAsyncClient(Node node) { - try { - return new TestAsyncMetaClient(null, null, node) { - @Override - public void queryNodeStatus(AsyncMethodCallback resultHandler) { - new Thread(() -> resultHandler.onComplete(new TNodeStatus())).start(); - } - }; - } catch (IOException e) { - return null; - } - } - - @Override - public AsyncClient getSendLogAsyncClient(Node node) { - return getAsyncClient(node); - } - }; - ret.setThisNode(node); - ret.setCoordinator(new Coordinator()); - ret.setPartitionTable(partitionTable); - ret.setAllNodes(allNodes); 
- ret.setLogManager(metaLogManager); - ret.setLeader(node); - ret.setCharacter(NodeCharacter.LEADER); - ret.setAppendLogThreadPool(testThreadPool); - // TODO fixme : restore normal provider - ClusterIoTDB.getInstance() - .setClientManager( - new ClientManager(true, Type.RequestForwardClient) { - @Override - public RaftService.AsyncClient borrowAsyncClient(Node node, ClientCategory category) - throws IOException { - return new TestAsyncDataClient(node, dataGroupMemberMap); - } - }); - return ret; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/member/DataGroupMemberTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/member/DataGroupMemberTest.java deleted file mode 100644 index 08276478272b..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/member/DataGroupMemberTest.java +++ /dev/null @@ -1,1255 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.member; - -import org.apache.iotdb.cluster.RemoteTsFileResource; -import org.apache.iotdb.cluster.common.TestAsyncDataClient; -import org.apache.iotdb.cluster.common.TestException; -import org.apache.iotdb.cluster.common.TestPartitionedLogManager; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.exception.ReaderNotFoundException; -import org.apache.iotdb.cluster.exception.SnapshotInstallationException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.Snapshot; -import org.apache.iotdb.cluster.log.applier.DataLogApplier; -import org.apache.iotdb.cluster.log.manage.PartitionedSnapshotLogManager; -import org.apache.iotdb.cluster.log.snapshot.FileSnapshot; -import org.apache.iotdb.cluster.log.snapshot.PartitionedSnapshot; -import org.apache.iotdb.cluster.partition.NodeRemovalResult; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.slot.SlotNodeAdditionResult; -import org.apache.iotdb.cluster.partition.slot.SlotNodeRemovalResult; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.ElectionRequest; -import org.apache.iotdb.cluster.rpc.thrift.GetAllPathsResult; -import org.apache.iotdb.cluster.rpc.thrift.GroupByRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaResp; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSnapshotResp; -import 
org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.RequestCommitIndexResponse; -import org.apache.iotdb.cluster.rpc.thrift.SendSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.SingleSeriesQueryRequest; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.handlers.caller.GenericHandler; -import org.apache.iotdb.cluster.server.handlers.caller.PullMeasurementSchemaHandler; -import org.apache.iotdb.cluster.server.handlers.caller.PullSnapshotHandler; -import org.apache.iotdb.cluster.server.handlers.caller.PullTimeseriesSchemaHandler; -import org.apache.iotdb.cluster.server.service.DataAsyncService; -import org.apache.iotdb.cluster.utils.Constants; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.commons.utils.SerializeUtils; -import org.apache.iotdb.db.conf.IoTDBDescriptor; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.engine.modification.Deletion; -import org.apache.iotdb.db.engine.storagegroup.DataRegion; -import org.apache.iotdb.db.engine.storagegroup.TsFileResource; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.TriggerExecutionException; -import org.apache.iotdb.db.exception.WriteProcessException; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.mnode.IMeasurementMNode; -import org.apache.iotdb.db.qp.executor.PlanExecutor; -import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan; -import org.apache.iotdb.db.query.aggregation.AggregateResult; -import org.apache.iotdb.db.query.aggregation.AggregationType; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.common.BatchData; -import org.apache.iotdb.tsfile.read.filter.TimeFilter; -import org.apache.iotdb.tsfile.read.filter.ValueFilter; -import org.apache.iotdb.tsfile.read.filter.basic.Filter; -import org.apache.iotdb.tsfile.read.filter.operator.AndFilter; -import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.protocol.TCompactProtocol.Factory; -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.file.Files; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; - -import static 
org.apache.iotdb.cluster.common.TestUtils.getTestMeasurement; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -@Ignore -public class DataGroupMemberTest extends BaseMember { - - private DataGroupMember dataGroupMember; - private Map snapshotMap; - private Set pulledSnapshots; - private boolean hasInitialSnapshots; - private boolean enableSyncLeader; - private int prevReplicationNum; - private int raftId = 0; - private boolean enableInflux; - - @Override - @Before - public void setUp() throws Exception { - prevReplicationNum = ClusterDescriptor.getInstance().getConfig().getReplicationNum(); - ClusterDescriptor.getInstance().getConfig().setReplicationNum(3); - enableInflux = IoTDBDescriptor.getInstance().getConfig().isEnableInfluxDBRpcService(); - IoTDBDescriptor.getInstance().getConfig().setEnableInfluxDBRpcService(false); - super.setUp(); - dataGroupMember = getDataGroupMember(TestUtils.getNode(0)); - snapshotMap = new HashMap<>(); - for (int i = 0; i < ClusterConstant.SLOT_NUM; i++) { - FileSnapshot fileSnapshot = new FileSnapshot(); - fileSnapshot.setTimeseriesSchemas( - Collections.singletonList(TestUtils.getTestTimeSeriesSchema(0, i))); - snapshotMap.put(i, fileSnapshot); - } - pulledSnapshots = new ConcurrentSkipListSet<>(); - } - - @Override - @After - public void tearDown() throws Exception { - dataGroupMember.stop(); - super.tearDown(); - ClusterDescriptor.getInstance().getConfig().setReplicationNum(prevReplicationNum); - IoTDBDescriptor.getInstance().getConfig().setEnableInfluxDBRpcService(enableInflux); - } - - private PartitionedSnapshotLogManager getLogManager( - PartitionGroup partitionGroup, DataGroupMember dataGroupMember) { - return new TestPartitionedLogManager( - new DataLogApplier(testMetaMember, dataGroupMember), - testMetaMember.getPartitionTable(), - partitionGroup.getHeader().getNode(), - FileSnapshot.Factory.INSTANCE) { - @Override - public Snapshot getSnapshot(long minIndex) { - PartitionedSnapshot snapshot = - new PartitionedSnapshot<>(FileSnapshot.Factory.INSTANCE); - if (hasInitialSnapshots) { - for (int i = 0; i < 100; i++) { - snapshot.putSnapshot(i, snapshotMap.get(i)); - } - } - return snapshot; - } - }; - } - - @Override - DataGroupMember getDataGroupMember(RaftNode raftNode) { - PartitionGroup nodes = partitionTable.getPartitionGroup(raftNode); - return dataGroupMemberMap.computeIfAbsent( - raftNode, n -> getDataGroupMember(n.getNode(), nodes)); - } - - private DataGroupMember getDataGroupMember(Node node, PartitionGroup nodes) { - DataGroupMember dataGroupMember = - new DataGroupMember(new Factory(), nodes, testMetaMember) { - @Override - public boolean syncLeader(CheckConsistency checkConsistency) { - return true; - } - - @Override - public long appendEntry(AppendEntryRequest request) { - return Response.RESPONSE_AGREE; - } - - @Override - public void updateHardState(long currentTerm, Node leader) {} - - @Override - public AsyncClient getSendLogAsyncClient(Node node) { - return getAsyncClient(node); - } - - @Override - public AsyncClient getAsyncClient(Node node) { - try { - return new TestAsyncDataClient(node, dataGroupMemberMap) { - - @Override - public void pullMeasurementSchema( - PullSchemaRequest request, AsyncMethodCallback resultHandler) { - dataGroupMemberMap.get(request.getHeader()).setCharacter(NodeCharacter.LEADER); - new 
DataAsyncService(dataGroupMemberMap.get(request.getHeader())) - .pullMeasurementSchema(request, resultHandler); - } - - @Override - public void pullTimeSeriesSchema( - PullSchemaRequest request, AsyncMethodCallback resultHandler) { - dataGroupMemberMap.get(request.getHeader()).setCharacter(NodeCharacter.LEADER); - new DataAsyncService(dataGroupMemberMap.get(request.getHeader())) - .pullTimeSeriesSchema(request, resultHandler); - } - - @Override - public void pullSnapshot( - PullSnapshotRequest request, - AsyncMethodCallback resultHandler) { - PullSnapshotResp resp = new PullSnapshotResp(); - Map snapshotBufferMap = new HashMap<>(); - for (Integer requiredSlot : request.getRequiredSlots()) { - FileSnapshot fileSnapshot = snapshotMap.get(requiredSlot); - if (fileSnapshot != null) { - snapshotBufferMap.put(requiredSlot, fileSnapshot.serialize()); - } - synchronized (DataGroupMemberTest.this.dataGroupMember) { - pulledSnapshots.add(requiredSlot); - } - } - resp.setSnapshotBytes(snapshotBufferMap); - resultHandler.onComplete(resp); - } - - @Override - public void requestCommitIndex( - RaftNode header, - AsyncMethodCallback resultHandler) { - new Thread( - () -> { - if (enableSyncLeader) { - resultHandler.onComplete(new RequestCommitIndexResponse()); - } else { - resultHandler.onError(new TestException()); - } - }) - .start(); - } - - @Override - public void removeHardLink( - String hardLinkPath, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - try { - Files.deleteIfExists(new File(hardLinkPath).toPath()); - } catch (IOException e) { - // ignore - } - }) - .start(); - } - }; - } catch (IOException e) { - return null; - } - } - }; - PartitionedSnapshotLogManager logManager = getLogManager(nodes, dataGroupMember); - dataGroupMember.setThisNode(node); - dataGroupMember.setLogManager(logManager); - dataGroupMember.setLeader(node); - dataGroupMember.setCharacter(NodeCharacter.LEADER); - dataGroupMember.setAppendLogThreadPool(testThreadPool); - return dataGroupMember; - } - - @Test - public void testGetHeader() { - System.out.println("Start testGetHeader()"); - assertEquals(TestUtils.getRaftNode(0, 0), dataGroupMember.getHeader()); - } - - @Test - public void testAddNode() { - System.out.println("Start testAddNode()"); - PartitionGroup partitionGroup = - new PartitionGroup( - raftId, TestUtils.getNode(0), TestUtils.getNode(50), TestUtils.getNode(90)); - DataGroupMember firstMember = - getDataGroupMember(TestUtils.getNode(0), new PartitionGroup(partitionGroup)); - DataGroupMember midMember = - getDataGroupMember(TestUtils.getNode(50), new PartitionGroup(partitionGroup)); - DataGroupMember lastMember = - getDataGroupMember(TestUtils.getNode(90), new PartitionGroup(partitionGroup)); - SlotNodeAdditionResult result = new SlotNodeAdditionResult(); - result.setLostSlots(new HashMap<>()); - - try { - Node newNodeBeforeGroup = TestUtils.getNode(-5); - assertFalse(firstMember.preAddNode(newNodeBeforeGroup)); - assertFalse(midMember.preAddNode(newNodeBeforeGroup)); - assertFalse(lastMember.preAddNode(newNodeBeforeGroup)); - assertFalse(firstMember.addNode(newNodeBeforeGroup, result)); - assertFalse(midMember.addNode(newNodeBeforeGroup, result)); - assertFalse(lastMember.addNode(newNodeBeforeGroup, result)); - - Node newNodeInGroup = TestUtils.getNode(66); - assertTrue(firstMember.preAddNode(newNodeInGroup)); - assertTrue(midMember.preAddNode(newNodeInGroup)); - assertTrue(lastMember.preAddNode(newNodeInGroup)); - assertFalse(firstMember.addNode(newNodeInGroup, result)); - 
assertFalse(midMember.addNode(newNodeInGroup, result)); - assertTrue(lastMember.addNode(newNodeInGroup, result)); - - Node newNodeAfterGroup = TestUtils.getNode(101); - assertFalse(firstMember.preAddNode(newNodeAfterGroup)); - assertFalse(midMember.preAddNode(newNodeAfterGroup)); - assertFalse(lastMember.preAddNode(newNodeAfterGroup)); - assertFalse(firstMember.addNode(newNodeAfterGroup, result)); - assertFalse(midMember.addNode(newNodeAfterGroup, result)); - } finally { - firstMember.closeLogManager(); - midMember.closeLogManager(); - lastMember.closeLogManager(); - } - } - - @Test - public void testStartElection() { - System.out.println("Start testStartElection()"); - class TestHandler implements AsyncMethodCallback { - - private long response; - - @Override - public void onComplete(Long resp) { - response = resp; - } - - @Override - public void onError(Exception e) {} - - public long getResponse() { - return response; - } - } - List dataLogs = TestUtils.prepareTestLogs(11); - dataGroupMember.getLogManager().append(dataLogs); - dataGroupMember.getTerm().set(10); - testMetaMember.getTerm().set(10); - List metaLogs = TestUtils.prepareTestLogs(6); - metaLogManager.append(metaLogs); - Node voteFor = TestUtils.getNode(0); - Node elector = new Node("127.0.0.1", 30001, 1, 40001, Constants.RPC_PORT + 1, "127.0.0.1"); - - // a request with smaller term - ElectionRequest electionRequest = new ElectionRequest(); - electionRequest.setTerm(1); - electionRequest.setLastLogIndex(100); - electionRequest.setLastLogTerm(100); - electionRequest.setElector(TestUtils.getNode(0)); - TestHandler handler = new TestHandler(); - new DataAsyncService(dataGroupMember).startElection(electionRequest, handler); - assertEquals(10, handler.getResponse()); - - // a valid request with same term and voteFor is empty - electionRequest.setTerm(10); - handler = new TestHandler(); - new DataAsyncService(dataGroupMember).startElection(electionRequest, handler); - assertEquals(Response.RESPONSE_AGREE, handler.getResponse()); - - dataGroupMember.setVoteFor(null); - - // a request with same term and voteFor is empty and elector is not in the group - electionRequest.setTerm(10); - electionRequest.setElector(elector); - handler = new TestHandler(); - new DataAsyncService(dataGroupMember).startElection(electionRequest, handler); - assertEquals(Response.RESPONSE_NODE_IS_NOT_IN_GROUP, handler.getResponse()); - - dataGroupMember.setVoteFor(voteFor); - - // a request with same term and voteFor is not empty and elector is not same to voteFor - // should reject election - electionRequest.setTerm(10); - electionRequest.setElector(elector); - handler = new TestHandler(); - new DataAsyncService(dataGroupMember).startElection(electionRequest, handler); - assertEquals(Response.RESPONSE_REJECT, handler.getResponse()); - - // a valid request with same term and voteFor is not empty and elector is same to voteFor - electionRequest.setTerm(10); - electionRequest.setElector(voteFor); - handler = new TestHandler(); - new DataAsyncService(dataGroupMember).startElection(electionRequest, handler); - assertEquals(Response.RESPONSE_AGREE, handler.getResponse()); - - // a request with with larger term and stale data log - // should reject election but update term - electionRequest.setTerm(14); - electionRequest.setLastLogIndex(1); - electionRequest.setLastLogTerm(1); - new DataAsyncService(dataGroupMember).startElection(electionRequest, handler); - assertEquals(Response.RESPONSE_LOG_MISMATCH, handler.getResponse()); - assertEquals(14, 
dataGroupMember.getTerm().get()); - - // a valid request with with larger term - electionRequest.setTerm(15); - electionRequest.setLastLogIndex(100); - electionRequest.setLastLogTerm(100); - new DataAsyncService(dataGroupMember).startElection(electionRequest, handler); - assertEquals(Response.RESPONSE_AGREE, handler.getResponse()); - assertEquals(15, dataGroupMember.getTerm().get()); - } - - @Test - public void testSendSnapshot() { - System.out.println("Start testSendSnapshot()"); - PartitionedSnapshot partitionedSnapshot = - new PartitionedSnapshot<>(FileSnapshot.Factory.INSTANCE); - partitionedSnapshot.setLastLogIndex(100); - partitionedSnapshot.setLastLogTerm(100); - - for (int i = 0; i < 3; i++) { - FileSnapshot fileSnapshot = new FileSnapshot(); - partitionedSnapshot.putSnapshot(i, fileSnapshot); - } - ByteBuffer serialize = partitionedSnapshot.serialize(); - - SendSnapshotRequest request = new SendSnapshotRequest(); - request.setSnapshotBytes(serialize); - AtomicBoolean callbackCalled = new AtomicBoolean(false); - new DataAsyncService(dataGroupMember) - .sendSnapshot( - request, - new AsyncMethodCallback() { - @Override - public void onComplete(Object o) { - callbackCalled.set(true); - } - - @Override - public void onError(Exception e) { - e.printStackTrace(); - fail(e.getMessage()); - } - }); - - assertTrue(callbackCalled.get()); - assertEquals(100, dataGroupMember.getLogManager().getLastLogIndex()); - assertEquals(100, dataGroupMember.getLogManager().getLastLogTerm()); - } - - @Test - public void testApplySnapshot() - throws IOException, WriteProcessException, SnapshotInstallationException, - QueryProcessException, IllegalPathException, TriggerExecutionException { - System.out.println("Start testApplySnapshot()"); - FileSnapshot snapshot = new FileSnapshot(); - List schemaList = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - schemaList.add(TestUtils.getTestTimeSeriesSchema(0, i)); - } - snapshot.setTimeseriesSchemas(schemaList); - - // resource1, resource1 exists locally, resource2 is closed but resource1 is not - // resource3 does not exist locally and without modification, - // resource4 does not exist locally and with modification - snapshot.addFile(prepareResource(1, false, true), TestUtils.getNode(0)); - snapshot.addFile(prepareResource(2, false, true), TestUtils.getNode(0)); - snapshot.addFile(prepareResource(3, false, true), TestUtils.getNode(0)); - snapshot.addFile(prepareResource(4, true, true), TestUtils.getNode(0)); - // resource5 is the merge result of 3,4,5 - TsFileResource tsFileResource = prepareResource(5, true, true); - tsFileResource.updateStartTime(TestUtils.getTestSg(0), 300); - tsFileResource.updateEndTime(TestUtils.getTestSg(0), 599); - tsFileResource.setMinPlanIndex(3); - tsFileResource.setMaxPlanIndex(5); - snapshot.addFile(tsFileResource, TestUtils.getNode(0), true); - - // create a local resource1 - DataRegion processor; - while (true) { - try { - processor = - StorageEngine.getInstance().getProcessor(new PartialPath(TestUtils.getTestSg(0))); - break; - } catch (StorageEngineException e) { - // ignore - } - } - - InsertRowPlan insertPlan = new InsertRowPlan(); - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(0))); - insertPlan.setTime(0); - insertPlan.setMeasurements(new String[] {"s0"}); - insertPlan.setNeedInferType(true); - insertPlan.setDataTypes(new TSDataType[insertPlan.getMeasurements().length]); - insertPlan.setValues(new Object[] {"1.0"}); - insertPlan.setMeasurementMNodes(new IMeasurementMNode[] 
{TestUtils.getTestMeasurementMNode(0)}); - insertPlan.transferType(); - processor.insert(insertPlan); - processor.syncCloseAllWorkingTsFileProcessors(); - - // create a local resource2 - insertPlan.setTime(101); - processor.insert(insertPlan); - - snapshot.getDefaultInstaller(dataGroupMember).install(snapshot, 0, false); - assertEquals(2, processor.getSequenceFileList().size()); - assertEquals(1, processor.getUnSequenceFileList().size()); - Deletion deletion = new Deletion(new PartialPath(TestUtils.getTestSg(0)), 0, 0); - assertTrue( - processor - .getUnSequenceFileList() - .get(0) - .getModFile() - .getModifications() - .contains(deletion)); - } - - @Test - public void testForwardPullSnapshot() { - System.out.println("Start testForwardPullSnapshot()"); - hasInitialSnapshots = true; - dataGroupMember.setCharacter(NodeCharacter.FOLLOWER); - dataGroupMember.setLeader(TestUtils.getNode(1)); - PullSnapshotRequest request = new PullSnapshotRequest(); - List requiredSlots = Arrays.asList(1, 3, 5, 7, 9); - request.setRequiredSlots(requiredSlots); - AtomicReference> reference = new AtomicReference<>(); - PullSnapshotHandler handler = - new PullSnapshotHandler<>( - reference, - TestUtils.getNode(1), - request.getRequiredSlots(), - FileSnapshot.Factory.INSTANCE); - new DataAsyncService(dataGroupMember).pullSnapshot(request, handler); - assertEquals(requiredSlots.size(), reference.get().size()); - for (Integer requiredSlot : requiredSlots) { - assertEquals(snapshotMap.get(requiredSlot), reference.get().get(requiredSlot)); - } - } - - @Test - public void testPullSnapshot() { - System.out.println("Start testPullSnapshot()"); - hasInitialSnapshots = true; - dataGroupMember.setCharacter(NodeCharacter.LEADER); - PullSnapshotRequest request = new PullSnapshotRequest(); - List requiredSlots = Arrays.asList(1, 3, 5, 7, 9, 11, 101); - request.setRequiredSlots(requiredSlots); - AtomicReference> reference = new AtomicReference<>(); - PullSnapshotHandler handler = - new PullSnapshotHandler<>( - reference, - TestUtils.getNode(1), - request.getRequiredSlots(), - FileSnapshot.Factory.INSTANCE); - new DataAsyncService(dataGroupMember).pullSnapshot(request, handler); - assertEquals(requiredSlots.size() - 1, reference.get().size()); - for (int i = 0; i < requiredSlots.size() - 1; i++) { - Integer requiredSlot = requiredSlots.get(i); - assertEquals(snapshotMap.get(requiredSlot), reference.get().get(requiredSlot)); - } - } - - @Test - public void testFollowerExecuteNonQuery() throws IllegalPathException { - System.out.println("Start testFollowerExecuteNonQuery()"); - dataGroupMember.setCharacter(NodeCharacter.FOLLOWER); - dataGroupMember.setLeader(TestUtils.getNode(1)); - TimeseriesSchema timeseriesSchema = TestUtils.getTestTimeSeriesSchema(0, 100); - CreateTimeSeriesPlan createTimeSeriesPlan = - new CreateTimeSeriesPlan( - new PartialPath(timeseriesSchema.getFullPath()), - timeseriesSchema.getType(), - timeseriesSchema.getEncodingType(), - timeseriesSchema.getCompressor(), - timeseriesSchema.getProps(), - Collections.emptyMap(), - Collections.emptyMap(), - null); - assertEquals(200, dataGroupMember.executeNonQueryPlan(createTimeSeriesPlan).code); - assertTrue(IoTDB.schemaProcessor.isPathExist(new PartialPath(timeseriesSchema.getFullPath()))); - } - - @Test - public void testLeaderExecuteNonQuery() throws QueryProcessException, IllegalPathException { - System.out.println("Start testLeaderExecuteNonQuery()"); - dataGroupMember.setCharacter(NodeCharacter.LEADER); - dataGroupMember.setLeader(TestUtils.getNode(1)); - 
ExecutorService testThreadPool = Executors.newFixedThreadPool(4); - dataGroupMember.setAppendLogThreadPool(testThreadPool); - - TimeseriesSchema timeseriesSchema = TestUtils.getTestTimeSeriesSchema(0, 100); - CreateTimeSeriesPlan createTimeSeriesPlan = - new CreateTimeSeriesPlan( - new PartialPath(timeseriesSchema.getFullPath()), - timeseriesSchema.getType(), - timeseriesSchema.getEncodingType(), - timeseriesSchema.getCompressor(), - timeseriesSchema.getProps(), - Collections.emptyMap(), - Collections.emptyMap(), - null); - testMetaMember = super.getMetaGroupMember(TestUtils.getNode(0)); - testMetaMember.setPartitionTable(partitionTable); - dataGroupMember.setLogManager( - getLogManager( - partitionTable.getPartitionGroup(new RaftNode(TestUtils.getNode(0), 0)), - dataGroupMember)); - assertEquals(200, dataGroupMember.executeNonQueryPlan(createTimeSeriesPlan).code); - assertTrue(IoTDB.schemaProcessor.isPathExist(new PartialPath(timeseriesSchema.getFullPath()))); - testThreadPool.shutdownNow(); - } - - @Test - public void testPullTimeseriesSchema() { - System.out.println("Start testPullTimeseriesSchema()"); - int prevTimeOut = ClusterConstant.getConnectionTimeoutInMS(); - int prevMaxWait = ClusterConstant.getSyncLeaderMaxWaitMs(); - ClusterConstant.setConnectionTimeoutInMS(20); - ClusterConstant.setSyncLeaderMaxWaitMs(200); - try { - // sync with leader is temporarily disabled, the request should be forward to the leader - dataGroupMember.setLeader(TestUtils.getNode(0)); - dataGroupMember.setCharacter(NodeCharacter.FOLLOWER); - enableSyncLeader = false; - - PullSchemaRequest request = new PullSchemaRequest(); - request.setPrefixPaths(Collections.singletonList(TestUtils.getTestSg(0))); - request.setHeader(TestUtils.getRaftNode(0, raftId)); - AtomicReference> result = new AtomicReference<>(); - PullTimeseriesSchemaHandler handler = - new PullTimeseriesSchemaHandler(TestUtils.getNode(1), request.getPrefixPaths(), result); - new DataAsyncService(dataGroupMember).pullTimeSeriesSchema(request, handler); - for (int i = 0; i < 10; i++) { - assertTrue(result.get().contains(TestUtils.getTestTimeSeriesSchema(0, i))); - } - - // the member is a leader itself - dataGroupMember.setCharacter(NodeCharacter.LEADER); - result.set(null); - handler = - new PullTimeseriesSchemaHandler(TestUtils.getNode(1), request.getPrefixPaths(), result); - new DataAsyncService(dataGroupMember).pullTimeSeriesSchema(request, handler); - for (int i = 0; i < 10; i++) { - assertTrue(result.get().contains(TestUtils.getTestTimeSeriesSchema(0, i))); - } - } finally { - ClusterConstant.setConnectionTimeoutInMS(prevTimeOut); - ClusterConstant.setSyncLeaderMaxWaitMs(prevMaxWait); - } - } - - @Test - public void testPullMeasurementSchema() { - System.out.println("Start testPullMeasurementSchema()"); - int prevTimeOut = ClusterConstant.getConnectionTimeoutInMS(); - int prevMaxWait = ClusterConstant.getSyncLeaderMaxWaitMs(); - ClusterConstant.setConnectionTimeoutInMS(20); - ClusterConstant.setSyncLeaderMaxWaitMs(200); - try { - // sync with leader is temporarily disabled, the request should be forward to the leader - dataGroupMember.setLeader(TestUtils.getNode(0)); - dataGroupMember.setCharacter(NodeCharacter.FOLLOWER); - enableSyncLeader = false; - - PullSchemaRequest request = new PullSchemaRequest(); - request.setPrefixPaths(Collections.singletonList(TestUtils.getTestSg(0))); - request.setHeader(TestUtils.getRaftNode(0, raftId)); - AtomicReference> result = new AtomicReference<>(); - PullMeasurementSchemaHandler handler = - new 
PullMeasurementSchemaHandler(TestUtils.getNode(1), request.getPrefixPaths(), result); - new DataAsyncService(dataGroupMember).pullMeasurementSchema(request, handler); - for (int i = 0; i < 10; i++) { - assertTrue(result.get().contains(TestUtils.getTestMeasurementSchema(i))); - } - - // the member is a leader itself - dataGroupMember.setCharacter(NodeCharacter.LEADER); - result.set(null); - handler = - new PullMeasurementSchemaHandler(TestUtils.getNode(1), request.getPrefixPaths(), result); - new DataAsyncService(dataGroupMember).pullMeasurementSchema(request, handler); - for (int i = 0; i < 10; i++) { - assertTrue(result.get().contains(TestUtils.getTestMeasurementSchema(i))); - } - } finally { - ClusterConstant.setConnectionTimeoutInMS(prevTimeOut); - ClusterConstant.setSyncLeaderMaxWaitMs(prevMaxWait); - } - } - - @Test - public void testQuerySingleSeries() - throws QueryProcessException, StorageGroupNotSetException, StorageEngineException, - IllegalPathException { - System.out.println("Start testQuerySingleSeries()"); - InsertRowPlan insertPlan = new InsertRowPlan(); - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(0))); - insertPlan.setNeedInferType(true); - insertPlan.setMeasurements(new String[] {getTestMeasurement(0)}); - insertPlan.setDataTypes(new TSDataType[insertPlan.getMeasurements().length]); - for (int i = 0; i < 10; i++) { - insertPlan.setTime(i); - insertPlan.setValues(new Object[] {String.valueOf(i)}); - insertPlan.setMeasurementMNodes( - new IMeasurementMNode[] {TestUtils.getTestMeasurementMNode(0)}); - PlanExecutor PlanExecutor = new PlanExecutor(); - PlanExecutor.processNonQuery(insertPlan); - } - - // node1 manages the data above - dataGroupMember.setThisNode(TestUtils.getNode(10)); - dataGroupMember.setAllNodes( - partitionTable.getPartitionGroup(new RaftNode(TestUtils.getNode(10), raftId))); - dataGroupMember.setCharacter(NodeCharacter.LEADER); - SingleSeriesQueryRequest request = new SingleSeriesQueryRequest(); - request.setPath(TestUtils.getTestSeries(0, 0)); - request.setDataTypeOrdinal(TSDataType.DOUBLE.ordinal()); - request.setRequester(TestUtils.getNode(1)); - request.setQueryId(0); - request.setAscending(true); - Filter filter = TimeFilter.gtEq(5); - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - filter.serialize(dataOutputStream); - request.setTimeFilterBytes(byteArrayOutputStream.toByteArray()); - - Set deviceMeasurements = new HashSet<>(); - deviceMeasurements.add(getTestMeasurement(0)); - request.setDeviceMeasurements(deviceMeasurements); - - AtomicReference result = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(TestUtils.getNode(0), result); - new DataAsyncService(dataGroupMember).querySingleSeries(request, handler); - long readerId = result.get(); - assertEquals(1, readerId); - - AtomicReference dataResult = new AtomicReference<>(); - GenericHandler dataHandler = new GenericHandler<>(TestUtils.getNode(0), dataResult); - new DataAsyncService(dataGroupMember) - .fetchSingleSeries(TestUtils.getRaftNode(0, raftId), readerId, dataHandler); - ByteBuffer dataBuffer = dataResult.get(); - BatchData batchData = SerializeUtils.deserializeBatchData(dataBuffer); - for (int i = 5; i < 10; i++) { - assertTrue(batchData.hasCurrent()); - assertEquals(i, batchData.currentTime()); - assertEquals(i * 1.0, batchData.getDouble(), 0.00001); - batchData.next(); - } - assertFalse(batchData.hasCurrent()); - - new 
DataAsyncService(dataGroupMember) - .endQuery( - TestUtils.getRaftNode(0, raftId), - TestUtils.getNode(1), - 0, - new GenericHandler<>(TestUtils.getNode(0), null)); - } - - @Test - public void testQuerySingleSeriesWithValueFilter() - throws QueryProcessException, StorageGroupNotSetException, StorageEngineException, - IllegalPathException { - System.out.println("Start testQuerySingleSeriesWithValueFilter()"); - InsertRowPlan insertPlan = new InsertRowPlan(); - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(0))); - insertPlan.setNeedInferType(true); - insertPlan.setMeasurements(new String[] {getTestMeasurement(0)}); - insertPlan.setDataTypes(new TSDataType[insertPlan.getMeasurements().length]); - for (int i = 0; i < 10; i++) { - insertPlan.setTime(i); - insertPlan.setValues(new Object[] {String.valueOf(i)}); - insertPlan.setMeasurementMNodes( - new IMeasurementMNode[] {TestUtils.getTestMeasurementMNode(0)}); - PlanExecutor PlanExecutor = new PlanExecutor(); - PlanExecutor.processNonQuery(insertPlan); - } - - // node1 manages the data above - dataGroupMember.setThisNode(TestUtils.getNode(10)); - dataGroupMember.setAllNodes( - partitionTable.getPartitionGroup(new RaftNode(TestUtils.getNode(10), raftId))); - dataGroupMember.setCharacter(NodeCharacter.LEADER); - SingleSeriesQueryRequest request = new SingleSeriesQueryRequest(); - request.setPath(TestUtils.getTestSeries(0, 0)); - request.setDataTypeOrdinal(TSDataType.DOUBLE.ordinal()); - request.setRequester(TestUtils.getNode(1)); - request.setQueryId(0); - request.setAscending(true); - Filter filter = new AndFilter(TimeFilter.gtEq(5), ValueFilter.ltEq(8.0)); - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - filter.serialize(dataOutputStream); - request.setTimeFilterBytes(byteArrayOutputStream.toByteArray()); - - Set deviceMeasurements = new HashSet<>(); - deviceMeasurements.add(getTestMeasurement(0)); - request.setDeviceMeasurements(deviceMeasurements); - - AtomicReference result = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(TestUtils.getNode(0), result); - new DataAsyncService(dataGroupMember).querySingleSeries(request, handler); - long readerId = result.get(); - assertEquals(1, readerId); - - AtomicReference dataResult = new AtomicReference<>(); - GenericHandler dataHandler = new GenericHandler<>(TestUtils.getNode(0), dataResult); - new DataAsyncService(dataGroupMember) - .fetchSingleSeries(TestUtils.getRaftNode(0, raftId), readerId, dataHandler); - ByteBuffer dataBuffer = dataResult.get(); - BatchData batchData = SerializeUtils.deserializeBatchData(dataBuffer); - for (int i = 5; i < 9; i++) { - assertTrue(batchData.hasCurrent()); - assertEquals(i, batchData.currentTime()); - assertEquals(i * 1.0, batchData.getDouble(), 0.00001); - batchData.next(); - } - assertFalse(batchData.hasCurrent()); - - new DataAsyncService(dataGroupMember) - .endQuery( - TestUtils.getRaftNode(0, raftId), - TestUtils.getNode(1), - 0, - new GenericHandler<>(TestUtils.getNode(0), null)); - } - - @Test - public void testQuerySingleSeriesByTimestamp() - throws QueryProcessException, StorageGroupNotSetException, StorageEngineException, - IllegalPathException { - System.out.println("Start testQuerySingleSeriesByTimestamp()"); - InsertRowPlan insertPlan = new InsertRowPlan(); - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(0))); - insertPlan.setNeedInferType(true); - insertPlan.setMeasurements(new String[] 
{getTestMeasurement(0)}); - insertPlan.setDataTypes(new TSDataType[insertPlan.getMeasurements().length]); - for (int i = 0; i < 10; i++) { - insertPlan.setTime(i); - insertPlan.setValues(new Object[] {String.valueOf(i)}); - insertPlan.setMeasurementMNodes( - new IMeasurementMNode[] {TestUtils.getTestMeasurementMNode(0)}); - PlanExecutor PlanExecutor = new PlanExecutor(); - PlanExecutor.processNonQuery(insertPlan); - } - - // node1 manages the data above - dataGroupMember.setThisNode(TestUtils.getNode(10)); - dataGroupMember.setAllNodes( - partitionTable.getPartitionGroup(new RaftNode(TestUtils.getNode(10), 0))); - dataGroupMember.setCharacter(NodeCharacter.LEADER); - SingleSeriesQueryRequest request = new SingleSeriesQueryRequest(); - request.setPath(TestUtils.getTestSeries(0, 0)); - request.setDataTypeOrdinal(TSDataType.DOUBLE.ordinal()); - request.setRequester(TestUtils.getNode(1)); - request.setQueryId(0); - Filter filter = TimeFilter.gtEq(5); - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - filter.serialize(dataOutputStream); - request.setTimeFilterBytes(byteArrayOutputStream.toByteArray()); - request.setAscending(true); - Set deviceMeasurements = new HashSet<>(); - deviceMeasurements.add(getTestMeasurement(0)); - request.setDeviceMeasurements(deviceMeasurements); - - AtomicReference result = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(TestUtils.getNode(0), result); - new DataAsyncService(dataGroupMember).querySingleSeriesByTimestamp(request, handler); - long readerId = result.get(); - assertEquals(1, readerId); - - AtomicReference dataResult = new AtomicReference<>(); - GenericHandler dataHandler = new GenericHandler<>(TestUtils.getNode(0), dataResult); - - List timestamps = new ArrayList<>(5); - for (int i = 5; i < 10; i++) { - timestamps.add((long) i); - } - new DataAsyncService(dataGroupMember) - .fetchSingleSeriesByTimestamps( - TestUtils.getRaftNode(0, raftId), readerId, timestamps, dataHandler); - Object[] values = SerializeUtils.deserializeObjects(dataResult.get()); - for (int i = 5; i < 10; i++) { - assertEquals(i * 1.0, (Double) values[i - 5], 0.00001); - } - - new DataAsyncService(dataGroupMember) - .endQuery( - TestUtils.getRaftNode(0, raftId), - TestUtils.getNode(1), - 0, - new GenericHandler<>(TestUtils.getNode(0), null)); - } - - @Test - public void testQuerySingleSeriesByTimestampWithValueFilter() - throws QueryProcessException, StorageGroupNotSetException, StorageEngineException, - IllegalPathException { - System.out.println("Start testQuerySingleSeriesByTimestampWithValueFilter()"); - InsertRowPlan insertPlan = new InsertRowPlan(); - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(0))); - insertPlan.setNeedInferType(true); - insertPlan.setMeasurements(new String[] {getTestMeasurement(0)}); - insertPlan.setDataTypes(new TSDataType[insertPlan.getMeasurements().length]); - for (int i = 0; i < 10; i++) { - insertPlan.setTime(i); - insertPlan.setValues(new Object[] {String.valueOf(i)}); - insertPlan.setMeasurementMNodes( - new IMeasurementMNode[] {TestUtils.getTestMeasurementMNode(0)}); - PlanExecutor PlanExecutor = new PlanExecutor(); - PlanExecutor.processNonQuery(insertPlan); - } - - // node1 manages the data above - dataGroupMember.setThisNode(TestUtils.getNode(10)); - dataGroupMember.setAllNodes( - partitionTable.getPartitionGroup(new RaftNode(TestUtils.getNode(10), 0))); - 
dataGroupMember.setCharacter(NodeCharacter.LEADER); - SingleSeriesQueryRequest request = new SingleSeriesQueryRequest(); - request.setPath(TestUtils.getTestSeries(0, 0)); - request.setDataTypeOrdinal(TSDataType.DOUBLE.ordinal()); - request.setRequester(TestUtils.getNode(10)); - request.setQueryId(0); - Filter filter = new AndFilter(TimeFilter.gtEq(5), ValueFilter.ltEq(8.0)); - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - filter.serialize(dataOutputStream); - request.setTimeFilterBytes(byteArrayOutputStream.toByteArray()); - request.setAscending(true); - Set deviceMeasurements = new HashSet<>(); - deviceMeasurements.add(getTestMeasurement(0)); - request.setDeviceMeasurements(deviceMeasurements); - - AtomicReference result = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(TestUtils.getNode(0), result); - new DataAsyncService(dataGroupMember).querySingleSeriesByTimestamp(request, handler); - long readerId = result.get(); - assertEquals(1, readerId); - - AtomicReference dataResult = new AtomicReference<>(); - GenericHandler dataHandler = new GenericHandler<>(TestUtils.getNode(0), dataResult); - List timestamps = new ArrayList<>(4); - for (int i = 5; i < 9; i++) { - timestamps.add((long) i); - } - new DataAsyncService(dataGroupMember) - .fetchSingleSeriesByTimestamps( - TestUtils.getRaftNode(0, raftId), readerId, timestamps, dataHandler); - Object[] values = SerializeUtils.deserializeObjects(dataResult.get()); - for (int i = 5; i < 9; i++) { - assertEquals(i * 1.0, (Double) values[i - 5], 0.00001); - } - - new DataAsyncService(dataGroupMember) - .endQuery( - TestUtils.getRaftNode(0, raftId), - TestUtils.getNode(1), - 0, - new GenericHandler<>(TestUtils.getNode(0), null)); - } - - @Test - public void testGetPaths() { - System.out.println("Start testGetPaths()"); - String path = TestUtils.getTestSg(0) + ".**"; - AtomicReference pathResult = new AtomicReference<>(); - GenericHandler handler = - new GenericHandler<>(TestUtils.getNode(0), pathResult); - new DataAsyncService(dataGroupMember) - .getAllPaths( - TestUtils.getRaftNode(0, raftId), Collections.singletonList(path), false, handler); - List result = new ArrayList<>(pathResult.get().paths); - assertEquals(20, result.size()); - for (int i = 0; i < 10; i++) { - assertTrue(result.contains(TestUtils.getTestSeries(0, i))); - } - } - - @Test - public void testFetchWithoutQuery() { - System.out.println("Start testFetchWithoutQuery()"); - AtomicReference result = new AtomicReference<>(); - List timestamps = new ArrayList<>(1); - timestamps.add((long) 0); - new DataAsyncService(dataGroupMember) - .fetchSingleSeriesByTimestamps( - TestUtils.getRaftNode(0, raftId), - 0, - timestamps, - new AsyncMethodCallback() { - @Override - public void onComplete(ByteBuffer buffer) {} - - @Override - public void onError(Exception e) { - result.set(e); - } - }); - Exception exception = result.get(); - assertTrue(exception instanceof ReaderNotFoundException); - assertEquals("The requested reader 0 is not found", exception.getMessage()); - - new DataAsyncService(dataGroupMember) - .fetchSingleSeries( - TestUtils.getRaftNode(0, raftId), - 0, - new AsyncMethodCallback() { - @Override - public void onComplete(ByteBuffer buffer) {} - - @Override - public void onError(Exception e) { - result.set(e); - } - }); - exception = result.get(); - assertTrue(exception instanceof ReaderNotFoundException); - assertEquals("The requested reader 0 is 
not found", exception.getMessage()); - } - - private TsFileResource prepareResource( - long serialNum, boolean withModification, boolean asHardLink) - throws IOException, IllegalPathException { - TsFileResource resource = new RemoteTsFileResource(); - String fileName = - "target" - + File.separator - + TestUtils.getTestSg(0) - + File.separator - + "0" - + File.separator - + "0" - + File.separator - + "0-" - + serialNum - + "-0.tsfile"; - if (asHardLink) { - fileName = fileName + ".0_0"; - } - File file = new File(fileName); - file.getParentFile().mkdirs(); - file.createNewFile(); - - resource.setFile(file); - resource.setMaxPlanIndex(serialNum); - resource.setMaxPlanIndex(serialNum); - resource.updateStartTime(TestUtils.getTestSg(0), serialNum * 100); - resource.updateEndTime(TestUtils.getTestSg(0), (serialNum + 1) * 100 - 1); - if (withModification) { - Deletion deletion = new Deletion(new PartialPath(TestUtils.getTestSg(0)), 0, 0); - resource.getModFile().write(deletion); - resource.getModFile().close(); - } - return resource; - } - - @Test - public void testRemoveLeader() { - System.out.println("Start testRemoveLeader()"); - Node nodeToRemove = TestUtils.getNode(10); - testMetaMember.getPartitionTable().removeNode(nodeToRemove); - SlotNodeRemovalResult nodeRemovalResult = - (SlotNodeRemovalResult) testMetaMember.getPartitionTable().getNodeRemovalResult(); - dataGroupMember.setLeader(nodeToRemove); - dataGroupMember.start(); - - try { - dataGroupMember.preRemoveNode(nodeToRemove); - dataGroupMember.removeNode(nodeToRemove); - - assertEquals(NodeCharacter.ELECTOR, dataGroupMember.getCharacter()); - assertTrue(dataGroupMember.getAllNodes().contains(TestUtils.getNode(30))); - assertFalse(dataGroupMember.getAllNodes().contains(nodeToRemove)); - - dataGroupMember.pullSlots(nodeRemovalResult); - List newSlots = - nodeRemovalResult.getNewSlotOwners().get(new RaftNode(TestUtils.getNode(0), raftId)); - while (newSlots.size() != pulledSnapshots.size()) {} - for (Integer newSlot : newSlots) { - assertTrue(pulledSnapshots.contains(newSlot)); - } - } finally { - dataGroupMember.stop(); - } - } - - @Test - public void testRemoveNonLeader() { - System.out.println("Start testRemoveNonLeader()"); - Node nodeToRemove = TestUtils.getNode(10); - testMetaMember.getPartitionTable().removeNode(nodeToRemove); - NodeRemovalResult nodeRemovalResult = testMetaMember.getPartitionTable().getNodeRemovalResult(); - dataGroupMember.setLeader(TestUtils.getNode(20)); - dataGroupMember.start(); - - try { - dataGroupMember.preRemoveNode(nodeToRemove); - dataGroupMember.removeNode(nodeToRemove); - - assertEquals(0, dataGroupMember.getLastHeartbeatReceivedTime()); - assertTrue(dataGroupMember.getAllNodes().contains(TestUtils.getNode(30))); - assertFalse(dataGroupMember.getAllNodes().contains(nodeToRemove)); - - dataGroupMember.pullSlots(nodeRemovalResult); - List newSlots = - ((SlotNodeRemovalResult) nodeRemovalResult) - .getNewSlotOwners() - .get(new RaftNode(TestUtils.getNode(0), 0)); - while (newSlots.size() != pulledSnapshots.size()) {} - - for (Integer newSlot : newSlots) { - assertTrue(pulledSnapshots.contains(newSlot)); - } - } finally { - dataGroupMember.stop(); - } - } - - @Test - public void testGroupBy() - throws QueryProcessException, StorageGroupNotSetException, StorageEngineException, - IllegalPathException { - System.out.println("Start testGroupBy()"); - TestUtils.prepareData(); - - GroupByRequest request = new GroupByRequest(); - request.setPath(TestUtils.getTestSeries(0, 0)); - List aggregationTypes = new 
ArrayList<>(); - for (AggregationType value : AggregationType.values()) { - aggregationTypes.add(value.ordinal()); - } - request.setAggregationTypeOrdinals(aggregationTypes); - Filter timeFilter = TimeFilter.gtEq(5); - request.setTimeFilterBytes(SerializeUtils.serializeFilter(timeFilter)); - QueryContext queryContext = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - try { - request.setQueryId(queryContext.getQueryId()); - request.setRequestor(TestUtils.getNode(0)); - request.setDataTypeOrdinal(TSDataType.DOUBLE.ordinal()); - Set deviceMeasurements = new HashSet<>(); - deviceMeasurements.add(getTestMeasurement(0)); - request.setDeviceMeasurements(deviceMeasurements); - request.setAscending(true); - - DataGroupMember dataGroupMember; - AtomicReference resultRef; - GenericHandler handler; - Long executorId; - AtomicReference> aggrResultRef; - GenericHandler> aggrResultHandler; - List byteBuffers; - List aggregateResults; - Object[] answers; - // get an executor from a node holding this timeseries - request.setHeader(TestUtils.getRaftNode(10, raftId)); - dataGroupMember = getDataGroupMember(TestUtils.getNode(10)); - try { - resultRef = new AtomicReference<>(); - handler = new GenericHandler<>(TestUtils.getNode(0), resultRef); - new DataAsyncService(dataGroupMember).getGroupByExecutor(request, handler); - executorId = resultRef.get(); - assertEquals(1L, (long) executorId); - - // fetch result - aggrResultRef = new AtomicReference<>(); - aggrResultHandler = new GenericHandler<>(TestUtils.getNode(0), aggrResultRef); - new DataAsyncService(dataGroupMember) - .getGroupByResult( - TestUtils.getRaftNode(10, raftId), executorId, 0, 20, aggrResultHandler); - - byteBuffers = aggrResultRef.get(); - assertNotNull(byteBuffers); - aggregateResults = new ArrayList<>(); - for (ByteBuffer byteBuffer : byteBuffers) { - aggregateResults.add(AggregateResult.deserializeFrom(byteBuffer)); - } - answers = new Object[] {15.0, 12.0, 180.0, 5.0, 19.0, 19.0, 5.0, 19.0, 5.0, 19.0}; - checkAggregates(answers, aggregateResults); - } finally { - dataGroupMember.closeLogManager(); - } - - // get an executor from a node not holding this timeseries - request.setHeader(TestUtils.getRaftNode(30, raftId)); - dataGroupMember = getDataGroupMember(TestUtils.getNode(30)); - try { - resultRef = new AtomicReference<>(); - handler = new GenericHandler<>(TestUtils.getNode(0), resultRef); - request.timeFilterBytes.position(0); - new DataAsyncService(dataGroupMember).getGroupByExecutor(request, handler); - executorId = resultRef.get(); - assertEquals(-1L, (long) executorId); - - // fetch result - aggrResultRef = new AtomicReference<>(); - aggrResultHandler = new GenericHandler<>(TestUtils.getNode(0), aggrResultRef); - new DataAsyncService(dataGroupMember) - .getGroupByResult( - TestUtils.getRaftNode(30, raftId), executorId, 0, 20, aggrResultHandler); - - byteBuffers = aggrResultRef.get(); - assertNull(byteBuffers); - } finally { - dataGroupMember.closeLogManager(); - } - } finally { - QueryResourceManager.getInstance().endQuery(queryContext.getQueryId()); - } - } - - private void checkAggregates(Object[] answers, List aggregateResults) { - assertEquals(answers.length, aggregateResults.size()); - for (int i = 0; i < aggregateResults.size(); i++) { - if (answers[i] != null) { - assertEquals( - (double) answers[i], - Double.parseDouble(aggregateResults.get(i).getResult().toString()), - 0.000001); - } else { - assertNull(aggregateResults.get(i).getResult()); - } - } - } -} diff --git 
a/cluster/src/test/java/org/apache/iotdb/cluster/server/member/MetaGroupMemberTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/member/MetaGroupMemberTest.java deleted file mode 100644 index 97f043631ca9..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/member/MetaGroupMemberTest.java +++ /dev/null @@ -1,1488 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.server.member; - -import org.apache.iotdb.cluster.ClusterIoTDB; -import org.apache.iotdb.cluster.client.ClientCategory; -import org.apache.iotdb.cluster.client.ClientManager; -import org.apache.iotdb.cluster.client.IClientManager; -import org.apache.iotdb.cluster.common.TestAsyncClient; -import org.apache.iotdb.cluster.common.TestAsyncDataClient; -import org.apache.iotdb.cluster.common.TestAsyncMetaClient; -import org.apache.iotdb.cluster.common.TestPartitionedLogManager; -import org.apache.iotdb.cluster.common.TestSnapshot; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterConstant; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.coordinator.Coordinator; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.exception.ConfigInconsistentException; -import org.apache.iotdb.cluster.exception.EmptyIntervalException; -import org.apache.iotdb.cluster.exception.LogExecutionException; -import org.apache.iotdb.cluster.exception.PartitionTableUnavailableException; -import org.apache.iotdb.cluster.exception.StartUpCheckFailureException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.logtypes.AddNodeLog; -import org.apache.iotdb.cluster.log.logtypes.CloseFileLog; -import org.apache.iotdb.cluster.log.logtypes.RemoveNodeLog; -import org.apache.iotdb.cluster.log.snapshot.MetaSimpleSnapshot; -import org.apache.iotdb.cluster.metadata.CSchemaProcessor; -import org.apache.iotdb.cluster.partition.NodeRemovalResult; -import org.apache.iotdb.cluster.partition.PartitionGroup; -import org.apache.iotdb.cluster.partition.PartitionTable; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.query.ClusterPlanRouter; -import org.apache.iotdb.cluster.query.LocalQueryExecutor; -import org.apache.iotdb.cluster.query.RemoteQueryContext; -import org.apache.iotdb.cluster.query.reader.ClusterReaderFactory; -import org.apache.iotdb.cluster.rpc.thrift.AddNodeResponse; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.CheckStatusResponse; -import org.apache.iotdb.cluster.rpc.thrift.ElectionRequest; -import org.apache.iotdb.cluster.rpc.thrift.ExecutNonQueryReq; 
-import org.apache.iotdb.cluster.rpc.thrift.HeartBeatRequest; -import org.apache.iotdb.cluster.rpc.thrift.HeartBeatResponse; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaRequest; -import org.apache.iotdb.cluster.rpc.thrift.PullSchemaResp; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; -import org.apache.iotdb.cluster.rpc.thrift.RaftService.AsyncClient; -import org.apache.iotdb.cluster.rpc.thrift.SendSnapshotRequest; -import org.apache.iotdb.cluster.rpc.thrift.StartUpStatus; -import org.apache.iotdb.cluster.rpc.thrift.TNodeStatus; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.Response; -import org.apache.iotdb.cluster.server.handlers.caller.GenericHandler; -import org.apache.iotdb.cluster.server.monitor.NodeStatusManager; -import org.apache.iotdb.cluster.server.service.DataGroupEngine; -import org.apache.iotdb.cluster.server.service.MetaAsyncService; -import org.apache.iotdb.cluster.utils.ClusterUtils; -import org.apache.iotdb.cluster.utils.Constants; -import org.apache.iotdb.cluster.utils.CreateTemplatePlanUtil; -import org.apache.iotdb.cluster.utils.StatusUtils; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.auth.AuthException; -import org.apache.iotdb.commons.auth.authorizer.IAuthorizer; -import org.apache.iotdb.commons.auth.authorizer.LocalFileAuthorizer; -import org.apache.iotdb.commons.auth.entity.Role; -import org.apache.iotdb.commons.auth.entity.User; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.engine.StorageEngine; -import org.apache.iotdb.db.engine.storagegroup.DataRegion; -import org.apache.iotdb.db.exception.StorageEngineException; -import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException; -import org.apache.iotdb.db.exception.query.QueryProcessException; -import org.apache.iotdb.db.metadata.mnode.IMeasurementMNode; -import org.apache.iotdb.db.metadata.path.MeasurementPath; -import org.apache.iotdb.db.metadata.template.Template; -import org.apache.iotdb.db.metadata.template.TemplateManager; -import org.apache.iotdb.db.qp.executor.PlanExecutor; -import org.apache.iotdb.db.qp.physical.PhysicalPlan; -import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateTemplatePlan; -import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan; -import org.apache.iotdb.db.qp.physical.sys.SetStorageGroupPlan; -import org.apache.iotdb.db.query.context.QueryContext; -import org.apache.iotdb.db.query.control.QueryResourceManager; -import org.apache.iotdb.db.query.reader.series.IReaderByTimestamp; -import org.apache.iotdb.db.query.reader.series.ManagedSeriesReader; -import org.apache.iotdb.db.service.IoTDB; -import org.apache.iotdb.db.utils.TimeValuePairUtils; -import org.apache.iotdb.rpc.TSStatusCode; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.read.common.BatchData; -import org.apache.iotdb.tsfile.read.filter.TimeFilter; -import org.apache.iotdb.tsfile.read.filter.ValueFilter; -import org.apache.iotdb.tsfile.write.schema.IMeasurementSchema; -import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema; - -import org.apache.thrift.async.AsyncMethodCallback; -import 
org.apache.thrift.protocol.TCompactProtocol.Factory; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; - -import static org.apache.iotdb.cluster.server.NodeCharacter.ELECTOR; -import static org.apache.iotdb.cluster.server.NodeCharacter.FOLLOWER; -import static org.apache.iotdb.cluster.server.NodeCharacter.LEADER; -import static org.awaitility.Awaitility.await; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -public class MetaGroupMemberTest extends BaseMember { - - private DataGroupEngine dataGroupEngine; - protected boolean mockDataClusterServer; - private Node exiledNode; - - private int prevReplicaNum; - private List prevSeedNodes; - - @Override - @After - public void tearDown() throws Exception { - dataGroupEngine.stop(); - super.tearDown(); - ClusterDescriptor.getInstance().getConfig().setReplicationNum(prevReplicaNum); - ClusterDescriptor.getInstance().getConfig().setSeedNodeUrls(prevSeedNodes); - } - - @Override - @Before - public void setUp() throws Exception { - prevSeedNodes = ClusterDescriptor.getInstance().getConfig().getSeedNodeUrls(); - ClusterDescriptor.getInstance().getConfig().setSeedNodeUrls(Collections.emptyList()); - prevReplicaNum = ClusterDescriptor.getInstance().getConfig().getReplicationNum(); - ClusterDescriptor.getInstance().getConfig().setReplicationNum(2); - ClusterConstant.setConnectionTimeoutInMS(1000); - ClusterConstant.setWriteOperationTimeoutMS(1000); - ClusterConstant.setReadOperationTimeoutMS(1000); - - super.setUp(); - partitionTable = new SlotPartitionTable(allNodes, TestUtils.getNode(0)); - testMetaMember.setPartitionTable(partitionTable); - dummyResponse.set(Response.RESPONSE_AGREE); - testMetaMember.setAllNodes(allNodes); - - dataGroupEngine = - new DataGroupEngine( - new DataGroupMember.Factory(new Factory(), testMetaMember) { - @Override - public DataGroupMember create(PartitionGroup partitionGroup) { - return getDataGroupMember(partitionGroup, TestUtils.getNode(0)); - } - }, - testMetaMember); - - buildDataGroups(dataGroupEngine); - ClusterIoTDB.getInstance().setDataGroupEngine(dataGroupEngine); - ClusterIoTDB.getInstance() - .setClientManager( - new IClientManager() { - @Override - public AsyncClient borrowAsyncClient(Node node, ClientCategory category) - throws IOException { - return new TestAsyncDataClient(node, dataGroupMemberMap); - } - - @Override - public RaftService.Client borrowSyncClient(Node node, ClientCategory category) { - return null; - } - - @Override - public void returnAsyncClient( - AsyncClient client, Node node, ClientCategory category) {} - - @Override - public void returnSyncClient( - RaftService.Client client, Node node, ClientCategory category) {} - }); - 
testMetaMember.getThisNode().setNodeIdentifier(0); - testMetaMember.setRouter(new ClusterPlanRouter(testMetaMember.getPartitionTable())); - mockDataClusterServer = false; - NodeStatusManager.getINSTANCE().setMetaGroupMember(testMetaMember); - exiledNode = null; - } - - private DataGroupMember getDataGroupMember(PartitionGroup group, Node node) { - DataGroupMember dataGroupMember = - new DataGroupMember(new Factory(), group, testMetaMember) { - @Override - public boolean syncLeader(CheckConsistency checkConsistency) { - return true; - } - - @Override - public void pullSlots(NodeRemovalResult removalResult) {} - - @Override - public TSStatus executeNonQueryPlan(PhysicalPlan plan) { - try { - planExecutor.processNonQuery(plan); - return StatusUtils.OK; - } catch (QueryProcessException - | StorageGroupNotSetException - | StorageEngineException e) { - return StatusUtils.getStatus(StatusUtils.EXECUTE_STATEMENT_ERROR, e.getMessage()); - } - } - - @Override - public TSStatus forwardPlan(PhysicalPlan plan, Node node, RaftNode header) { - return executeNonQueryPlan(plan); - } - - @Override - protected AppendLogResult sendLogToFollowers(Log log) { - return AppendLogResult.OK; - } - - @Override - public AsyncClient getAsyncClient(Node node) { - return getClient(node); - } - - @Override - public AsyncClient getAsyncHeartbeatClient(Node node) { - return getClient(node); - } - - AsyncClient getClient(Node node) { - return new TestAsyncClient(node.nodeIdentifier) { - @Override - public void startElection( - ElectionRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - long resp = dummyResponse.get(); - // MIN_VALUE means let the request time out - if (resp != Long.MIN_VALUE) { - resultHandler.onComplete(resp); - } - }) - .start(); - } - - @Override - public void sendHeartbeat( - HeartBeatRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - HeartBeatResponse response = new HeartBeatResponse(); - response.setFollower(thisNode); - response.setTerm(Response.RESPONSE_AGREE); - resultHandler.onComplete(response); - }) - .start(); - } - }; - } - }; - - dataGroupMember.setLogManager( - new TestPartitionedLogManager( - null, partitionTable, group.getHeader().getNode(), TestSnapshot.Factory.INSTANCE)); - dataGroupMember.setThisNode(TestUtils.getNode(0)); - dataGroupMember.setLeader(node); - dataGroupMember.setCharacter(NodeCharacter.LEADER); - dataGroupMember.setLocalQueryExecutor( - new LocalQueryExecutor(dataGroupMember) { - @Override - public PullSchemaResp queryTimeSeriesSchema(PullSchemaRequest request) { - return mockedPullTimeSeriesSchema(request); - } - - @Override - public PullSchemaResp queryMeasurementSchema(PullSchemaRequest request) { - return mockedPullTimeSeriesSchema(request); - } - }); - return dataGroupMember; - } - - private PullSchemaResp mockedPullTimeSeriesSchema(PullSchemaRequest request) { - List schemas = new ArrayList<>(); - List prefixPaths = request.getPrefixPaths(); - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - try { - for (String prefixPath : prefixPaths) { - if (!prefixPath.equals(TestUtils.getTestSeries(10, 0))) { - // IoTDB.schemaProcessor.collectMeasurementSchema(new PartialPath(prefixPath), - // schemas); - dataOutputStream.writeInt(schemas.size()); - for (IMeasurementSchema schema : schemas) { - schema.partialSerializeTo(dataOutputStream); - } - } else { - dataOutputStream.writeInt(1); - 
TestUtils.getTestMeasurementSchema(0).partialSerializeTo(dataOutputStream); - } - } - } catch (IOException e) { - // ignore - } - PullSchemaResp resp = new PullSchemaResp(); - resp.setSchemaBytes(byteArrayOutputStream.toByteArray()); - return resp; - } - - @Override - protected MetaGroupMember getMetaGroupMember(Node node) throws QueryProcessException { - MetaGroupMember metaGroupMember = - new MetaGroupMember(new Factory(), node, new Coordinator()) { - - @Override - public void applyAddNode(AddNodeLog addNodeLog) { - allNodes.add(addNodeLog.getNewNode()); - } - - @Override - public void applyRemoveNode(RemoveNodeLog removeNodeLog) { - super.applyRemoveNode(removeNodeLog); - exiledNode = removeNodeLog.getRemovedNode(); - } - - @Override - public DataGroupEngine getDataGroupEngine() { - return mockDataClusterServer - ? MetaGroupMemberTest.this.dataGroupEngine - : ClusterIoTDB.getInstance().getDataGroupEngine(); - } - - @Override - public DataGroupMember getLocalDataMember(RaftNode header, Object request) { - return getDataGroupMember(header); - } - - @Override - public DataGroupMember getLocalDataMember(RaftNode header) { - return getDataGroupMember(header); - } - - @Override - public void updateHardState(long currentTerm, Node leader) {} - - @Override - protected void addSeedNodes() { - List seedUrls = config.getSeedNodeUrls(); - // initialize allNodes - for (String seedUrl : seedUrls) { - Node node = ClusterUtils.parseNode(seedUrl); - if (node != null - && (!node.getInternalIp().equals(thisNode.internalIp) - || node.getMetaPort() != thisNode.getMetaPort()) - && !allNodes.contains(node)) { - // do not add the local node since it is added in `setThisNode()` - allNodes.add(node); - } - } - } - - @Override - public AsyncClient getAsyncHeartbeatClient(Node node) { - return getClient(node); - } - - @Override - public AsyncClient getSendLogAsyncClient(Node node) { - return getAsyncClient(node); - } - - @Override - public AsyncClient getAsyncClient(Node node) { - return getClient(node); - } - - AsyncClient getClient(Node node) { - try { - return new TestAsyncMetaClient(null, null, node) { - @Override - public void startElection( - ElectionRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - long resp = dummyResponse.get(); - // MIN_VALUE means let the request time out - if (resp != Long.MIN_VALUE) { - resultHandler.onComplete(resp); - } - }) - .start(); - } - - @Override - public void handshake(Node sender, AsyncMethodCallback resultHandler) { - new Thread(() -> resultHandler.onComplete(null)).start(); - } - - @Override - public void sendHeartbeat( - HeartBeatRequest request, - AsyncMethodCallback resultHandler) { - new Thread( - () -> { - HeartBeatResponse response = new HeartBeatResponse(); - response.setFollower(thisNode); - response.setTerm(Response.RESPONSE_AGREE); - resultHandler.onComplete(response); - }) - .start(); - } - - @Override - public void appendEntry( - AppendEntryRequest request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - long resp = dummyResponse.get(); - // MIN_VALUE means let the request time out - if (resp != Long.MIN_VALUE) { - resultHandler.onComplete(dummyResponse.get()); - } - }) - .start(); - } - - @Override - public void addNode( - Node node, - StartUpStatus startUpStatus, - AsyncMethodCallback resultHandler) { - new Thread( - () -> { - if (node.getNodeIdentifier() == 10) { - resultHandler.onComplete( - new AddNodeResponse((int) Response.RESPONSE_IDENTIFIER_CONFLICT)); - } else { - partitionTable.addNode(node); - 
AddNodeResponse resp = new AddNodeResponse((int) dummyResponse.get()); - resp.setPartitionTableBytes(partitionTable.serialize()); - resultHandler.onComplete(resp); - } - }) - .start(); - } - - @Override - public void executeNonQueryPlan( - ExecutNonQueryReq request, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - try { - PhysicalPlan plan = PhysicalPlan.Factory.create(request.planBytes); - planExecutor.processNonQuery(plan); - resultHandler.onComplete(StatusUtils.OK); - } catch (IOException - | QueryProcessException - | StorageGroupNotSetException - | StorageEngineException - | IllegalPathException e) { - resultHandler.onError(e); - } - }) - .start(); - } - - @Override - public void queryNodeStatus(AsyncMethodCallback resultHandler) { - new Thread(() -> resultHandler.onComplete(new TNodeStatus())).start(); - } - - @Override - public void exile( - ByteBuffer removeNodeLog, AsyncMethodCallback resultHandler) { - System.out.printf("%s was exiled%n", node); - exiledNode = node; - } - - @Override - public void removeNode(Node node, AsyncMethodCallback resultHandler) { - new Thread( - () -> { - testMetaMember.applyRemoveNode( - new RemoveNodeLog(partitionTable.serialize(), node)); - resultHandler.onComplete(Response.RESPONSE_AGREE); - }) - .start(); - } - - @Override - public void checkStatus( - StartUpStatus startUpStatus, - AsyncMethodCallback resultHandler) { - new Thread( - () -> { - CheckStatusResponse response = new CheckStatusResponse(); - response.setHashSaltEquals(true); - response.setPartitionalIntervalEquals(true); - response.setReplicationNumEquals(true); - response.setSeedNodeEquals(true); - resultHandler.onComplete(response); - }) - .start(); - } - - @Override - public void collectMigrationStatus(AsyncMethodCallback resultHandler) { - new Thread( - () -> { - resultHandler.onComplete( - ClusterUtils.serializeMigrationStatus(Collections.emptyMap())); - }) - .start(); - } - }; - } catch (IOException e) { - return null; - } - } - }; - metaGroupMember.getCoordinator().linkMetaGroupMember(metaGroupMember); - metaGroupMember.setLeader(node); - metaGroupMember.setAllNodes(allNodes); - metaGroupMember.setCharacter(NodeCharacter.LEADER); - metaGroupMember.setAppendLogThreadPool(testThreadPool); - metaGroupMember.setReady(true); - metaGroupMember.setPartitionTable(partitionTable); - // TODO fixme : restore normal provider - ClusterIoTDB.getInstance() - .setClientManager( - new ClientManager( - ClusterDescriptor.getInstance().getConfig().isUseAsyncServer(), - ClientManager.Type.RequestForwardClient)); - return metaGroupMember; - } - - private void buildDataGroups(DataGroupEngine dataGroupServiceImpls) { - List partitionGroups = partitionTable.getLocalGroups(); - - dataGroupServiceImpls.setPartitionTable(partitionTable); - for (PartitionGroup partitionGroup : partitionGroups) { - RaftNode header = partitionGroup.getHeader(); - DataGroupMember dataGroupMember = getDataGroupMember(partitionGroup, TestUtils.getNode(0)); - dataGroupMember.start(); - dataGroupServiceImpls.addDataGroupMember(dataGroupMember, header); - } - } - - @Test - public void testClosePartition() - throws QueryProcessException, StorageEngineException, StorageGroupNotSetException, - IllegalPathException { - System.out.println("Start testClosePartition()"); - // the operation is accepted - dummyResponse.set(Response.RESPONSE_AGREE); - InsertRowPlan insertPlan = new InsertRowPlan(); - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(0))); - insertPlan.setNeedInferType(true); - 
insertPlan.setMeasurements(new String[] {TestUtils.getTestMeasurement(0)}); - insertPlan.setDataTypes(new TSDataType[insertPlan.getMeasurements().length]); - for (int i = 0; i < 10; i++) { - insertPlan.setTime(i); - insertPlan.setValues(new Object[] {String.valueOf(i)}); - insertPlan.setMeasurementMNodes( - new IMeasurementMNode[] {TestUtils.getTestMeasurementMNode(0)}); - PlanExecutor planExecutor = new PlanExecutor(); - planExecutor.processNonQuery(insertPlan); - } - - ExecutorService testThreadPool = Executors.newFixedThreadPool(4); - assertTrue(testMetaMember.closePartition(TestUtils.getTestSg(0), 0, true)); - - DataRegion processor = - StorageEngine.getInstance().getProcessor(new PartialPath(TestUtils.getTestSg(0))); - assertTrue(processor.getWorkSequenceTsFileProcessors().isEmpty()); - - int prevTimeout = ClusterConstant.getConnectionTimeoutInMS(); - ClusterConstant.setConnectionTimeoutInMS(100); - try { - System.out.println("Create the first file"); - for (int i = 20; i < 30; i++) { - insertPlan.setTime(i); - insertPlan.setValues(new Object[] {String.valueOf(i)}); - PlanExecutor planExecutor = new PlanExecutor(); - planExecutor.processNonQuery(insertPlan); - } - // the net work is down - dummyResponse.set(Long.MIN_VALUE); - - System.out.println("Close the first file"); - assertFalse(testMetaMember.closePartition(TestUtils.getTestSg(0), 0, true)); - assertFalse(processor.getWorkSequenceTsFileProcessors().isEmpty()); - - // network resume in 100ms - dummyResponse.set(Response.RESPONSE_AGREE); - assertTrue(testMetaMember.closePartition(TestUtils.getTestSg(0), 0, true)); - assertTrue(processor.getWorkSequenceTsFileProcessors().isEmpty()); - - System.out.println("Create the second file"); - for (int i = 30; i < 40; i++) { - insertPlan.setTime(i); - insertPlan.setValues(new Object[] {String.valueOf(i)}); - PlanExecutor planExecutor = new PlanExecutor(); - planExecutor.processNonQuery(insertPlan); - } - - // indicating the leader is stale - System.out.println("Close the second file"); - dummyResponse.set(100); - assertFalse(testMetaMember.closePartition(TestUtils.getTestSg(0), 0, true)); - assertFalse(processor.getWorkSequenceTsFileProcessors().isEmpty()); - } finally { - ClusterConstant.setConnectionTimeoutInMS(prevTimeout); - } - testThreadPool.shutdownNow(); - } - - @Test - public void testAddNode() { - System.out.println("Start testAddNode()"); - Node newNode = TestUtils.getNode(11); - testMetaMember.getPartitionTable().addNode(newNode); - testMetaMember.onElectionWins(); - testMetaMember.applyAddNode( - new AddNodeLog(testMetaMember.getPartitionTable().serialize(), newNode)); - assertTrue(partitionTable.getAllNodes().contains(newNode)); - } - - @Test - public void testBuildCluster() { - System.out.println("Start testBuildCluster()"); - testMetaMember.start(); - try { - testMetaMember.buildCluster(); - long startTime = System.currentTimeMillis(); - long timeConsumption = 0; - while (timeConsumption < 5000 && testMetaMember.getCharacter() != LEADER) { - timeConsumption = System.currentTimeMillis() - startTime; - } - if (timeConsumption >= 5000) { - fail("The member takes too long to be the leader"); - } - assertEquals(LEADER, testMetaMember.getCharacter()); - } catch (ConfigInconsistentException | StartUpCheckFailureException e) { - // do nothing - } finally { - testMetaMember.stop(); - } - } - - @Test - public void testJoinCluster() throws QueryProcessException { - System.out.println("Start testJoinCluster()"); - MetaGroupMember newMember = getMetaGroupMember(TestUtils.getNode(10)); - 
newMember.setCoordinator(new Coordinator()); - newMember.start(); - try { - newMember.joinCluster(); - newMember.setCharacter(ELECTOR); - while (!LEADER.equals(newMember.getCharacter())) { - // wait until character changes - } - } catch (Exception e) { - fail("The expected exception is not thrown" + e); - } finally { - newMember.stop(); - } - } - - @Test - public void testJoinClusterFailed() throws QueryProcessException { - System.out.println("Start testJoinClusterFailed()"); - long prevInterval = ClusterConstant.getHeartbeatIntervalMs(); - ClusterConstant.setHeartbeatIntervalMs(10); - ClusterDescriptor.getInstance().getConfig().setJoinClusterTimeOutMs(100); - dummyResponse.set(Response.RESPONSE_NO_CONNECTION); - MetaGroupMember newMember = getMetaGroupMember(TestUtils.getNode(10)); - try { - newMember.joinCluster(); - fail("The unexpected exception is thrown"); - } catch (Exception e) { - assertTrue(e instanceof StartUpCheckFailureException); - } finally { - newMember.closeLogManager(); - ClusterConstant.setHeartbeatIntervalMs(prevInterval); - } - } - - @Test - public void testSendSnapshot() throws IllegalPathException { - System.out.println("Start testSendSnapshot()"); - SendSnapshotRequest request = new SendSnapshotRequest(); - - // 1. prepare storage group and its tll - Map storageGroupTTL = new HashMap<>(); - long baseTTL = 3600; - for (int i = 0; i <= 10; i++) { - storageGroupTTL.put(new PartialPath(TestUtils.getTestSg(i)), baseTTL + i * 100); - if (i >= 5) { - storageGroupTTL.put(new PartialPath(TestUtils.getTestSg(i)), Long.MAX_VALUE); - } - } - - HashMap userMap = new HashMap<>(); - HashMap roleMap = new HashMap<>(); - - try { - - IAuthorizer authorizer = LocalFileAuthorizer.getInstance(); - - // 2. prepare the role info - authorizer.createRole("role_1"); - authorizer.createRole("role_2"); - authorizer.createRole("role_3"); - authorizer.createRole("role_4"); - - authorizer.grantPrivilegeToRole("role_1", TestUtils.getTestSg(3), 1); - authorizer.grantPrivilegeToRole("role_2", TestUtils.getTestSg(4), 1); - - roleMap.put("role_1", authorizer.getRole("role_1")); - roleMap.put("role_2", authorizer.getRole("role_2")); - roleMap.put("role_3", authorizer.getRole("role_3")); - roleMap.put("role_4", authorizer.getRole("role_4")); - - // 3. prepare the user info - authorizer.createUser("user_1", "password_1"); - authorizer.createUser("user_2", "password_2"); - authorizer.createUser("user_3", "password_3"); - authorizer.createUser("user_4", "password_4"); - - authorizer.grantPrivilegeToUser("user_1", TestUtils.getTestSg(1), 1); - authorizer.setUserUseWaterMark("user_2", true); - - authorizer.grantRoleToUser("role_1", "user_1"); - - userMap.put("user_1", authorizer.getUser("user_1")); - userMap.put("user_2", authorizer.getUser("user_2")); - userMap.put("user_3", authorizer.getUser("user_3")); - userMap.put("user_4", authorizer.getUser("user_4")); - } catch (AuthException e) { - Assert.fail(e.getMessage()); - } - - // 4. prepare the template info - Map templateMap = new HashMap<>(); - - CreateTemplatePlan createTemplatePlan = CreateTemplatePlanUtil.getCreateTemplatePlan(); - for (int i = 0; i < 10; i++) { - String templateName = "template_" + i; - createTemplatePlan.setName(templateName); - Template template = new Template(createTemplatePlan); - templateMap.put(templateName, template); - } - - // 5. 
prepare the partition table - SlotPartitionTable partitionTable = (SlotPartitionTable) TestUtils.getPartitionTable(3); - partitionTable.setLastMetaLogIndex(0); - - ByteBuffer beforePartitionTableBuffer = partitionTable.serialize(); - // 6. serialize - MetaSimpleSnapshot snapshot = - new MetaSimpleSnapshot( - storageGroupTTL, userMap, roleMap, templateMap, beforePartitionTableBuffer); - request.setSnapshotBytes(snapshot.serialize()); - AtomicReference reference = new AtomicReference<>(); - new MetaAsyncService(testMetaMember) - .sendSnapshot(request, new GenericHandler(TestUtils.getNode(0), reference)); - - // 6. check whether the snapshot applied or not - Map localStorageGroupTTL = IoTDB.schemaProcessor.getStorageGroupsTTL(); - assertNotNull(localStorageGroupTTL); - assertEquals(storageGroupTTL, localStorageGroupTTL); - - try { - IAuthorizer authorizer = LocalFileAuthorizer.getInstance(); - - assertTrue(authorizer.checkUserPrivileges("user_1", TestUtils.getTestSg(1), 1)); - assertTrue(authorizer.checkUserPrivileges("user_1", TestUtils.getTestSg(3), 1)); - assertFalse(authorizer.checkUserPrivileges("user_3", TestUtils.getTestSg(1), 1)); - - assertTrue(authorizer.isUserUseWaterMark("user_2")); - assertFalse(authorizer.isUserUseWaterMark("user_4")); - - Map localRoleMap = authorizer.getAllRoles(); - assertEquals(roleMap, localRoleMap); - - Map localTemplateMap = TemplateManager.getInstance().getTemplateMap(); - assertEquals(templateMap, localTemplateMap); - - PartitionTable localPartitionTable = this.testMetaMember.getPartitionTable(); - assertEquals(localPartitionTable, partitionTable); - - } catch (AuthException e) { - Assert.fail(e.getMessage()); - } - } - - @Test - public void testProcessNonQuery() throws IllegalPathException { - System.out.println("Start testProcessNonQuery()"); - mockDataClusterServer = true; - // as a leader - testMetaMember.setCharacter(LEADER); - testMetaMember.setAppendLogThreadPool(testThreadPool); - for (int i = 10; i < 20; i++) { - // process a non partitioned plan - SetStorageGroupPlan setStorageGroupPlan = - new SetStorageGroupPlan(new PartialPath(TestUtils.getTestSg(i))); - TSStatus status = coordinator.executeNonQueryPlan(setStorageGroupPlan); - assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.code); - assertTrue(IoTDB.schemaProcessor.isPathExist(new PartialPath(TestUtils.getTestSg(i)))); - - // process a partitioned plan - TimeseriesSchema schema = TestUtils.getTestTimeSeriesSchema(i, 0); - CreateTimeSeriesPlan createTimeSeriesPlan = - new CreateTimeSeriesPlan( - new PartialPath(schema.getFullPath()), - schema.getType(), - schema.getEncodingType(), - schema.getCompressor(), - schema.getProps(), - Collections.emptyMap(), - Collections.emptyMap(), - null); - status = coordinator.executeNonQueryPlan(createTimeSeriesPlan); - if (status.getCode() == TSStatusCode.NEED_REDIRECTION.getStatusCode()) { - status.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - } - assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.code); - assertTrue(IoTDB.schemaProcessor.isPathExist(new PartialPath(TestUtils.getTestSeries(i, 0)))); - } - testThreadPool.shutdownNow(); - } - - @Test - public void testProcessNonQueryAsFollower() throws IllegalPathException, QueryProcessException { - System.out.println("Start testProcessNonQuery()"); - mockDataClusterServer = true; - - MetaGroupMember testMetaMember2 = getMetaGroupMember(TestUtils.getNode(2)); - testMetaMember2.setCharacter(LEADER); - - // as a follower - testMetaMember.setCharacter(FOLLOWER); - 
testMetaMember.setLeader(testMetaMember2.thisNode); - testMetaMember.setAppendLogThreadPool(testThreadPool); - for (int i = 10; i < 20; i++) { - // process a non partitioned plan - SetStorageGroupPlan setStorageGroupPlan = - new SetStorageGroupPlan(new PartialPath(TestUtils.getTestSg(i))); - TSStatus status = coordinator.executeNonQueryPlan(setStorageGroupPlan); - assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.code); - assertTrue(IoTDB.schemaProcessor.isPathExist(new PartialPath(TestUtils.getTestSg(i)))); - - // process a partitioned plan - TimeseriesSchema schema = TestUtils.getTestTimeSeriesSchema(i, 0); - CreateTimeSeriesPlan createTimeSeriesPlan = - new CreateTimeSeriesPlan( - new PartialPath(schema.getFullPath()), - schema.getType(), - schema.getEncodingType(), - schema.getCompressor(), - schema.getProps(), - Collections.emptyMap(), - Collections.emptyMap(), - null); - ClusterIoTDB.getInstance() - .setClientManager( - new IClientManager() { - @Override - public AsyncClient borrowAsyncClient(Node node, ClientCategory category) - throws IOException { - return new TestAsyncDataClient(node, dataGroupMemberMap); - } - - @Override - public RaftService.Client borrowSyncClient(Node node, ClientCategory category) { - return null; - } - - @Override - public void returnAsyncClient( - AsyncClient client, Node node, ClientCategory category) {} - - @Override - public void returnSyncClient( - RaftService.Client client, Node node, ClientCategory category) {} - }); - status = coordinator.executeNonQueryPlan(createTimeSeriesPlan); - if (status.getCode() == TSStatusCode.NEED_REDIRECTION.getStatusCode()) { - status.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - } - assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.code); - assertTrue(IoTDB.schemaProcessor.isPathExist(new PartialPath(TestUtils.getTestSeries(i, 0)))); - } - testThreadPool.shutdownNow(); - } - - @Test - public void testGetReaderByTimestamp() - throws QueryProcessException, StorageEngineException, IOException, - StorageGroupNotSetException, IllegalPathException { - System.out.println("Start testGetReaderByTimestamp()"); - ClusterConstant.setReadOperationTimeoutMS(10000); - mockDataClusterServer = true; - InsertRowPlan insertPlan = new InsertRowPlan(); - insertPlan.setNeedInferType(true); - insertPlan.setMeasurements(new String[] {TestUtils.getTestMeasurement(0)}); - insertPlan.setDataTypes(new TSDataType[insertPlan.getMeasurements().length]); - for (int i = 0; i < 10; i++) { - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(i))); - IMeasurementSchema schema = TestUtils.getTestMeasurementSchema(0); - try { - IoTDB.schemaProcessor.createTimeseries( - new PartialPath(schema.getMeasurementId()), - schema.getType(), - schema.getEncodingType(), - schema.getCompressor(), - schema.getProps()); - } catch (MetadataException e) { - // ignore - } - for (int j = 0; j < 10; j++) { - insertPlan.setTime(j); - insertPlan.setValues(new Object[] {String.valueOf(j)}); - insertPlan.setMeasurementMNodes( - new IMeasurementMNode[] {TestUtils.getTestMeasurementMNode(0)}); - planExecutor.processNonQuery(insertPlan); - } - } - - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - - try { - ClusterReaderFactory readerFactory = new ClusterReaderFactory(testMetaMember); - long[] times = new long[10]; - for (int i = 0; i < 10; i++) { - times[i] = i; - } - Set deviceMeasurements = new HashSet<>(); - deviceMeasurements.add(TestUtils.getTestMeasurement(0)); - - for 
(int i = 0; i < 10; i++) { - IReaderByTimestamp readerByTimestamp = - readerFactory.getReaderByTimestamp( - new MeasurementPath(TestUtils.getTestSeries(i, 0), TSDataType.DOUBLE), - deviceMeasurements, - TSDataType.DOUBLE, - context, - true, - null); - - Object[] values = readerByTimestamp.getValuesInTimestamps(times, 10); - for (int j = 0; j < 10; j++) { - assertEquals(j * 1.0, (double) values[j], 0.00001); - } - } - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testGetReader() - throws QueryProcessException, StorageEngineException, IOException, - StorageGroupNotSetException, IllegalPathException, EmptyIntervalException { - System.out.println("Start testGetReader()"); - mockDataClusterServer = true; - InsertRowPlan insertPlan = new InsertRowPlan(); - insertPlan.setNeedInferType(true); - insertPlan.setMeasurements(new String[] {TestUtils.getTestMeasurement(0)}); - insertPlan.setDataTypes(new TSDataType[insertPlan.getMeasurements().length]); - ClusterConstant.setReadOperationTimeoutMS(1000); - - for (int i = 0; i < 10; i++) { - insertPlan.setDevicePath(new PartialPath(TestUtils.getTestSg(i))); - IMeasurementSchema schema = TestUtils.getTestMeasurementSchema(0); - try { - IoTDB.schemaProcessor.createTimeseries( - new PartialPath(schema.getMeasurementId()), - schema.getType(), - schema.getEncodingType(), - schema.getCompressor(), - schema.getProps()); - } catch (MetadataException e) { - // ignore - } - for (int j = 0; j < 10; j++) { - insertPlan.setTime(j); - insertPlan.setValues(new Object[] {String.valueOf(j)}); - insertPlan.setMeasurementMNodes( - new IMeasurementMNode[] {TestUtils.getTestMeasurementMNode(0)}); - planExecutor.processNonQuery(insertPlan); - } - } - - QueryContext context = - new RemoteQueryContext(QueryResourceManager.getInstance().assignQueryId(true)); - - try { - ClusterReaderFactory readerFactory = new ClusterReaderFactory(testMetaMember); - Set deviceMeasurements = new HashSet<>(); - deviceMeasurements.add(TestUtils.getTestMeasurement(0)); - - for (int i = 0; i < 10; i++) { - ManagedSeriesReader reader = - readerFactory.getSeriesReader( - new MeasurementPath(TestUtils.getTestSeries(i, 0), TSDataType.DOUBLE), - deviceMeasurements, - TSDataType.DOUBLE, - TimeFilter.gtEq(5), - ValueFilter.ltEq(8.0), - context, - true); - assertTrue(reader.hasNextBatch()); - BatchData batchData = reader.nextBatch(); - for (int j = 5; j < 9; j++) { - assertTrue(batchData.hasCurrent()); - assertEquals(j, batchData.currentTime()); - assertEquals(j * 1.0, batchData.getDouble(), 0.00001); - batchData.next(); - } - assertFalse(batchData.hasCurrent()); - assertFalse(reader.hasNextBatch()); - } - } finally { - QueryResourceManager.getInstance().endQuery(context.getQueryId()); - } - } - - @Test - public void testGetMatchedPaths() throws MetadataException { - System.out.println("Start testGetMatchedPaths()"); - List matchedPaths = - ((CSchemaProcessor) IoTDB.schemaProcessor) - .getMatchedPaths(new PartialPath(TestUtils.getTestSg(0) + ".*")); - assertEquals(20, matchedPaths.size()); - for (int j = 0; j < 10; j++) { - assertTrue(matchedPaths.contains(new PartialPath(TestUtils.getTestSeries(0, j)))); - } - matchedPaths = - ((CSchemaProcessor) IoTDB.schemaProcessor) - .getMatchedPaths(new PartialPath(TestUtils.getTestSg(10) + ".*")); - assertTrue(matchedPaths.isEmpty()); - } - - @Test - public void testProcessValidHeartbeatReq() throws QueryProcessException { - System.out.println("Start testProcessValidHeartbeatReq()"); - MetaGroupMember 
testMetaMember = getMetaGroupMember(TestUtils.getNode(10)); - partitionTable = new SlotPartitionTable(allNodes, TestUtils.getNode(0)); - testMetaMember.setCoordinator(new Coordinator()); - try { - HeartBeatRequest request = new HeartBeatRequest(); - request.setRequireIdentifier(true); - HeartBeatResponse response = new HeartBeatResponse(); - testMetaMember.processValidHeartbeatReq(request, response); - assertEquals(10, response.getFollowerIdentifier()); - - request.setRegenerateIdentifier(true); - testMetaMember.setPartitionTable(null); - testMetaMember.processValidHeartbeatReq(request, response); - assertTrue(response.getFollowerIdentifier() != 10); - assertTrue(response.isRequirePartitionTable()); - - request.setPartitionTableBytes(partitionTable.serialize()); - testMetaMember.processValidHeartbeatReq(request, response); - assertEquals(partitionTable, testMetaMember.getPartitionTable()); - } finally { - testMetaMember.stop(); - } - } - - @Test - public void testProcessValidHeartbeatResp() throws QueryProcessException { - System.out.println("Start testProcessValidHeartbeatResp()"); - MetaGroupMember metaGroupMember = getMetaGroupMember(TestUtils.getNode(9)); - metaGroupMember.start(); - metaGroupMember.onElectionWins(); - try { - for (int i = 0; i < 10; i++) { - HeartBeatResponse response = new HeartBeatResponse(); - response.setFollowerIdentifier(i); - response.setRequirePartitionTable(true); - response.setFollower(TestUtils.getNode(i)); - metaGroupMember.processValidHeartbeatResp(response, TestUtils.getNode(i)); - metaGroupMember.removeBlindNode(TestUtils.getNode(i)); - } - assertNotNull(metaGroupMember.getPartitionTable()); - } finally { - metaGroupMember.stop(); - } - } - - @Test - public void testAppendEntry() { - System.out.println("Start testAppendEntry()"); - System.out.println("Term before append: " + testMetaMember.getTerm().get()); - - testMetaMember.setPartitionTable(null); - CloseFileLog log = new CloseFileLog(TestUtils.getTestSg(0), 0, true); - log.setCurrLogIndex(0); - log.setCurrLogTerm(0); - AppendEntryRequest request = new AppendEntryRequest(); - request.setEntry(log.serialize()); - request.setTerm(0); - request.setLeaderCommit(0); - request.setPrevLogIndex(-1); - request.setPrevLogTerm(-1); - request.setLeader(new Node("127.0.0.1", 30000, 0, 40000, Constants.RPC_PORT, "127.0.0.1")); - AtomicReference result = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(TestUtils.getNode(0), result); - testMetaMember.setPartitionTable(null); - testMetaMember.setReady(false); - new MetaAsyncService(testMetaMember).appendEntry(request, handler); - assertEquals(Response.RESPONSE_PARTITION_TABLE_UNAVAILABLE, (long) result.get()); - System.out.println("Term after first append: " + testMetaMember.getTerm().get()); - - testMetaMember.setPartitionTable(partitionTable); - testMetaMember.setReady(true); - new MetaAsyncService(testMetaMember).appendEntry(request, handler); - System.out.println("Term after second append: " + testMetaMember.getTerm().get()); - assertEquals(Response.RESPONSE_AGREE, (long) result.get()); - } - - @Test - public void testRemoteAddNode() { - System.out.println("Start testRemoteAddNode()"); - int prevTimeout = ClusterConstant.getConnectionTimeoutInMS(); - - try { - // cannot add node when partition table is not built - testMetaMember.setPartitionTable(null); - AtomicReference result = new AtomicReference<>(); - GenericHandler handler = new GenericHandler<>(TestUtils.getNode(0), result); - new MetaAsyncService(testMetaMember) - 
.addNode(TestUtils.getNode(10), TestUtils.getStartUpStatus(), handler); - AddNodeResponse response = result.get(); - assertEquals(Response.RESPONSE_PARTITION_TABLE_UNAVAILABLE, response.getRespNum()); - - // cannot add itself - result.set(null); - testMetaMember.setPartitionTable(partitionTable); - new MetaAsyncService(testMetaMember) - .addNode(TestUtils.getNode(0), TestUtils.getStartUpStatus(), handler); - assertNull(result.get()); - - // process the request as a leader - testMetaMember.setCharacter(LEADER); - testMetaMember.onElectionWins(); - result.set(null); - testMetaMember.setPartitionTable(partitionTable); - new MetaAsyncService(testMetaMember) - .addNode(TestUtils.getNode(10), TestUtils.getStartUpStatus(), handler); - response = result.get(); - assertEquals(Response.RESPONSE_AGREE, response.getRespNum()); - assertEquals(partitionTable.serialize(), response.partitionTableBytes); - - // adding an existing node is ok - testMetaMember.setCharacter(LEADER); - result.set(null); - testMetaMember.setPartitionTable(partitionTable); - new MetaAsyncService(testMetaMember) - .addNode(TestUtils.getNode(10), TestUtils.getStartUpStatus(), handler); - response = result.get(); - assertEquals(Response.RESPONSE_AGREE, response.getRespNum()); - assertEquals(partitionTable.serialize(), response.partitionTableBytes); - - // process the request as a follower - testMetaMember.setCharacter(FOLLOWER); - testMetaMember.setLeader(TestUtils.getNode(1)); - result.set(null); - testMetaMember.setPartitionTable(partitionTable); - new MetaAsyncService(testMetaMember) - .addNode(TestUtils.getNode(11), TestUtils.getStartUpStatus(), handler); - while (result.get() == null) {} - - response = result.get(); - assertEquals(Response.RESPONSE_AGREE, response.getRespNum()); - assertEquals(partitionTable.serialize(), response.partitionTableBytes); - - // cannot add a node with conflict id - testMetaMember.setCharacter(LEADER); - result.set(null); - testMetaMember.setPartitionTable(partitionTable); - Node node = TestUtils.getNode(12).setNodeIdentifier(10); - new MetaAsyncService(testMetaMember).addNode(node, TestUtils.getStartUpStatus(), handler); - response = result.get(); - assertEquals(Response.RESPONSE_IDENTIFIER_CONFLICT, response.getRespNum()); - - // cannot add a node due to configuration conflict, partition interval - testMetaMember.setCharacter(LEADER); - result.set(null); - testMetaMember.setPartitionTable(partitionTable); - node = TestUtils.getNode(13); - StartUpStatus startUpStatus = TestUtils.getStartUpStatus(); - startUpStatus.setPartitionInterval(-1); - new MetaAsyncService(testMetaMember).addNode(node, startUpStatus, handler); - response = result.get(); - assertEquals(Response.RESPONSE_NEW_NODE_PARAMETER_CONFLICT, response.getRespNum()); - assertFalse(response.getCheckStatusResponse().isPartitionalIntervalEquals()); - assertTrue(response.getCheckStatusResponse().isHashSaltEquals()); - assertTrue(response.getCheckStatusResponse().isReplicationNumEquals()); - - // cannot add a node due to configuration conflict, hash salt - testMetaMember.setCharacter(LEADER); - result.set(null); - testMetaMember.setPartitionTable(partitionTable); - node = TestUtils.getNode(12); - startUpStatus = TestUtils.getStartUpStatus(); - startUpStatus.setHashSalt(0); - new MetaAsyncService(testMetaMember).addNode(node, startUpStatus, handler); - response = result.get(); - assertEquals(Response.RESPONSE_NEW_NODE_PARAMETER_CONFLICT, response.getRespNum()); - assertTrue(response.getCheckStatusResponse().isPartitionalIntervalEquals()); - 
assertFalse(response.getCheckStatusResponse().isHashSaltEquals()); - assertTrue(response.getCheckStatusResponse().isReplicationNumEquals()); - - // cannot add a node due to configuration conflict, replication number - testMetaMember.setCharacter(LEADER); - result.set(null); - testMetaMember.setPartitionTable(partitionTable); - node = TestUtils.getNode(12); - startUpStatus = TestUtils.getStartUpStatus(); - startUpStatus.setReplicationNumber(0); - new MetaAsyncService(testMetaMember).addNode(node, startUpStatus, handler); - response = result.get(); - assertEquals(Response.RESPONSE_NEW_NODE_PARAMETER_CONFLICT, response.getRespNum()); - assertTrue(response.getCheckStatusResponse().isPartitionalIntervalEquals()); - assertTrue(response.getCheckStatusResponse().isHashSaltEquals()); - assertFalse(response.getCheckStatusResponse().isReplicationNumEquals()); - assertTrue(response.getCheckStatusResponse().isClusterNameEquals()); - - // cannot add a node due to network failure - dummyResponse.set(Response.RESPONSE_NO_CONNECTION); - testMetaMember.setCharacter(LEADER); - result.set(null); - testMetaMember.setPartitionTable(partitionTable); - new Thread( - () -> { - await().atLeast(200, TimeUnit.MILLISECONDS); - dummyResponse.set(Response.RESPONSE_AGREE); - }) - .start(); - new MetaAsyncService(testMetaMember) - .addNode(TestUtils.getNode(12), TestUtils.getStartUpStatus(), handler); - response = result.get(); - assertEquals(Response.RESPONSE_AGREE, response.getRespNum()); - - // cannot add a node due to leadership lost - dummyResponse.set(100); - testMetaMember.setCharacter(LEADER); - result.set(null); - testMetaMember.setPartitionTable(partitionTable); - new MetaAsyncService(testMetaMember) - .addNode(TestUtils.getNode(13), TestUtils.getStartUpStatus(), handler); - response = result.get(); - assertNull(response); - - } finally { - testMetaMember.stop(); - ClusterConstant.setConnectionTimeoutInMS(prevTimeout); - } - } - - @Test - public void testLoadIdentifier() throws IOException, QueryProcessException { - System.out.println("Start testLoadIdentifier()"); - try (RandomAccessFile raf = - new RandomAccessFile(MetaGroupMember.NODE_IDENTIFIER_FILE_NAME, "rw")) { - raf.writeBytes("100"); - } - MetaGroupMember metaGroupMember = getMetaGroupMember(new Node()); - assertEquals(100, metaGroupMember.getThisNode().getNodeIdentifier()); - metaGroupMember.closeLogManager(); - } - - @Test - public void testRemoveNodeWithoutPartitionTable() throws LogExecutionException { - System.out.println("Start testRemoveNodeWithoutPartitionTable()"); - testMetaMember.setPartitionTable(null); - try { - testMetaMember.removeNode(TestUtils.getNode(0)); - fail("Expect PartitionTableUnavailableException"); - } catch (PartitionTableUnavailableException - | InterruptedException - | CheckConsistencyException e) { - // ignore - } - } - - @Test - public void testRemoveThisNode() { - System.out.println("Start testRemoveThisNode()"); - AtomicReference resultRef = new AtomicReference<>(); - testMetaMember.setLeader(testMetaMember.getThisNode()); - testMetaMember.setCharacter(LEADER); - - doRemoveNode(resultRef, testMetaMember.getThisNode()); - - assertEquals(Response.RESPONSE_AGREE, (long) resultRef.get()); - assertFalse(testMetaMember.getAllNodes().contains(testMetaMember.getThisNode())); - } - - @Test - public void testRemoveLeader() { - System.out.println("Start testRemoveLeader()"); - AtomicReference resultRef = new AtomicReference<>(); - testMetaMember.setLeader(TestUtils.getNode(40)); - testMetaMember.setCharacter(FOLLOWER); - 
doRemoveNode(resultRef, TestUtils.getNode(40)); - - assertEquals(Response.RESPONSE_AGREE, (long) resultRef.get()); - assertFalse(testMetaMember.getAllNodes().contains(TestUtils.getNode(40))); - assertEquals(ELECTOR, testMetaMember.getCharacter()); - } - - @Test - public void testRemoveNonLeader() { - System.out.println("Start testRemoveNonLeader()"); - AtomicReference resultRef = new AtomicReference<>(); - testMetaMember.setLeader(TestUtils.getNode(40)); - testMetaMember.setCharacter(FOLLOWER); - doRemoveNode(resultRef, TestUtils.getNode(20)); - - assertEquals(Response.RESPONSE_AGREE, (long) resultRef.get()); - assertFalse(testMetaMember.getAllNodes().contains(TestUtils.getNode(20))); - assertEquals(0, testMetaMember.getLastHeartbeatReceivedTime()); - } - - @Test - public void testRemoveNodeAsLeader() { - System.out.println("Start testRemoveNodeAsLeader()"); - AtomicReference resultRef = new AtomicReference<>(); - testMetaMember.setLeader(testMetaMember.getThisNode()); - testMetaMember.setCharacter(LEADER); - doRemoveNode(resultRef, TestUtils.getNode(20)); - - assertEquals(Response.RESPONSE_AGREE, (long) resultRef.get()); - assertFalse(testMetaMember.getAllNodes().contains(TestUtils.getNode(20))); - System.out.println("Checking exiled node in testRemoveNodeAsLeader()"); - assertEquals(TestUtils.getNode(20), exiledNode); - } - - @Test - public void testRemoveNonExistNode() { - System.out.println("Start testRemoveNonExistNode()"); - AtomicBoolean passed = new AtomicBoolean(false); - testMetaMember.setCharacter(LEADER); - testMetaMember.setLeader(testMetaMember.getThisNode()); - new MetaAsyncService(testMetaMember) - .removeNode( - TestUtils.getNode(120), - new AsyncMethodCallback() { - @Override - public void onComplete(Long aLong) { - passed.set(aLong.equals(Response.RESPONSE_REJECT)); - } - - @Override - public void onError(Exception e) { - e.printStackTrace(); - } - }); - - assertTrue(passed.get()); - } - - @Test - public void testRemoveTooManyNodes() { - System.out.println("Start testRemoveTooManyNodes()"); - for (int i = 0; i < 8; i++) { - AtomicReference resultRef = new AtomicReference<>(); - testMetaMember.setCharacter(LEADER); - testMetaMember.setLeader(testMetaMember.getThisNode()); - doRemoveNode(resultRef, TestUtils.getNode(90 - i * 10)); - assertEquals(Response.RESPONSE_AGREE, (long) resultRef.get()); - - assertFalse(testMetaMember.getAllNodes().contains(TestUtils.getNode(90 - i * 10))); - } - AtomicReference resultRef = new AtomicReference<>(); - testMetaMember.setCharacter(LEADER); - doRemoveNode(resultRef, TestUtils.getNode(10)); - - assertEquals(Response.RESPONSE_CLUSTER_TOO_SMALL, (long) resultRef.get()); - assertTrue(testMetaMember.getAllNodes().contains(TestUtils.getNode(10))); - } - - @Test - public void testRouteIntervalsDisablePartition() - throws IllegalPathException, StorageEngineException { - boolean isEablePartition = StorageEngine.isEnablePartition(); - StorageEngine.setEnablePartition(false); - testMetaMember.setCharacter(LEADER); - testMetaMember.setLeader(testMetaMember.getThisNode()); - TimeValuePairUtils.Intervals intervals = new TimeValuePairUtils.Intervals(); - intervals.addInterval(Long.MIN_VALUE, Long.MAX_VALUE); - - List partitionGroups = - testMetaMember.routeIntervals(intervals, new PartialPath(TestUtils.getTestSg(0))); - assertEquals(1, partitionGroups.size()); - StorageEngine.setEnablePartition(isEablePartition); - } - - @Test - public void testRouteIntervalsEnablePartition() - throws IllegalPathException, StorageEngineException { - boolean 
isEablePartition = StorageEngine.isEnablePartition(); - StorageEngine.setEnablePartition(true); - testMetaMember.setCharacter(LEADER); - testMetaMember.setLeader(testMetaMember.getThisNode()); - TimeValuePairUtils.Intervals intervals = new TimeValuePairUtils.Intervals(); - intervals.addInterval(Long.MIN_VALUE, Long.MAX_VALUE); - - List partitionGroups = - testMetaMember.routeIntervals(intervals, new PartialPath(TestUtils.getTestSg(0))); - assertTrue(partitionGroups.size() > 1); - StorageEngine.setEnablePartition(isEablePartition); - } - - private void doRemoveNode(AtomicReference resultRef, Node nodeToRemove) { - mockDataClusterServer = true; - new MetaAsyncService(testMetaMember) - .removeNode( - nodeToRemove, - new AsyncMethodCallback() { - @Override - public void onComplete(Long o) { - resultRef.set(o); - } - - @Override - public void onError(Exception e) { - e.printStackTrace(); - } - }); - while (resultRef.get() == null) {} - } - - public MetaGroupMember getTestMetaGroupMember() { - return testMetaMember; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/server/member/RaftMemberTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/server/member/RaftMemberTest.java deleted file mode 100644 index b94da9ee877c..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/server/member/RaftMemberTest.java +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.cluster.server.member; - -import org.apache.iotdb.cluster.common.TestAsyncDataClient; -import org.apache.iotdb.cluster.common.TestDataGroupMember; -import org.apache.iotdb.cluster.common.TestPartitionedLogManager; -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.config.ClusterDescriptor; -import org.apache.iotdb.cluster.config.ConsistencyLevel; -import org.apache.iotdb.cluster.exception.CheckConsistencyException; -import org.apache.iotdb.cluster.log.manage.PartitionedSnapshotLogManager; -import org.apache.iotdb.cluster.rpc.thrift.AppendEntryRequest; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.cluster.rpc.thrift.RaftNode; -import org.apache.iotdb.cluster.rpc.thrift.RaftService; -import org.apache.iotdb.cluster.rpc.thrift.RequestCommitIndexResponse; -import org.apache.iotdb.cluster.server.NodeCharacter; -import org.apache.iotdb.cluster.server.Response; - -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.IOException; - -public class RaftMemberTest extends BaseMember { - @Test - public void testsyncLeaderStrongConsistencyCheckFalse() { - // 1. 
write request : Strong consistency level with syncLeader false - DataGroupMember dataGroupMemberWithWriteStrongConsistencyFalse = - newDataGroupMemberWithSyncLeaderFalse(TestUtils.getNode(0), false); - ClusterDescriptor.getInstance() - .getConfig() - .setConsistencyLevel(ConsistencyLevel.STRONG_CONSISTENCY); - try { - dataGroupMemberWithWriteStrongConsistencyFalse.waitUntilCatchUp( - new RaftMember.StrongCheckConsistency()); - } catch (CheckConsistencyException e) { - Assert.assertNotNull(e); - Assert.assertEquals(CheckConsistencyException.CHECK_STRONG_CONSISTENCY_EXCEPTION, e); - } - } - - @Test - public void testsyncLeaderStrongConsistencyCheckTrue() { - // 1. write request : Strong consistency level with syncLeader false - DataGroupMember dataGroupMemberWithWriteStrongConsistencyTrue = - newDataGroupMemberWithSyncLeaderTrue(TestUtils.getNode(0), false); - ClusterDescriptor.getInstance() - .getConfig() - .setConsistencyLevel(ConsistencyLevel.STRONG_CONSISTENCY); - try { - - PartitionedSnapshotLogManager partitionedSnapshotLogManager = - Mockito.mock(PartitionedSnapshotLogManager.class); - Mockito.when(partitionedSnapshotLogManager.getMaxHaveAppliedCommitIndex()).thenReturn(1000L); - dataGroupMemberWithWriteStrongConsistencyTrue.setLogManager(partitionedSnapshotLogManager); - - dataGroupMemberWithWriteStrongConsistencyTrue.waitUntilCatchUp( - new RaftMember.StrongCheckConsistency()); - } catch (CheckConsistencyException e) { - Assert.fail(); - } - } - - @Test - public void testsyncLeaderMidConsistencyCheckFalse() { - // 1. write request : Strong consistency level with syncLeader false - DataGroupMember dataGroupMemberWithWriteStrongConsistencyFalse = - newDataGroupMemberWithSyncLeaderFalse(TestUtils.getNode(0), false); - ClusterDescriptor.getInstance() - .getConfig() - .setConsistencyLevel(ConsistencyLevel.MID_CONSISTENCY); - ClusterDescriptor.getInstance().getConfig().setMaxReadLogLag(1); - try { - - PartitionedSnapshotLogManager partitionedSnapshotLogManager = - Mockito.mock(PartitionedSnapshotLogManager.class); - Mockito.when(partitionedSnapshotLogManager.getMaxHaveAppliedCommitIndex()).thenReturn(-2L); - dataGroupMemberWithWriteStrongConsistencyFalse.setLogManager(partitionedSnapshotLogManager); - - dataGroupMemberWithWriteStrongConsistencyFalse.waitUntilCatchUp( - new RaftMember.MidCheckConsistency()); - } catch (CheckConsistencyException e) { - Assert.assertEquals(CheckConsistencyException.CHECK_MID_CONSISTENCY_EXCEPTION, e); - } - } - - @Test - public void testsyncLeaderMidConsistencyCheckTrue() { - // 1. write request : Strong consistency level with syncLeader false - DataGroupMember dataGroupMemberWithWriteStrongConsistencyTrue = - newDataGroupMemberWithSyncLeaderTrue(TestUtils.getNode(0), false); - ClusterDescriptor.getInstance() - .getConfig() - .setConsistencyLevel(ConsistencyLevel.MID_CONSISTENCY); - ClusterDescriptor.getInstance().getConfig().setMaxReadLogLag(500); - try { - - PartitionedSnapshotLogManager partitionedSnapshotLogManager = - Mockito.mock(PartitionedSnapshotLogManager.class); - Mockito.when(partitionedSnapshotLogManager.getMaxHaveAppliedCommitIndex()).thenReturn(600L); - dataGroupMemberWithWriteStrongConsistencyTrue.setLogManager(partitionedSnapshotLogManager); - - dataGroupMemberWithWriteStrongConsistencyTrue.waitUntilCatchUp( - new RaftMember.MidCheckConsistency()); - } catch (CheckConsistencyException e) { - Assert.fail(); - } - } - - @Test - public void testsyncLeaderWeakConsistencyCheckFalse() { - // 1. 
write request : Strong consistency level with syncLeader false - DataGroupMember dataGroupMemberWithWriteStrongConsistencyFalse = - newDataGroupMemberWithSyncLeaderFalse(TestUtils.getNode(0), false); - ClusterDescriptor.getInstance() - .getConfig() - .setConsistencyLevel(ConsistencyLevel.WEAK_CONSISTENCY); - ClusterDescriptor.getInstance().getConfig().setMaxReadLogLag(1); - try { - - PartitionedSnapshotLogManager partitionedSnapshotLogManager = - Mockito.mock(PartitionedSnapshotLogManager.class); - Mockito.when(partitionedSnapshotLogManager.getMaxHaveAppliedCommitIndex()).thenReturn(-2L); - dataGroupMemberWithWriteStrongConsistencyFalse.setLogManager(partitionedSnapshotLogManager); - - dataGroupMemberWithWriteStrongConsistencyFalse.waitUntilCatchUp(null); - } catch (CheckConsistencyException e) { - Assert.fail(); - } - } - - @Test - public void testsyncLeaderWeakConsistencyCheckTrue() { - // 1. write request : Strong consistency level with syncLeader false - DataGroupMember dataGroupMemberWithWriteStrongConsistencyTrue = - newDataGroupMemberWithSyncLeaderTrue(TestUtils.getNode(0), false); - ClusterDescriptor.getInstance() - .getConfig() - .setConsistencyLevel(ConsistencyLevel.WEAK_CONSISTENCY); - ClusterDescriptor.getInstance().getConfig().setMaxReadLogLag(500); - try { - - PartitionedSnapshotLogManager partitionedSnapshotLogManager = - Mockito.mock(PartitionedSnapshotLogManager.class); - Mockito.when(partitionedSnapshotLogManager.getMaxHaveAppliedCommitIndex()).thenReturn(600L); - dataGroupMemberWithWriteStrongConsistencyTrue.setLogManager(partitionedSnapshotLogManager); - - dataGroupMemberWithWriteStrongConsistencyTrue.waitUntilCatchUp(null); - } catch (CheckConsistencyException e) { - Assert.fail(); - } - } - - private DataGroupMember newDataGroupMemberWithSyncLeaderFalse(Node node, boolean syncLeader) { - DataGroupMember newMember = - new TestDataGroupMember(node, partitionTable.getPartitionGroup(new RaftNode(node, 0))) { - - @Override - public boolean syncLeader(RaftMember.CheckConsistency checkConsistency) { - return syncLeader; - } - - @Override - protected RequestCommitIndexResponse requestCommitIdAsync() { - return new RequestCommitIndexResponse(5, 5, 5); - } - - @Override - public long appendEntry(AppendEntryRequest request) { - return Response.RESPONSE_AGREE; - } - - @Override - public RaftService.AsyncClient getAsyncClient(Node node) { - try { - return new TestAsyncDataClient(node, dataGroupMemberMap); - } catch (IOException e) { - return null; - } - } - }; - newMember.setThisNode(node); - newMember.setMetaGroupMember(testMetaMember); - newMember.setLeader(node); - newMember.setCharacter(NodeCharacter.LEADER); - newMember.setLogManager(new TestPartitionedLogManager()); - newMember.setAppendLogThreadPool(testThreadPool); - return newMember; - } - - private DataGroupMember newDataGroupMemberWithSyncLeaderTrue(Node node, boolean syncLeader) { - DataGroupMember newMember = - new TestDataGroupMember(node, partitionTable.getPartitionGroup(new RaftNode(node, 0))) { - - @Override - public boolean syncLeader(RaftMember.CheckConsistency checkConsistency) { - return syncLeader; - } - - @Override - protected RequestCommitIndexResponse requestCommitIdAsync() { - return new RequestCommitIndexResponse(1000, 1000, 1000); - } - - @Override - public long appendEntry(AppendEntryRequest request) { - return Response.RESPONSE_AGREE; - } - - @Override - public RaftService.AsyncClient getAsyncClient(Node node) { - try { - return new TestAsyncDataClient(node, dataGroupMemberMap); - } catch (IOException 
e) { - return null; - } - } - }; - newMember.setThisNode(node); - newMember.setMetaGroupMember(testMetaMember); - newMember.setLeader(node); - newMember.setCharacter(NodeCharacter.LEADER); - newMember.setLogManager(new TestPartitionedLogManager()); - newMember.setAppendLogThreadPool(testThreadPool); - return newMember; - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/utils/Constants.java b/cluster/src/test/java/org/apache/iotdb/cluster/utils/Constants.java deleted file mode 100644 index 20b514b9e9c9..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/utils/Constants.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.cluster.utils; - -public class Constants { - public static int RPC_PORT = 6667; -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/utils/CreateTemplatePlanUtil.java b/cluster/src/test/java/org/apache/iotdb/cluster/utils/CreateTemplatePlanUtil.java deleted file mode 100644 index d75c2f2db090..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/utils/CreateTemplatePlanUtil.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.utils; - -import org.apache.iotdb.db.qp.physical.sys.CreateTemplatePlan; -import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -public class CreateTemplatePlanUtil { - - public static CreateTemplatePlan getCreateTemplatePlan() { - // create createTemplatePlan for template - List> measurementList = new ArrayList<>(); - measurementList.add(Collections.singletonList("template_sensor")); - List measurements = new ArrayList<>(); - for (int j = 0; j < 10; j++) { - measurements.add("s" + j); - } - measurementList.add(measurements); - - List> dataTypeList = new ArrayList<>(); - dataTypeList.add(Collections.singletonList(TSDataType.INT64)); - List dataTypes = new ArrayList<>(); - for (int j = 0; j < 10; j++) { - dataTypes.add(TSDataType.INT64); - } - dataTypeList.add(dataTypes); - - List> encodingList = new ArrayList<>(); - encodingList.add(Collections.singletonList(TSEncoding.RLE)); - List encodings = new ArrayList<>(); - for (int j = 0; j < 10; j++) { - encodings.add(TSEncoding.RLE); - } - encodingList.add(encodings); - - List> compressionTypes = new ArrayList<>(); - compressionTypes.add(Collections.singletonList(CompressionType.SNAPPY)); - List compressors = new ArrayList<>(); - for (int j = 0; j < 11; j++) { - compressors.add(CompressionType.SNAPPY); - } - compressionTypes.add(compressors); - - List schemaNames = new ArrayList<>(); - schemaNames.add("template_sensor"); - schemaNames.add("vector"); - - return new CreateTemplatePlan( - "template", schemaNames, measurementList, dataTypeList, encodingList, compressionTypes); - } -} diff --git a/cluster/src/test/java/org/apache/iotdb/cluster/utils/SerializeUtilTest.java b/cluster/src/test/java/org/apache/iotdb/cluster/utils/SerializeUtilTest.java deleted file mode 100644 index 37c9abba1254..000000000000 --- a/cluster/src/test/java/org/apache/iotdb/cluster/utils/SerializeUtilTest.java +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.apache.iotdb.cluster.utils; - -import org.apache.iotdb.cluster.common.TestUtils; -import org.apache.iotdb.cluster.exception.UnknownLogTypeException; -import org.apache.iotdb.cluster.log.Log; -import org.apache.iotdb.cluster.log.LogParser; -import org.apache.iotdb.cluster.log.logtypes.PhysicalPlanLog; -import org.apache.iotdb.cluster.partition.slot.SlotPartitionTable; -import org.apache.iotdb.cluster.rpc.thrift.Node; -import org.apache.iotdb.commons.exception.IllegalPathException; -import org.apache.iotdb.commons.path.PartialPath; -import org.apache.iotdb.db.qp.physical.crud.InsertTabletPlan; -import org.apache.iotdb.db.qp.physical.sys.CreateMultiTimeSeriesPlan; -import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; -import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding; -import org.apache.iotdb.tsfile.utils.Binary; - -import org.junit.Assert; -import org.junit.Test; - -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertEquals; - -public class SerializeUtilTest { - - @Test - public void testSlotPartitionTable() { - List nodes = new ArrayList<>(); - nodes.add(TestUtils.getNode(0)); - nodes.add(TestUtils.getNode(1)); - nodes.add(TestUtils.getNode(2)); - SlotPartitionTable slotPartitionTable1 = new SlotPartitionTable(nodes, TestUtils.getNode(0)); - SlotPartitionTable slotPartitionTable2 = new SlotPartitionTable(nodes, TestUtils.getNode(0)); - SlotPartitionTable slotPartitionTable3 = new SlotPartitionTable(nodes, TestUtils.getNode(0)); - slotPartitionTable1.removeNode(TestUtils.getNode(0)); - slotPartitionTable2.deserialize(slotPartitionTable1.serialize()); - assertEquals(slotPartitionTable2, slotPartitionTable1); - slotPartitionTable1.addNode(TestUtils.getNode(0)); - slotPartitionTable3.deserialize(slotPartitionTable1.serialize()); - assertEquals(slotPartitionTable3, slotPartitionTable1); - } - - @Test - public void testStrToNode() { - for (int i = 0; i < 10; i++) { - Node node = TestUtils.getNode(i); - String nodeStr = node.toString(); - Node fromStr = ClusterUtils.stringToNode(nodeStr); - assertEquals(node, fromStr); - } - } - - @Test - public void testInsertTabletPlanLog() - throws UnknownLogTypeException, IOException, IllegalPathException { - long[] times = new long[] {110L, 111L, 112L, 113L}; - List dataTypes = new ArrayList<>(); - dataTypes.add(TSDataType.DOUBLE.ordinal()); - dataTypes.add(TSDataType.FLOAT.ordinal()); - dataTypes.add(TSDataType.INT64.ordinal()); - dataTypes.add(TSDataType.INT32.ordinal()); - dataTypes.add(TSDataType.BOOLEAN.ordinal()); - dataTypes.add(TSDataType.TEXT.ordinal()); - - Object[] columns = new Object[6]; - columns[0] = new double[4]; - columns[1] = new float[4]; - columns[2] = new long[4]; - columns[3] = new int[4]; - columns[4] = new boolean[4]; - columns[5] = new Binary[4]; - - for (int r = 0; r < 4; r++) { - ((double[]) columns[0])[r] = 1.0; - ((float[]) columns[1])[r] = 2; - ((long[]) columns[2])[r] = 10000; - ((int[]) columns[3])[r] = 100; - ((boolean[]) columns[4])[r] = false; - ((Binary[]) columns[5])[r] = new Binary("hh" + r); - } - - InsertTabletPlan tabletPlan = - new InsertTabletPlan( - new PartialPath("root.test"), - new String[] {"s1", "s2", "s3", "s4", "s5", "s6"}, - dataTypes); - tabletPlan.setTimes(times); - 
tabletPlan.setColumns(columns); - tabletPlan.setRowCount(times.length); - - Log log = new PhysicalPlanLog(tabletPlan); - log.setCurrLogTerm(1); - log.setCurrLogIndex(2); - - ByteBuffer serialized = log.serialize(); - - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - tabletPlan.serialize(dataOutputStream); - ByteBuffer bufferA = ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - ByteBuffer bufferB = ByteBuffer.allocate(bufferA.limit()); - tabletPlan.serialize(bufferB); - bufferB.flip(); - assertEquals(bufferA, bufferB); - - Log parsed = LogParser.getINSTANCE().parse(serialized); - assertEquals(log, parsed); - } - - @Test - public void testCreateMultiTimeSeriesPlanLog() - throws UnknownLogTypeException, IOException, IllegalPathException { - List paths = new ArrayList<>(); - paths.add(new PartialPath("root.sg1.d2.s1")); - paths.add(new PartialPath("root.sg1.d2.s2")); - List tsDataTypes = new ArrayList<>(); - tsDataTypes.add(TSDataType.INT64); - tsDataTypes.add(TSDataType.INT32); - List tsEncodings = new ArrayList<>(); - tsEncodings.add(TSEncoding.RLE); - tsEncodings.add(TSEncoding.RLE); - List tsCompressionTypes = new ArrayList<>(); - tsCompressionTypes.add(CompressionType.SNAPPY); - tsCompressionTypes.add(CompressionType.SNAPPY); - - List> tagsList = new ArrayList<>(); - Map tags = new HashMap<>(); - tags.put("unit", "kg"); - tagsList.add(tags); - tagsList.add(tags); - - List> attributesList = new ArrayList<>(); - Map attributes = new HashMap<>(); - attributes.put("minValue", "1"); - attributes.put("maxValue", "100"); - attributesList.add(attributes); - attributesList.add(attributes); - - List alias = new ArrayList<>(); - alias.add("weight1"); - alias.add("weight2"); - - CreateMultiTimeSeriesPlan plan = new CreateMultiTimeSeriesPlan(); - plan.setPaths(paths); - plan.setDataTypes(tsDataTypes); - plan.setEncodings(tsEncodings); - plan.setCompressors(tsCompressionTypes); - plan.setTags(tagsList); - plan.setAttributes(attributesList); - plan.setAlias(alias); - - Log log = new PhysicalPlanLog(plan); - log.setCurrLogTerm(1); - log.setCurrLogIndex(2); - - ByteBuffer serialized = log.serialize(); - - ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); - DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); - plan.serialize(dataOutputStream); - ByteBuffer bufferA = ByteBuffer.wrap(byteArrayOutputStream.toByteArray()); - ByteBuffer bufferB = ByteBuffer.allocate(bufferA.limit()); - plan.serialize(bufferB); - bufferB.flip(); - assertEquals(bufferA, bufferB); - - Log parsed = LogParser.getINSTANCE().parse(serialized); - assertEquals(log, parsed); - } - - @Test - public void serdesNodeTest() { - Node node = new Node("127.0.0.1", 6667, 1, 6535, 4678, "127.0.0.1"); - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - DataOutputStream outputStream = new DataOutputStream(baos); - NodeSerializeUtils.serialize(node, outputStream); - ByteBuffer buffer = ByteBuffer.wrap(baos.toByteArray()); - Node anotherNode = new Node("127.0.0.1", 6667, 1, 6535, 4678, "127.0.0.1"); - NodeSerializeUtils.deserialize(anotherNode, buffer); - Assert.assertEquals(node, anotherNode); - } -} diff --git a/cluster/src/test/resources/logback-test.xml b/cluster/src/test/resources/logback-test.xml deleted file mode 100644 index 024434acfca0..000000000000 --- a/cluster/src/test/resources/logback-test.xml +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - - System.out - - 
%-5p [%d] [%thread] %C:%L - %m %n - utf-8 - - - INFO - - - - - - - - - - - - diff --git a/cluster/src/test/resources/node1conf/iotdb-cluster.properties b/cluster/src/test/resources/node1conf/iotdb-cluster.properties deleted file mode 100644 index 050ee8d64f3e..000000000000 --- a/cluster/src/test/resources/node1conf/iotdb-cluster.properties +++ /dev/null @@ -1,37 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -internal_ip=127.0.0.1 -internal_meta_port=9003 -internal_data_port=40010 -cluster_info_public_port=6567 -seed_nodes=127.0.0.1:9003,127.0.0.1:9005,127.0.0.1:9007 -default_replica_num=1 -consistency_level=mid -connection_timeout_ms=20000 -write_operation_timeout_ms=30000 -read_operation_timeout_ms=30000 -catch_up_timeout_ms=300000 -use_batch_in_catch_up=true -min_num_of_logs_in_mem=1000 -max_num_of_logs_in_mem=2000 -log_deletion_check_interval_second=-1 -is_use_async_server=false -is_use_async_applier=true -is_enable_raft_log_persistence=true -open_server_rpc_port=false diff --git a/cluster/src/test/resources/node1conf/iotdb-engine.properties b/cluster/src/test/resources/node1conf/iotdb-engine.properties deleted file mode 100644 index 7aad71b2554b..000000000000 --- a/cluster/src/test/resources/node1conf/iotdb-engine.properties +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -# -#http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. - - -base_dir=target/node1/tmp -data_dirs=target/node1/data -wal_dir=target/node1/wal -index_root_dir=target/node1/index -udf_root_dir=target/node1/ext -tracing_dir=target/node1/data/tracing -enable_influxdb_rpc_service=false - -rpc_port=6667 -metrics_port=8181 -sync_server_port=5555 -mqtt_port=1883 \ No newline at end of file diff --git a/cluster/src/test/resources/node1conf/iotdb-metric.yml b/cluster/src/test/resources/node1conf/iotdb-metric.yml deleted file mode 100644 index 14ab7bc2d614..000000000000 --- a/cluster/src/test/resources/node1conf/iotdb-metric.yml +++ /dev/null @@ -1,21 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# whether enable the module -enableMetric: false \ No newline at end of file diff --git a/cluster/src/test/resources/node1conf/logback.xml b/cluster/src/test/resources/node1conf/logback.xml deleted file mode 100644 index 10a2d4d66a0b..000000000000 --- a/cluster/src/test/resources/node1conf/logback.xml +++ /dev/null @@ -1,282 +0,0 @@ - - - - - - - - ${IOTDB_HOME}/node1/logs/log_error.log - - ${IOTDB_HOME}/node1/logs/log-error-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - error - ACCEPT - DENY - - - - ${IOTDB_HOME}/node1/logs/log_warn.log - - ${IOTDB_HOME}/node1/logs/log-warn-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - WARN - ACCEPT - DENY - - - - ${IOTDB_HOME}/node1/logs/log_info.log - - ${IOTDB_HOME}/node1/logs/log-info-%d{yyyyMMdd-HH}.%i.log.gz - 50MB - 168 - 5GB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - ACCEPT - DENY - - - - ${IOTDB_HOME}/node1/logs/log_debug.log - - ${IOTDB_HOME}/node1/logs/log-debug-%d{yyyyMMdd-HH}.%i.log.gz - 50MB - 168 - 5GB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - DEBUG - ACCEPT - DENY - - - - ${IOTDB_HOME}/node1/logs/log_trace.log - - ${IOTDB_HOME}/node1/logs/log-trace-%d{yyyyMMdd-HH}.%i.log.gz - 50MB - 168 - 5GB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - TRACE - ACCEPT - DENY - - - - System.out - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - DEBUG - - - - - ${IOTDB_HOME}/node1/logs/log_all.log - - ${IOTDB_HOME}/node1/logs/log-all-%d{yyyyMMdd-HH}.%i.log.gz - 50MB - 168 - 5GB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node1/logs/log_measure.log - - ${IOTDB_HOME}/node1/logs/log-measure-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node1/logs/log_sync.log - - ${IOTDB_HOME}/node1/logs/log-sync-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node1/logs/log_audit.log - - ${IOTDB_HOME}/node1/logs/log-audit-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node1/logs/log_query_debug.log - - ${IOTDB_HOME}/node1/logs/log-query-debug-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node1/logs/log_slow_sql.log - - ${IOTDB_HOME}/node1/logs/log-slow-sql-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node1/logs/log_query_frequency.log - - ${IOTDB_HOME}/node1/logs/log-query-frequency-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n 
- utf-8 - - - INFO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cluster/src/test/resources/node2conf/iotdb-cluster.properties b/cluster/src/test/resources/node2conf/iotdb-cluster.properties deleted file mode 100644 index 58be23a39b01..000000000000 --- a/cluster/src/test/resources/node2conf/iotdb-cluster.properties +++ /dev/null @@ -1,37 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -internal_ip=127.0.0.1 -internal_meta_port=9005 -internal_data_port=40012 -cluster_info_public_port=6568 -seed_nodes=127.0.0.1:9003,127.0.0.1:9005,127.0.0.1:9007 -default_replica_num=1 -consistency_level=mid -connection_timeout_ms=20000 -write_operation_timeout_ms=30000 -read_operation_timeout_ms=30000 -catch_up_timeout_ms=300000 -use_batch_in_catch_up=true -min_num_of_logs_in_mem=1000 -max_num_of_logs_in_mem=2000 -log_deletion_check_interval_second=-1 -is_use_async_server=false -is_use_async_applier=true -is_enable_raft_log_persistence=true -open_server_rpc_port=false diff --git a/cluster/src/test/resources/node2conf/iotdb-engine.properties b/cluster/src/test/resources/node2conf/iotdb-engine.properties deleted file mode 100644 index 645a30ebbb76..000000000000 --- a/cluster/src/test/resources/node2conf/iotdb-engine.properties +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -# -#http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. - - -base_dir=target/node2/tmp -data_dirs=target/node2/data -wal_dir=target/node2/wal -index_root_dir=target/node2/index -udf_root_dir=target/node2/ext -tracing_dir=target/node2/data/tracing -enable_influxdb_rpc_service=false - -rpc_port=6669 -metrics_port=8182 -sync_server_port=5556 -mqtt_port=1884 \ No newline at end of file diff --git a/cluster/src/test/resources/node2conf/iotdb-metric.yml b/cluster/src/test/resources/node2conf/iotdb-metric.yml deleted file mode 100644 index 14ab7bc2d614..000000000000 --- a/cluster/src/test/resources/node2conf/iotdb-metric.yml +++ /dev/null @@ -1,21 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# whether enable the module -enableMetric: false \ No newline at end of file diff --git a/cluster/src/test/resources/node2conf/logback.xml b/cluster/src/test/resources/node2conf/logback.xml deleted file mode 100644 index 2b2c08c2bbad..000000000000 --- a/cluster/src/test/resources/node2conf/logback.xml +++ /dev/null @@ -1,282 +0,0 @@ - - - - - - - - ${IOTDB_HOME}/node2/logs/log_error.log - - ${IOTDB_HOME}/node2/logs/log-error-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - error - ACCEPT - DENY - - - - ${IOTDB_HOME}/node2/logs/log_warn.log - - ${IOTDB_HOME}/node2/logs/log-warn-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - WARN - ACCEPT - DENY - - - - ${IOTDB_HOME}/node2/logs/log_info.log - - ${IOTDB_HOME}/node2/logs/log-info-%d{yyyyMMdd-HH}.%i.log.gz - 50MB - 168 - 5GB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - ACCEPT - DENY - - - - ${IOTDB_HOME}/node2/logs/log_debug.log - - ${IOTDB_HOME}/node2/logs/log-debug-%d{yyyyMMdd-HH}.%i.log.gz - 50MB - 168 - 5GB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - DEBUG - ACCEPT - DENY - - - - ${IOTDB_HOME}/node2/logs/log_trace.log - - ${IOTDB_HOME}/node2/logs/log-trace-%d{yyyyMMdd-HH}.%i.log.gz - 50MB - 168 - 5GB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - TRACE - ACCEPT - DENY - - - - System.out - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - DEBUG - - - - - ${IOTDB_HOME}/node2/logs/log_all.log - - ${IOTDB_HOME}/node2/logs/log-all-%d{yyyyMMdd-HH}.%i.log.gz - 50MB - 168 - 5GB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node2/logs/log_measure.log - - ${IOTDB_HOME}/node2/logs/log-measure-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node2/logs/log_sync.log - - ${IOTDB_HOME}/node2/logs/log-sync-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node2/logs/log_audit.log - - ${IOTDB_HOME}/node2/logs/log-audit-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node2/logs/log_query_debug.log - - ${IOTDB_HOME}/node2/logs/log-query-debug-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node2/logs/log_slow_sql.log - - ${IOTDB_HOME}/node2/logs/log-slow-sql-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node2/logs/log_query_frequency.log - - ${IOTDB_HOME}/node2/logs/log-query-frequency-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n 
- utf-8 - - - INFO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cluster/src/test/resources/node3conf/iotdb-cluster.properties b/cluster/src/test/resources/node3conf/iotdb-cluster.properties deleted file mode 100644 index 37a91d5be886..000000000000 --- a/cluster/src/test/resources/node3conf/iotdb-cluster.properties +++ /dev/null @@ -1,37 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -internal_ip=127.0.0.1 -internal_meta_port=9007 -internal_data_port=40014 -cluster_info_public_port=6569 -seed_nodes=127.0.0.1:9003,127.0.0.1:9005,127.0.0.1:9007 -default_replica_num=1 -consistency_level=mid -connection_timeout_ms=20000 -write_operation_timeout_ms=30000 -read_operation_timeout_ms=30000 -catch_up_timeout_ms=300000 -use_batch_in_catch_up=true -min_num_of_logs_in_mem=1000 -max_num_of_logs_in_mem=2000 -log_deletion_check_interval_second=-1 -is_use_async_server=false -is_use_async_applier=true -is_enable_raft_log_persistence=true -open_server_rpc_port=false diff --git a/cluster/src/test/resources/node3conf/iotdb-engine.properties b/cluster/src/test/resources/node3conf/iotdb-engine.properties deleted file mode 100644 index 640e367bb64c..000000000000 --- a/cluster/src/test/resources/node3conf/iotdb-engine.properties +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -# -#http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. - - -base_dir=target/node3/tmp -data_dirs=target/node3/data -wal_dir=target/node3/wal -index_root_dir=target/node3/index -udf_root_dir=target/node3/ext -tracing_dir=target/node3/data/tracing -enable_influxdb_rpc_service=false - -rpc_port=6671 -metrics_port=8183 -sync_server_port=5557 -mqtt_port=1885 \ No newline at end of file diff --git a/cluster/src/test/resources/node3conf/iotdb-metric.yml b/cluster/src/test/resources/node3conf/iotdb-metric.yml deleted file mode 100644 index 14ab7bc2d614..000000000000 --- a/cluster/src/test/resources/node3conf/iotdb-metric.yml +++ /dev/null @@ -1,21 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# whether enable the module -enableMetric: false \ No newline at end of file diff --git a/cluster/src/test/resources/node3conf/logback.xml b/cluster/src/test/resources/node3conf/logback.xml deleted file mode 100644 index 372410df6df5..000000000000 --- a/cluster/src/test/resources/node3conf/logback.xml +++ /dev/null @@ -1,282 +0,0 @@ - - - - - - - - ${IOTDB_HOME}/node3/logs/log_error.log - - ${IOTDB_HOME}/node3/logs/log-error-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - error - ACCEPT - DENY - - - - ${IOTDB_HOME}/node3/logs/log_warn.log - - ${IOTDB_HOME}/node3/logs/log-warn-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - WARN - ACCEPT - DENY - - - - ${IOTDB_HOME}/node3/logs/log_info.log - - ${IOTDB_HOME}/node3/logs/log-info-%d{yyyyMMdd-HH}.%i.log.gz - 50MB - 168 - 5GB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - ACCEPT - DENY - - - - ${IOTDB_HOME}/node3/logs/log_debug.log - - ${IOTDB_HOME}/node3/logs/log-debug-%d{yyyyMMdd-HH}.%i.log.gz - 50MB - 168 - 5GB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - DEBUG - ACCEPT - DENY - - - - ${IOTDB_HOME}/node3/logs/log_trace.log - - ${IOTDB_HOME}/node3/logs/log-trace-%d{yyyyMMdd-HH}.%i.log.gz - 50MB - 168 - 5GB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - TRACE - ACCEPT - DENY - - - - System.out - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - DEBUG - - - - - ${IOTDB_HOME}/node3/logs/log_all.log - - ${IOTDB_HOME}/node3/logs/log-all-%d{yyyyMMdd-HH}.%i.log.gz - 50MB - 168 - 5GB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node3/logs/log_measure.log - - ${IOTDB_HOME}/node3/logs/log-measure-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node3/logs/log_sync.log - - ${IOTDB_HOME}/node3/logs/log-sync-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node3/logs/log_audit.log - - ${IOTDB_HOME}/node3/logs/log-audit-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node3/logs/log_query_debug.log - - ${IOTDB_HOME}/node3/logs/log-query-debug-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node3/logs/log_slow_sql.log - - ${IOTDB_HOME}/node3/logs/log-slow-sql-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n - utf-8 - - - INFO - - - - ${IOTDB_HOME}/node3/logs/log_query_frequency.log - - ${IOTDB_HOME}/node3/logs/log-query-frequency-%d{yyyyMMdd-HH}.%i.log.gz - 10MB - 168 - 512MB - - true - - %d [%t] %-5p %C{25}:%L - %m %n 
- utf-8 - - - INFO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/confignode/pom.xml b/confignode/pom.xml index 875d23250956..0fd153c7affb 100644 --- a/confignode/pom.xml +++ b/confignode/pom.xml @@ -30,9 +30,9 @@ iotdb-confignode IoTDB ConfigNode - false - ${iotdb.test.skip} - ${iotdb.test.skip} + false + ${confignode.test.skip} + ${confignode.test.skip} @@ -50,11 +50,6 @@ iotdb-server ${project.version} - - org.apache.iotdb - iotdb-procedure - ${project.version} - junit junit @@ -107,6 +102,44 @@ + + + org.apache.maven.plugins + maven-surefire-plugin + + ${confignode.ut.skip} + pertest + + + + + skipConfigNodeTests + + + skipTests + true + + + + true + true + true + + + + skipUT_ConfigNode_Tests + + + skipUTs + true + + + + true + + + diff --git a/confignode/src/assembly/confignode.xml b/confignode/src/assembly/confignode.xml index 5982b6cdd81b..68d639b8faab 100644 --- a/confignode/src/assembly/confignode.xml +++ b/confignode/src/assembly/confignode.xml @@ -41,5 +41,9 @@ sbin 0755 + + ${maven.multiModuleProjectDirectory}/metrics/interface/src/main/assembly/resources/conf + conf + diff --git a/confignode/src/assembly/resources/conf/confignode-env.bat b/confignode/src/assembly/resources/conf/confignode-env.bat index 294b52be03a6..e00a464764fc 100644 --- a/confignode/src/assembly/resources/conf/confignode-env.bat +++ b/confignode/src/assembly/resources/conf/confignode-env.bat @@ -53,9 +53,9 @@ for /f %%b in ('wmic ComputerSystem get TotalPhysicalMemory ^| findstr "[0-9]"' set system_memory=%%b ) -echo wsh.echo FormatNumber(cdbl(%system_memory%)/(1024*1024), 0) > %temp%\tmp.vbs -for /f "tokens=*" %%a in ('cscript //nologo %temp%\tmp.vbs') do set system_memory_in_mb=%%a -del %temp%\tmp.vbs +echo wsh.echo FormatNumber(cdbl(%system_memory%)/(1024*1024), 0) > %CONFIGNODE_HOME%\sbin\tmp.vbs +for /f "tokens=*" %%a in ('cscript //nologo %CONFIGNODE_HOME%\sbin\tmp.vbs') do set system_memory_in_mb=%%a +del %CONFIGNODE_HOME%\sbin\tmp.vbs set system_memory_in_mb=%system_memory_in_mb:,=% set /a half_=%system_memory_in_mb%/2 @@ -111,7 +111,7 @@ set temp_buffer_pool_size=1024 @REM which equals DIRECT_MEMORY_SIZE / threads_number / temp_buffer_pool_size set MAX_CACHED_BUFFER_SIZE=%max_heap_size_in_mb%*1024*1024/%threads_number%/%temp_buffer_pool_size% -set CONFIGNODE_HEAP_OPTS=-Xmx%MAX_HEAP_SIZE% -Xms%HEAP_NEWSIZE% -Xlog:gc:"..\gc.log" +set CONFIGNODE_HEAP_OPTS=-Xmx%MAX_HEAP_SIZE% -Xms%HEAP_NEWSIZE% -Xlog:gc:"%CONFIGNODE_HOME%\gc.log" set CONFIGNODE_HEAP_OPTS=%CONFIGNODE_HEAP_OPTS% -XX:MaxDirectMemorySize=%MAX_DIRECT_MEMORY_SIZE% set CONFIGNODE_HEAP_OPTS=%CONFIGNODE_HEAP_OPTS% -Djdk.nio.maxCachedBufferSize=%MAX_CACHED_BUFFER_SIZE% diff --git a/confignode/src/assembly/resources/conf/iotdb-confignode.properties b/confignode/src/assembly/resources/conf/iotdb-confignode.properties index b0fa282e918b..86590d6f34b3 100644 --- a/confignode/src/assembly/resources/conf/iotdb-confignode.properties +++ b/confignode/src/assembly/resources/conf/iotdb-confignode.properties @@ -41,10 +41,11 @@ consensus_port=22278 # Format: ip:port # where the ip should be consistent with the target ConfigNode's confignode_rpc_address, # and the port should be consistent with the target ConfigNode's confignode_rpc_port. -# For the first ConfigNode to start, target_configNode points to its own ip:port. -# For other ConfigNodes that are started or restarted, target_ConfigNode points to any running ConfigNode's ip:port. +# For the first ConfigNode to start, config_nodes points to its own ip:port. 
+# For other ConfigNodes that are started or restarted, config_nodes points to any running ConfigNode's ip:port. # Datatype: String -target_confignode=0.0.0.0:22277 +config_nodes=0.0.0.0:22277 + #################### ### Consensus protocol configuration @@ -53,13 +54,20 @@ target_confignode=0.0.0.0:22277 # All parameters in Consensus protocol configuration is unmodifiable after ConfigNode starts for the first time. # And these parameters should be consistent within the ConfigNodeGroup. +# DataRegion consensus protocol type +# These consensus protocols are currently supported: +# 1. org.apache.iotdb.consensus.standalone.StandAloneConsensus(Consensus patterns optimized specifically for single replica) +# 2. org.apache.iotdb.consensus.ratis.RatisConsensus(Raft protocol) +# 3. org.apache.iotdb.consensus.multileader.MultiLeaderConsensus(weak consistency, high performance) +# Datatype: String +# data_region_consensus_protocol_class=org.apache.iotdb.consensus.standalone.StandAloneConsensus -# DataNode consensus protocol type +# SchemaRegion consensus protocol type # These consensus protocols are currently supported: -# 1. org.apache.iotdb.consensus.standalone.StandAloneConsensus(No protocol, only supports stand-alone machine) +# 1. org.apache.iotdb.consensus.standalone.StandAloneConsensus(Consensus patterns optimized specifically for single replica) # 2. org.apache.iotdb.consensus.ratis.RatisConsensus(Raft protocol) # Datatype: String -# data_node_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus +# schema_region_consensus_protocol_class=org.apache.iotdb.consensus.standalone.StandAloneConsensus #################### ### PartitionSlot configuration @@ -109,22 +117,12 @@ target_confignode=0.0.0.0:22277 # Default number of SchemaRegion replicas # Datatype: int -# schema_replication_factor=3 +# schema_replication_factor=1 # Default number of DataRegion replicas # Datatype: int -# data_replication_factor=3 - - -# The maximum number of SchemaRegions of each StorageGroup -# Datatype: int -# maximum_schema_region_count=4 - - -# The maximum number of DataRegions of each StorageGroup -# Datatype: int -# maximum_data_region_count=20 +# data_replication_factor=1 #################### @@ -196,25 +194,56 @@ target_confignode=0.0.0.0:22277 # If its prefix is "/", then the path is absolute. Otherwise, it is relative. # consensus_dir=data/consensus -# procedure wal dir -# If this property is unset, system will save the data in the default relative path directory under the confignode folder(i.e., %CONFIGNODE_HOME%/data/consensus). +# UDF lib dir +# If this property is unset, system will save the data in the default relative path directory under +# the UDF folder(i.e., %CONFIGNODE_HOME%/ext/udf). +# # If it is absolute, system will save the data in exact location it points to. -# If it is relative, system will save the data in the relative path directory it indicates under the confignode folder. -# Note: If data_dir is assigned an empty string(i.e.,zero-size), it will be handled as a relative path. -# For windows platform -# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. -# proc_wal_dir=data\\proc +# If it is relative, system will save the data in the relative path directory it indicates under the +# UDF folder. +# Note: If data_dir is assigned an empty string(i.e.,zero-size), it will be handled as a relative +# path. 
+# +# For Window platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is +# absolute. Otherwise, it is relative. +# udf_lib_dir=ext\\udf +# # For Linux platform # If its prefix is "/", then the path is absolute. Otherwise, it is relative. -# proc_wal_dir=data/proc +# udf_lib_dir=ext/udf + + +# temporary lib dir +# If this property is unset, system will save the data in the default relative path directory under +# the UDF folder(i.e., %CONFIGNODE_HOME%/ext/temporary). +# +# If it is absolute, system will save the data in exact location it points to. +# If it is relative, system will save the data in the relative path directory it indicates under the +# UDF folder. +# Note: If data_dir is assigned an empty string(i.e.,zero-size), it will be handled as a relative +# path. +# +# For Window platform +# If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is +# absolute. Otherwise, it is relative. +# temporary_lib_dir=ext\\temporary +# +# For Linux platform +# If its prefix is "/", then the path is absolute. Otherwise, it is relative. +# temporary_lib_dir=ext/temporary + + #################### ### Procedure Configuration #################### + # Default number of worker thread count # Datatype: int #procedure_core_worker_thread_size=4 + # Default time interval of completed procedure cleaner work in, time unit is second # Datatype: int #procedure_completed_clean_interval=30 @@ -224,3 +253,11 @@ target_confignode=0.0.0.0:22277 # Datatype: int #procedure_completed_evict_ttl=800 +#################### +### Heartbeat configuration +#################### + + +# The heartbeat interval in milliseconds, default is 3000ms +# Datatype: long +# heartbeat_interval=1000 diff --git a/confignode/src/assembly/resources/conf/logback.xml b/confignode/src/assembly/resources/conf/logback.xml index e5181c56ddb0..28d214b2a424 100644 --- a/confignode/src/assembly/resources/conf/logback.xml +++ b/confignode/src/assembly/resources/conf/logback.xml @@ -106,7 +106,7 @@ utf-8 - DEBUG + ${CONSOLE_LOG_LEVEL:-DEBUG} @@ -136,5 +136,5 @@ - + diff --git a/confignode/src/assembly/resources/sbin/remove-confignode.sh b/confignode/src/assembly/resources/sbin/remove-confignode.sh new file mode 100644 index 000000000000..eace0d2912ec --- /dev/null +++ b/confignode/src/assembly/resources/sbin/remove-confignode.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + + +echo ---------------------------- +echo Starting to remove IoTDB ConfigNode +echo ---------------------------- + +if [ -z "${CONFIGNODE_HOME}" ]; then + export CONFIGNODE_HOME="`dirname "$0"`/.." 
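The directory comments in iotdb-confignode.properties above boil down to one rule: an absolute path is used exactly as given, while an empty or relative value is resolved under the ConfigNode home folder. A minimal sketch of that rule, assuming a CONFIGNODE_HOME system property; the class and default path below are illustrative only, not the production resolver:

import java.io.File;

public class DirResolutionSketch {
  // Resolve a configured directory the way the property comments above describe:
  // absolute values are used as-is, everything else is placed under the home folder.
  static String resolveDir(String configuredDir, String homeDir) {
    if (new File(configuredDir).isAbsolute() || homeDir == null || homeDir.isEmpty()) {
      return configuredDir;
    }
    return homeDir.endsWith(File.separator)
        ? homeDir + configuredDir
        : homeDir + File.separator + configuredDir;
  }

  public static void main(String[] args) {
    String home = System.getProperty("CONFIGNODE_HOME", "/opt/confignode"); // default is illustrative
    System.out.println(resolveDir("ext/udf", home));      // relative          -> <home>/ext/udf
    System.out.println(resolveDir("/var/lib/udf", home)); // absolute on Linux -> returned unchanged
  }
}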
+fi + +CONFIGNODE_CONF=${CONFIGNODE_HOME}/conf +CONFIGNODE_LOGS=${CONFIGNODE_HOME}/logs + +is_conf_path=false +for arg do + shift + if [ "$arg" == "-c" ]; then + is_conf_path=true + continue + fi + if [ $is_conf_path == true ]; then + CONFIGNODE_CONF=$arg + is_conf_path=false + continue + fi + set -- "$@" "$arg" +done + +CONF_PARAMS="-r "$* + +if [ -f "$CONFIGNODE_CONF/confignode-env.sh" ]; then + if [ "$#" -ge "1" -a "$1" == "printgc" ]; then + . "$CONFIGNODE_CONF/confignode-env.sh" "printgc" + else + . "$CONFIGNODE_CONF/confignode-env.sh" + fi +else + echo "can't find $CONFIGNODE_CONF/confignode-env.sh" +fi + +CLASSPATH="" +for f in ${CONFIGNODE_HOME}/lib/*.jar; do + CLASSPATH=${CLASSPATH}":"$f +done +classname=org.apache.iotdb.confignode.service.ConfigNode + +launch_service() +{ + class="$1" + confignode_parms="-Dlogback.configurationFile=${CONFIGNODE_CONF}/logback.xml" + confignode_parms="$confignode_parms -DCONFIGNODE_HOME=${CONFIGNODE_HOME}" + confignode_parms="$confignode_parms -DCONFIGNODE_CONF=${CONFIGNODE_CONF}" + exec "$JAVA" $illegal_access_params $confignode_parms $CONFIGNODE_JMX_OPTS -cp "$CLASSPATH" "$class" $CONF_PARAMS + return $? +} + +# Start up the service +launch_service "$classname" + +exit $? diff --git a/confignode/src/assembly/resources/sbin/start-confignode.bat b/confignode/src/assembly/resources/sbin/start-confignode.bat index 62ff64ec96d9..918c0b997e5c 100644 --- a/confignode/src/assembly/resources/sbin/start-confignode.bat +++ b/confignode/src/assembly/resources/sbin/start-confignode.bat @@ -93,7 +93,7 @@ set JAVA_OPTS=-ea^ @REM ***** CLASSPATH library setting ***** @REM Ensure that any user defined CLASSPATH variables are not used on startup -set CLASSPATH="%CONFIGNODE_HOME%\lib\*" +if EXIST %CONFIGNODE_HOME%\lib (set CLASSPATH="%CONFIGNODE_HOME%\lib\*") else set CLASSPATH="%CONFIGNODE_HOME%\..\lib\*" set CLASSPATH=%CLASSPATH%;iotdb.ConfigNode goto okClasspath diff --git a/confignode/src/assembly/resources/sbin/start-confignode.sh b/confignode/src/assembly/resources/sbin/start-confignode.sh index dffd31f5cd92..8a04fb47f591 100644 --- a/confignode/src/assembly/resources/sbin/start-confignode.sh +++ b/confignode/src/assembly/resources/sbin/start-confignode.sh @@ -57,8 +57,14 @@ else echo "can't find $CONFIGNODE_CONF/confignode-env.sh" fi +if [ -d ${CONFIGNODE_HOME}/lib ]; then +LIB_PATH=${CONFIGNODE_HOME}/lib +else +LIB_PATH=${CONFIGNODE_HOME}/../lib +fi + CLASSPATH="" -for f in ${CONFIGNODE_HOME}/lib/*.jar; do +for f in ${LIB_PATH}/*.jar; do CLASSPATH=${CLASSPATH}":"$f done classname=org.apache.iotdb.confignode.service.ConfigNode diff --git a/confignode/src/assembly/resources/sbin/stop-confignode.bat b/confignode/src/assembly/resources/sbin/stop-confignode.bat index 9771f475ad00..bcfc5d864303 100644 --- a/confignode/src/assembly/resources/sbin/stop-confignode.bat +++ b/confignode/src/assembly/resources/sbin/stop-confignode.bat @@ -22,7 +22,7 @@ set current_dir=%~dp0 set superior_dir=%current_dir%\..\ -for /f "eol=; tokens=2,2 delims==" %%i in ('findstr /i "rpc_port" +for /f "eol=; tokens=2,2 delims==" %%i in ('findstr /i "^rpc_port" %superior_dir%\conf\iotdb-confignode.properties') do ( set rpc_port=%%i ) diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/client/AsyncDataNodeClientPool.java b/confignode/src/main/java/org/apache/iotdb/confignode/client/AsyncDataNodeClientPool.java index b4333651c175..9ecab37d8af9 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/client/AsyncDataNodeClientPool.java +++ 
b/confignode/src/main/java/org/apache/iotdb/confignode/client/AsyncDataNodeClientPool.java @@ -18,18 +18,34 @@ */ package org.apache.iotdb.confignode.client; +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.common.rpc.thrift.TFlushReq; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.commons.client.IClientManager; import org.apache.iotdb.commons.client.async.AsyncDataNodeInternalServiceClient; import org.apache.iotdb.confignode.client.handlers.CreateRegionHandler; +import org.apache.iotdb.confignode.client.handlers.FlushHandler; +import org.apache.iotdb.confignode.client.handlers.FunctionManagementHandler; +import org.apache.iotdb.confignode.client.handlers.HeartbeatHandler; +import org.apache.iotdb.confignode.consensus.request.write.CreateRegionsReq; import org.apache.iotdb.mpp.rpc.thrift.TCreateDataRegionReq; +import org.apache.iotdb.mpp.rpc.thrift.TCreateFunctionRequest; import org.apache.iotdb.mpp.rpc.thrift.TCreateSchemaRegionReq; +import org.apache.iotdb.mpp.rpc.thrift.TDropFunctionRequest; +import org.apache.iotdb.mpp.rpc.thrift.THeartbeatReq; import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.util.BitSet; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.CountDownLatch; /** Asynchronously send RPC requests to DataNodes. See mpp.thrift for more details. */ public class AsyncDataNodeClientPool { @@ -45,12 +61,120 @@ private AsyncDataNodeClientPool() { new ConfigNodeClientPoolFactory.AsyncDataNodeInternalServiceClientPoolFactory()); } + /** + * Execute CreateRegionsReq asynchronously + * + * @param createRegionGroupsReq CreateRegionsReq + * @param ttlMap Map + */ + public void createRegions(CreateRegionsReq createRegionGroupsReq, Map ttlMap) { + + // Index of each Region + int index = 0; + // Number of regions to be created + int regionNum = 0; + // Map> + Map> indexMap = new TreeMap<>(); + + // Assign an independent index to each Region + for (Map.Entry> entry : + createRegionGroupsReq.getRegionGroupMap().entrySet()) { + for (TRegionReplicaSet regionReplicaSet : entry.getValue()) { + regionNum += regionReplicaSet.getDataNodeLocationsSize(); + for (TDataNodeLocation dataNodeLocation : regionReplicaSet.getDataNodeLocations()) { + indexMap + .computeIfAbsent(regionReplicaSet.getRegionId(), idMap -> new TreeMap<>()) + .put(dataNodeLocation.getDataNodeId(), index); + index += 1; + } + } + } + + BitSet bitSet = new BitSet(regionNum); + for (int retry = 0; retry < 3; retry++) { + CountDownLatch latch = new CountDownLatch(regionNum - bitSet.cardinality()); + createRegionGroupsReq + .getRegionGroupMap() + .forEach( + (storageGroup, regionReplicaSets) -> { + // Enumerate each RegionReplicaSet + regionReplicaSets.forEach( + regionReplicaSet -> { + // Enumerate each Region + regionReplicaSet + .getDataNodeLocations() + .forEach( + dataNodeLocation -> { + // Skip those created successfully + if (!bitSet.get( + indexMap + .get(regionReplicaSet.getRegionId()) + .get(dataNodeLocation.getDataNodeId()))) { + TEndPoint endPoint = dataNodeLocation.getInternalEndPoint(); + CreateRegionHandler handler = + new CreateRegionHandler( + indexMap + .get(regionReplicaSet.getRegionId()) + .get(dataNodeLocation.getDataNodeId()), + bitSet, + latch, + regionReplicaSet.getRegionId(), + 
dataNodeLocation); + + switch (regionReplicaSet.getRegionId().getType()) { + case SchemaRegion: + createSchemaRegion( + endPoint, + genCreateSchemaRegionReq(storageGroup, regionReplicaSet), + handler); + break; + case DataRegion: + createDataRegion( + endPoint, + genCreateDataRegionReq( + storageGroup, + regionReplicaSet, + ttlMap.get(storageGroup)), + handler); + } + } + }); + }); + }); + + try { + // Waiting until this batch of create requests done + latch.await(); + } catch (InterruptedException e) { + LOGGER.error("ClusterSchemaManager was interrupted during create Regions on DataNodes", e); + } + + if (bitSet.cardinality() == regionNum) { + // Break if all creations success + break; + } + } + + if (bitSet.cardinality() < regionNum) { + LOGGER.error( + "Failed to create some SchemaRegions or DataRegions on DataNodes. Please check former logs."); + } + } + + private TCreateSchemaRegionReq genCreateSchemaRegionReq( + String storageGroup, TRegionReplicaSet regionReplicaSet) { + TCreateSchemaRegionReq req = new TCreateSchemaRegionReq(); + req.setStorageGroup(storageGroup); + req.setRegionReplicaSet(regionReplicaSet); + return req; + } + /** * Create a SchemaRegion on specific DataNode * * @param endPoint The specific DataNode */ - public void createSchemaRegion( + private void createSchemaRegion( TEndPoint endPoint, TCreateSchemaRegionReq req, CreateRegionHandler handler) { AsyncDataNodeInternalServiceClient client; try { @@ -63,6 +187,15 @@ public void createSchemaRegion( } } + private TCreateDataRegionReq genCreateDataRegionReq( + String storageGroup, TRegionReplicaSet regionReplicaSet, long TTL) { + TCreateDataRegionReq req = new TCreateDataRegionReq(); + req.setStorageGroup(storageGroup); + req.setRegionReplicaSet(regionReplicaSet); + req.setTtl(TTL); + return req; + } + /** * Create a DataRegion on specific DataNode * @@ -81,6 +214,21 @@ public void createDataRegion( } } + /** + * Only used in LoadManager + * + * @param endPoint The specific DataNode + */ + public void getHeartBeat(TEndPoint endPoint, THeartbeatReq req, HeartbeatHandler handler) { + AsyncDataNodeInternalServiceClient client; + try { + client = clientManager.borrowClient(endPoint); + client.getHeartBeat(req, handler); + } catch (Exception e) { + LOGGER.error("Asking DataNode: {}, for heartbeat failed", endPoint, e); + } + } + /** * Always call this interface when a DataNode is restarted or removed * @@ -90,6 +238,50 @@ public void resetClient(TEndPoint endPoint) { clientManager.clear(endPoint); } + /** + * Only used in UDFManager + * + * @param endPoint The specific DataNode + */ + public void createFunction( + TEndPoint endPoint, TCreateFunctionRequest request, FunctionManagementHandler handler) { + try { + clientManager.borrowClient(endPoint).createFunction(request, handler); + } catch (Exception e) { + LOGGER.error("Failed to asking DataNode to create function: {}", endPoint, e); + } + } + + /** + * Only used in UDFManager + * + * @param endPoint The specific DataNode + */ + public void dropFunction( + TEndPoint endPoint, TDropFunctionRequest request, FunctionManagementHandler handler) { + try { + clientManager.borrowClient(endPoint).dropFunction(request, handler); + } catch (Exception e) { + LOGGER.error("Failed to asking DataNode to create function: {}", endPoint, e); + } + } + + /** + * Flush on specific DataNode + * + * @param endPoint The specific DataNode + */ + public void flush(TEndPoint endPoint, TFlushReq flushReq, FlushHandler handler) { + for (int retry = 0; retry < 3; retry++) { + try { + 
clientManager.borrowClient(endPoint).flush(flushReq, handler); + return; + } catch (Exception e) { + LOGGER.error("Failed to asking DataNode to flush: {}", endPoint, e); + } + } + } + // TODO: Is the ClientPool must be a singleton? private static class ClientPoolHolder { diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/client/ConfigNodeClientPoolFactory.java b/confignode/src/main/java/org/apache/iotdb/confignode/client/ConfigNodeClientPoolFactory.java index 57f9ca6ef6ef..f554a851c533 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/client/ConfigNodeClientPoolFactory.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/client/ConfigNodeClientPoolFactory.java @@ -26,7 +26,7 @@ import org.apache.iotdb.commons.client.IClientPoolFactory; import org.apache.iotdb.commons.client.async.AsyncDataNodeInternalServiceClient; import org.apache.iotdb.commons.client.sync.SyncDataNodeInternalServiceClient; -import org.apache.iotdb.confignode.conf.ConfigNodeConf; +import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.commons.pool2.KeyedObjectPool; @@ -34,7 +34,7 @@ public class ConfigNodeClientPoolFactory { - private static final ConfigNodeConf conf = ConfigNodeDescriptor.getInstance().getConf(); + private static final ConfigNodeConfig conf = ConfigNodeDescriptor.getInstance().getConf(); private ConfigNodeClientPoolFactory() {} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/client/SyncConfigNodeClientPool.java b/confignode/src/main/java/org/apache/iotdb/confignode/client/SyncConfigNodeClientPool.java index 329ce205d4b1..1ac87a6764d2 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/client/SyncConfigNodeClientPool.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/client/SyncConfigNodeClientPool.java @@ -18,11 +18,12 @@ */ package org.apache.iotdb.confignode.client; +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.client.IClientManager; import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; -import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.commons.utils.StatusUtils; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterReq; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterResp; import org.apache.iotdb.db.client.DataNodeClientPoolFactory; @@ -31,6 +32,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.List; import java.util.concurrent.TimeUnit; /** Synchronously send RPC requests to ConfigNode. See confignode.thrift for more details. 
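createRegions above avoids re-sending requests that already succeeded: each (Region, DataNode) pair is given a fixed index, a BitSet marks the indices that completed, and every retry round waits on a CountDownLatch sized to the remaining work before deciding what is left. A self-contained sketch of that retry pattern, with the asynchronous RPC replaced by a simulated task and all names illustrative:

import java.util.BitSet;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;

public class RetryUntilAllDoneSketch {
  public static void main(String[] args) throws InterruptedException {
    int taskNum = 8;
    BitSet done = new BitSet(taskNum);

    for (int retry = 0; retry < 3 && done.cardinality() < taskNum; retry++) {
      CountDownLatch latch = new CountDownLatch(taskNum - done.cardinality());
      for (int i = 0; i < taskNum; i++) {
        if (done.get(i)) {
          continue; // skip tasks that already succeeded in an earlier round
        }
        int index = i;
        // Stand-in for an asynchronous RPC whose handler marks the BitSet and counts down.
        CompletableFuture.runAsync(
            () -> {
              if (ThreadLocalRandom.current().nextBoolean()) {
                synchronized (done) {
                  done.set(index);
                }
              }
              latch.countDown();
            });
      }
      latch.await(); // wait for this round to finish before deciding what to retry
    }

    System.out.println(
        done.cardinality() == taskNum ? "all tasks succeeded" : "some tasks still failed");
  }
}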
*/ @@ -38,54 +40,132 @@ public class SyncConfigNodeClientPool { private static final Logger LOGGER = LoggerFactory.getLogger(SyncConfigNodeClientPool.class); - private static final int retryNum = 5; + private static final int retryNum = 6; private final IClientManager clientManager; + private TEndPoint configNodeLeader; + private SyncConfigNodeClientPool() { clientManager = new IClientManager.Factory() .createClientManager( new DataNodeClientPoolFactory.SyncConfigNodeIServiceClientPoolFactory()); + configNodeLeader = new TEndPoint(); + } + + private void updateConfigNodeLeader(TSStatus status) { + if (status.isSetRedirectNode()) { + configNodeLeader = status.getRedirectNode(); + } else { + configNodeLeader = null; + } } /** Only use registerConfigNode when the ConfigNode is first startup. */ public TConfigNodeRegisterResp registerConfigNode( TEndPoint endPoint, TConfigNodeRegisterReq req) { // TODO: Unified retry logic + Throwable lastException = null; for (int retry = 0; retry < retryNum; retry++) { try (SyncConfigNodeIServiceClient client = clientManager.borrowClient(endPoint)) { return client.registerConfigNode(req); } catch (Exception e) { - LOGGER.warn("Register ConfigNode failed, retrying...", e); - doRetryWait(); + lastException = e; + LOGGER.warn("Register ConfigNode failed because {}, retrying {}...", e.getMessage(), retry); + doRetryWait(retry); } } - LOGGER.error("Register ConfigNode failed"); + LOGGER.error("Register ConfigNode failed", lastException); return new TConfigNodeRegisterResp() .setStatus( new TSStatus(TSStatusCode.ALL_RETRY_FAILED.getStatusCode()) - .setMessage("All retry failed.")); + .setMessage("All retry failed due to " + lastException.getMessage())); } - public TSStatus applyConfigNode(TEndPoint endPoint, TConfigNodeLocation configNodeLocation) { + public TSStatus addConsensusGroup( + TEndPoint endPoint, List configNodeLocation) { // TODO: Unified retry logic + Throwable lastException = null; for (int retry = 0; retry < retryNum; retry++) { try (SyncConfigNodeIServiceClient client = clientManager.borrowClient(endPoint)) { - return client.applyConfigNode(configNodeLocation); + TConfigNodeRegisterResp registerResp = new TConfigNodeRegisterResp(); + registerResp.setConfigNodeList(configNodeLocation); + registerResp.setStatus(StatusUtils.OK); + return client.addConsensusGroup(registerResp); + } catch (Throwable e) { + lastException = e; + LOGGER.warn( + "Add Consensus Group failed because {}, retrying {} ...", e.getMessage(), retry); + doRetryWait(retry); + } + } + LOGGER.error("Add ConsensusGroup failed", lastException); + return new TSStatus(TSStatusCode.ALL_RETRY_FAILED.getStatusCode()) + .setMessage("All retry failed due to " + lastException.getMessage()); + } + + /** + * ConfigNode Leader stop any ConfigNode in the cluster + * + * @param configNodeLocations confignode_list of confignode-system.properties + * @param configNodeLocation To be removed ConfigNode + * @return SUCCESS_STATUS: remove ConfigNode success, other status remove failed + */ + public TSStatus removeConfigNode( + List configNodeLocations, TConfigNodeLocation configNodeLocation) { + // TODO: Unified retry logic + Throwable lastException = null; + for (TConfigNodeLocation nodeLocation : configNodeLocations) { + for (int retry = 0; retry < retryNum; retry++) { + try (SyncConfigNodeIServiceClient client = + clientManager.borrowClient(nodeLocation.getInternalEndPoint())) { + TSStatus status = client.removeConfigNode(configNodeLocation); + while (status.getCode() == 
TSStatusCode.NEED_REDIRECTION.getStatusCode()) { + TimeUnit.MILLISECONDS.sleep(2000); + updateConfigNodeLeader(status); + try (SyncConfigNodeIServiceClient clientLeader = + clientManager.borrowClient(configNodeLeader)) { + status = clientLeader.removeConfigNode(configNodeLocation); + } + } + return status; + } catch (Throwable e) { + lastException = e; + LOGGER.warn( + "Remove ConfigNode failed because {}, retrying {} ...", e.getMessage(), retry); + doRetryWait(retry); + } + } + } + + LOGGER.error("Remove ConfigNode failed", lastException); + return new TSStatus(TSStatusCode.ALL_RETRY_FAILED.getStatusCode()) + .setMessage("All retry failed due to " + lastException.getMessage()); + } + + /** Only use stopConfigNode when the ConfigNode is removed. */ + public TSStatus stopConfigNode(TConfigNodeLocation configNodeLocation) { + // TODO: Unified retry logic + Throwable lastException = null; + for (int retry = 0; retry < retryNum; retry++) { + try (SyncConfigNodeIServiceClient client = + clientManager.borrowClient(configNodeLocation.getInternalEndPoint())) { + return client.stopConfigNode(configNodeLocation); } catch (Exception e) { - LOGGER.warn("Apply ConfigNode failed, retrying...", e); - doRetryWait(); + lastException = e; + LOGGER.warn("Stop ConfigNode failed because {}, retrying {}...", e.getMessage(), retry); + doRetryWait(retry); } } - LOGGER.error("Apply ConfigNode failed"); + LOGGER.error("Stop ConfigNode failed", lastException); return new TSStatus(TSStatusCode.ALL_RETRY_FAILED.getStatusCode()) - .setMessage("All retry failed."); + .setMessage("All retry failed due to" + lastException.getMessage()); } - private void doRetryWait() { + private void doRetryWait(int retryNum) { try { - TimeUnit.MILLISECONDS.sleep(100); + TimeUnit.MILLISECONDS.sleep(100L * (long) Math.pow(2, retryNum)); } catch (InterruptedException e) { LOGGER.error("Retry wait failed.", e); } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/client/SyncDataNodeClientPool.java b/confignode/src/main/java/org/apache/iotdb/confignode/client/SyncDataNodeClientPool.java new file mode 100644 index 000000000000..22f184a3455f --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/client/SyncDataNodeClientPool.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
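The reworked doRetryWait above swaps the fixed 100 ms pause for exponential backoff: the wait after failed attempt n is 100 * 2^n milliseconds, so the six attempts used by this pool wait roughly 100, 200, 400, 800, 1600 and 3200 ms between tries. A small stand-alone sketch of the same schedule:

import java.util.concurrent.TimeUnit;

public class BackoffSketch {
  public static void main(String[] args) throws InterruptedException {
    int maxRetries = 6;
    for (int retry = 0; retry < maxRetries; retry++) {
      long waitMs = 100L * (long) Math.pow(2, retry); // 100, 200, 400, 800, 1600, 3200
      System.out.println("attempt " + retry + " would wait " + waitMs + " ms before retrying");
      TimeUnit.MILLISECONDS.sleep(waitMs);
    }
  }
}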
+ */ +package org.apache.iotdb.confignode.client; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.client.IClientManager; +import org.apache.iotdb.commons.client.sync.SyncDataNodeInternalServiceClient; +import org.apache.iotdb.mpp.rpc.thrift.TInvalidateCacheReq; +import org.apache.iotdb.mpp.rpc.thrift.TInvalidatePermissionCacheReq; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.apache.thrift.TException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** Synchronously send RPC requests to DataNodes. See mpp.thrift for more details. */ +public class SyncDataNodeClientPool { + + private static final Logger LOGGER = LoggerFactory.getLogger(SyncDataNodeClientPool.class); + + private final IClientManager clientManager; + + private SyncDataNodeClientPool() { + clientManager = + new IClientManager.Factory() + .createClientManager( + new ConfigNodeClientPoolFactory.SyncDataNodeInternalServiceClientPoolFactory()); + } + + public TSStatus invalidatePartitionCache( + TEndPoint endPoint, TInvalidateCacheReq invalidateCacheReq) { + TSStatus status; + try (SyncDataNodeInternalServiceClient client = clientManager.borrowClient(endPoint)) { + status = client.invalidatePartitionCache(invalidateCacheReq); + LOGGER.info("Invalid Schema Cache {} successfully", invalidateCacheReq); + } catch (IOException e) { + LOGGER.error("Can't connect to DataNode {}", endPoint, e); + status = new TSStatus(TSStatusCode.TIME_OUT.getStatusCode()); + } catch (TException e) { + LOGGER.error("Invalid Schema Cache on DataNode {} failed", endPoint, e); + status = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); + } + return status; + } + + public TSStatus invalidateSchemaCache( + TEndPoint endPoint, TInvalidateCacheReq invalidateCacheReq) { + TSStatus status; + try (SyncDataNodeInternalServiceClient client = clientManager.borrowClient(endPoint)) { + status = client.invalidateSchemaCache(invalidateCacheReq); + } catch (IOException e) { + LOGGER.error("Can't connect to DataNode {}", endPoint, e); + status = new TSStatus(TSStatusCode.TIME_OUT.getStatusCode()); + } catch (TException e) { + LOGGER.error("Invalid Schema Cache on DataNode {} failed", endPoint, e); + status = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); + } + return status; + } + + public void deleteRegions(Set deletedRegionSet) { + Map> regionInfoMap = new HashMap<>(); + deletedRegionSet.forEach( + (tRegionReplicaSet) -> { + for (TDataNodeLocation dataNodeLocation : tRegionReplicaSet.getDataNodeLocations()) { + regionInfoMap + .computeIfAbsent(dataNodeLocation, k -> new ArrayList<>()) + .add(tRegionReplicaSet.getRegionId()); + } + }); + LOGGER.info("Current regionInfoMap {} ", regionInfoMap); + regionInfoMap.forEach( + (dataNodeLocation, regionIds) -> + deleteRegions(dataNodeLocation.getInternalEndPoint(), regionIds, deletedRegionSet)); + } + + private void deleteRegions( + TEndPoint endPoint, + List regionIds, + Set deletedRegionSet) { + try (SyncDataNodeInternalServiceClient client = clientManager.borrowClient(endPoint)) { + for (TConsensusGroupId regionId : regionIds) { + 
LOGGER.debug("Delete region {} ", regionId); + final TSStatus status = client.deleteRegion(regionId); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + LOGGER.info("DELETE Region {} successfully", regionId); + deletedRegionSet.removeIf(k -> k.getRegionId().equals(regionId)); + } + } + } catch (IOException e) { + LOGGER.error("Can't connect to DataNode {}", endPoint, e); + } catch (TException e) { + LOGGER.error("Delete Region on DataNode {} failed", endPoint, e); + } + } + + public TSStatus invalidatePermissionCache( + TEndPoint endPoint, TInvalidatePermissionCacheReq invalidatePermissionCacheReq) { + TSStatus status; + try (SyncDataNodeInternalServiceClient client = clientManager.borrowClient(endPoint)) { + status = client.invalidatePermissionCache(invalidatePermissionCacheReq); + } catch (IOException e) { + LOGGER.error("Can't connect to DataNode {}", endPoint, e); + status = new TSStatus(TSStatusCode.TIME_OUT.getStatusCode()); + } catch (TException e) { + LOGGER.error("Invalid Permission Cache on DataNode {} failed", endPoint, e); + status = new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); + } + return status; + } + + // TODO: Is the ClientPool must be a singleton? + private static class ClientPoolHolder { + + private static final SyncDataNodeClientPool INSTANCE = new SyncDataNodeClientPool(); + + private ClientPoolHolder() { + // Empty constructor + } + } + + public static SyncDataNodeClientPool getInstance() { + return ClientPoolHolder.INSTANCE; + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/client/handlers/FlushHandler.java b/confignode/src/main/java/org/apache/iotdb/confignode/client/handlers/FlushHandler.java new file mode 100644 index 000000000000..e45ec5852508 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/client/handlers/FlushHandler.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
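deleteRegions above first inverts the replica sets into a per-DataNode map with computeIfAbsent, so each DataNode gets a single connection carrying all of its region ids instead of one call per replica. The grouping step in isolation, with plain strings and integers standing in for the thrift types:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupRegionsSketch {
  public static void main(String[] args) {
    // (regionId, dataNodeId) pairs, e.g. region 1 is replicated on dn-1 and dn-2
    int[][] regionToNodes = {{1, 1}, {1, 2}, {2, 2}, {3, 1}};

    Map<String, List<Integer>> regionsPerNode = new HashMap<>();
    for (int[] pair : regionToNodes) {
      String dataNode = "dn-" + pair[1];
      regionsPerNode.computeIfAbsent(dataNode, k -> new ArrayList<>()).add(pair[0]);
    }

    // One batched call per DataNode instead of one call per region replica.
    regionsPerNode.forEach(
        (node, regionIds) -> System.out.println(node + " -> delete regions " + regionIds));
  }
}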
+ */ +package org.apache.iotdb.confignode.client.handlers; + +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.rpc.RpcUtils; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.apache.thrift.async.AsyncMethodCallback; + +import java.util.List; +import java.util.concurrent.CountDownLatch; + +public class FlushHandler implements AsyncMethodCallback { + + private final TDataNodeLocation dataNodeLocation; + private final CountDownLatch countDownLatch; + private final List dataNodeResponseStatus; + + public FlushHandler( + TDataNodeLocation dataNodeLocation, + CountDownLatch countDownLatch, + List dataNodeResponseStatus) { + this.dataNodeLocation = dataNodeLocation; + this.countDownLatch = countDownLatch; + this.dataNodeResponseStatus = dataNodeResponseStatus; + } + + @Override + public void onComplete(TSStatus response) { + countDownLatch.countDown(); + dataNodeResponseStatus.add(response); + } + + @Override + public void onError(Exception exception) { + countDownLatch.countDown(); + dataNodeResponseStatus.add( + new TSStatus( + RpcUtils.getStatus( + TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode(), + "Flush error on DataNode: {id=" + + dataNodeLocation.getDataNodeId() + + ", internalEndPoint=" + + dataNodeLocation.getInternalEndPoint() + + "}" + + exception.getMessage()))); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/client/handlers/FunctionManagementHandler.java b/confignode/src/main/java/org/apache/iotdb/confignode/client/handlers/FunctionManagementHandler.java new file mode 100644 index 000000000000..6ec4b4f06f87 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/client/handlers/FunctionManagementHandler.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.client.handlers; + +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.apache.thrift.async.AsyncMethodCallback; + +import java.util.List; +import java.util.concurrent.CountDownLatch; + +public class FunctionManagementHandler implements AsyncMethodCallback { + + private final CountDownLatch countDownLatch; + private final List dataNodeResponseStatus; + private final String ip; + private final int port; + + public FunctionManagementHandler( + CountDownLatch countDownLatch, List dataNodeResponseStatus, String ip, int port) { + this.countDownLatch = countDownLatch; + this.dataNodeResponseStatus = dataNodeResponseStatus; + this.ip = ip; + this.port = port; + } + + @Override + public void onComplete(TSStatus response) { + countDownLatch.countDown(); + dataNodeResponseStatus.add(response); + } + + @Override + public void onError(Exception exception) { + countDownLatch.countDown(); + dataNodeResponseStatus.add( + new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()) + .setMessage("DataNode[" + ip + ":" + port + "] " + exception.getMessage())); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/client/handlers/HeartbeatHandler.java b/confignode/src/main/java/org/apache/iotdb/confignode/client/handlers/HeartbeatHandler.java new file mode 100644 index 000000000000..e4a4a27d55b0 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/client/handlers/HeartbeatHandler.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
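FlushHandler and FunctionManagementHandler above share one contract: onComplete and onError both record a status and count a latch down, so a caller can block on the latch and then inspect exactly one entry per DataNode. The calling side is not part of this diff; a hedged sketch of how such a handler is typically driven, with the RPC simulated and no thrift types:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;

public class HandlerAggregationSketch {
  public static void main(String[] args) throws InterruptedException {
    int dataNodeNum = 3;
    CountDownLatch latch = new CountDownLatch(dataNodeNum);
    List<String> responses = new CopyOnWriteArrayList<>(); // one status per DataNode

    for (int i = 0; i < dataNodeNum; i++) {
      int id = i;
      // Stand-in for an async RPC; the real code passes a handler whose
      // onComplete/onError both add a status and call latch.countDown().
      CompletableFuture.runAsync(
          () -> {
            responses.add("DataNode-" + id + ": SUCCESS");
            latch.countDown();
          });
    }

    latch.await(); // block until every DataNode has answered or failed
    responses.forEach(System.out::println);
  }
}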
+ */ +package org.apache.iotdb.confignode.client.handlers; + +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.confignode.manager.load.heartbeat.HeartbeatCache; +import org.apache.iotdb.confignode.manager.load.heartbeat.HeartbeatPackage; +import org.apache.iotdb.mpp.rpc.thrift.THeartbeatResp; + +import org.apache.thrift.async.AsyncMethodCallback; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class HeartbeatHandler implements AsyncMethodCallback { + + private static final Logger LOGGER = LoggerFactory.getLogger(HeartbeatHandler.class); + + // Update HeartbeatCache when success + private final TDataNodeLocation dataNodeLocation; + private final HeartbeatCache heartbeatCache; + + public HeartbeatHandler(TDataNodeLocation dataNodeLocation, HeartbeatCache heartbeatCache) { + this.dataNodeLocation = dataNodeLocation; + this.heartbeatCache = heartbeatCache; + } + + @Override + public void onComplete(THeartbeatResp tHeartbeatResp) { + heartbeatCache.cacheHeartBeat( + new HeartbeatPackage(tHeartbeatResp.getHeartbeatTimestamp(), System.currentTimeMillis())); + } + + @Override + public void onError(Exception e) { + LOGGER.warn( + "Heartbeat error on DataNode: {id={}, internalEndPoint={}}", + dataNodeLocation.getDataNodeId(), + dataNodeLocation.getInternalEndPoint(), + e); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConf.java b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConf.java deleted file mode 100644 index 96c481b02750..000000000000 --- a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConf.java +++ /dev/null @@ -1,384 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
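HeartbeatHandler above caches two timestamps per successful heartbeat: the timestamp carried in the response and the local arrival time. How the LoadManager consumes the cache is outside this diff, but assuming the response echoes the request's send time, each sample yields an apparent round-trip latency; the Sample class below is an illustrative stand-in, not the real HeartbeatPackage:

public class HeartbeatSampleSketch {
  /** Illustrative pair mirroring the (send time, receive time) stored per heartbeat. */
  static final class Sample {
    final long sendTimestampMs;
    final long receiveTimestampMs;

    Sample(long sendTimestampMs, long receiveTimestampMs) {
      this.sendTimestampMs = sendTimestampMs;
      this.receiveTimestampMs = receiveTimestampMs;
    }

    long roundTripMs() {
      return receiveTimestampMs - sendTimestampMs;
    }
  }

  public static void main(String[] args) {
    Sample sample = new Sample(System.currentTimeMillis() - 12, System.currentTimeMillis());
    System.out.println("apparent heartbeat round trip: " + sample.roundTripMs() + " ms");
  }
}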
- */ -package org.apache.iotdb.confignode.conf; - -import org.apache.iotdb.common.rpc.thrift.TEndPoint; -import org.apache.iotdb.commons.conf.IoTDBConstant; -import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeLocation; -import org.apache.iotdb.rpc.RpcUtils; - -import java.io.File; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; - -public class ConfigNodeConf { - - /** could set ip or hostname */ - private String rpcAddress = "0.0.0.0"; - - /** used for communication between data node and config node */ - private int rpcPort = 22277; - - /** used for communication between config node and config node */ - private int consensusPort = 22278; - - /** Used for connecting to the ConfigNodeGroup */ - private TEndPoint targetConfigNode = new TEndPoint("0.0.0.0", 22277); - - /** Mark if the ConfigNode needs to apply */ - private boolean needApply = false; - - // TODO: Read from iotdb-confignode.properties - private int partitionRegionId = 0; - - /** Used for building the PartitionRegion */ - private List configNodeList = new ArrayList<>(); - - /** Thrift socket and connection timeout between nodes */ - private int connectionTimeoutInMS = (int) TimeUnit.SECONDS.toMillis(20); - - /** ConfigNodeGroup consensus protocol */ - private final String configNodeConsensusProtocolClass = - "org.apache.iotdb.consensus.ratis.RatisConsensus"; - - /** DataNode Regions consensus protocol */ - private String dataNodeConsensusProtocolClass = "org.apache.iotdb.consensus.ratis.RatisConsensus"; - - /** - * ClientManager will have so many selector threads (TAsyncClientManager) to distribute to its - * clients. - */ - private int selectorNumOfClientManager = - Runtime.getRuntime().availableProcessors() / 4 > 0 - ? Runtime.getRuntime().availableProcessors() / 4 - : 1; - - /** Number of SeriesPartitionSlots per StorageGroup */ - private int seriesPartitionSlotNum = 10000; - - /** SeriesPartitionSlot executor class */ - private String seriesPartitionExecutorClass = - "org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor"; - - /** Max concurrent client number */ - private int rpcMaxConcurrentClientNum = 65535; - - /** whether to use thrift compression. */ - private boolean isRpcThriftCompressionEnabled = false; - - /** whether to use Snappy compression before sending data through the network */ - private boolean rpcAdvancedCompressionEnable = false; - - /** max frame size */ - private int thriftMaxFrameSize = 536870912; - - /** buffer size */ - private int thriftDefaultBufferSize = RpcUtils.THRIFT_DEFAULT_BUF_CAPACITY; - - /** just for test wait for 60 second by default. 
*/ - private int thriftServerAwaitTimeForStopService = 60; - - /** System directory, including version file for each storage group and metadata */ - private String systemDir = - ConfigNodeConstant.DATA_DIR + File.separator + IoTDBConstant.SYSTEM_FOLDER_NAME; - - /** Consensus directory, storage consensus protocol logs */ - private String consensusDir = - ConfigNodeConstant.DATA_DIR + File.separator + ConfigNodeConstant.CONSENSUS_FOLDER; - - /** Time partition interval in seconds */ - private long timePartitionInterval = 604800; - - /** Default number of SchemaRegion replicas */ - private int schemaReplicationFactor = 3; - - /** Default number of DataRegion replicas */ - private int dataReplicationFactor = 3; - - /** The maximum number of SchemaRegions of each StorageGroup */ - private int maximumSchemaRegionCount = 4; - - /** The maximum number of DataRegions of each StorageGroup */ - private int maximumDataRegionCount = 20; - - /** Procedure Evict ttl */ - private int procedureCompletedEvictTTL = 800; - - /** Procedure completed clean interval */ - private int procedureCompletedCleanInterval = 30; - - /** Procedure core worker threads size */ - private int procedureCoreWorkerThreadsSize = - Math.max(Runtime.getRuntime().availableProcessors() / 4, 16); - - ConfigNodeConf() { - // empty constructor - } - - public void updatePath() { - formulateFolders(); - } - - private void formulateFolders() { - systemDir = addHomeDir(systemDir); - consensusDir = addHomeDir(consensusDir); - } - - private String addHomeDir(String dir) { - String homeDir = System.getProperty(ConfigNodeConstant.CONFIGNODE_HOME, null); - if (!new File(dir).isAbsolute() && homeDir != null && homeDir.length() > 0) { - if (!homeDir.endsWith(File.separator)) { - dir = homeDir + File.separatorChar + dir; - } else { - dir = homeDir + dir; - } - } - return dir; - } - - public String getRpcAddress() { - return rpcAddress; - } - - public void setRpcAddress(String rpcAddress) { - this.rpcAddress = rpcAddress; - } - - public int getRpcPort() { - return rpcPort; - } - - public void setRpcPort(int rpcPort) { - this.rpcPort = rpcPort; - } - - public int getConsensusPort() { - return consensusPort; - } - - public void setConsensusPort(int consensusPort) { - this.consensusPort = consensusPort; - } - - public boolean isNeedApply() { - return needApply; - } - - public void setNeedApply(boolean needApply) { - this.needApply = needApply; - } - - public TEndPoint getTargetConfigNode() { - return targetConfigNode; - } - - public void setTargetConfigNode(TEndPoint targetConfigNode) { - this.targetConfigNode = targetConfigNode; - } - - public int getPartitionRegionId() { - return partitionRegionId; - } - - public void setPartitionRegionId(int partitionRegionId) { - this.partitionRegionId = partitionRegionId; - } - - public List getConfigNodeList() { - return configNodeList; - } - - public void setConfigNodeList(List configNodeList) { - this.configNodeList = configNodeList; - } - - public int getSeriesPartitionSlotNum() { - return seriesPartitionSlotNum; - } - - public void setSeriesPartitionSlotNum(int seriesPartitionSlotNum) { - this.seriesPartitionSlotNum = seriesPartitionSlotNum; - } - - public String getSeriesPartitionExecutorClass() { - return seriesPartitionExecutorClass; - } - - public void setSeriesPartitionExecutorClass(String seriesPartitionExecutorClass) { - this.seriesPartitionExecutorClass = seriesPartitionExecutorClass; - } - - public int getSelectorNumOfClientManager() { - return selectorNumOfClientManager; - } - - public long 
getTimePartitionInterval() { - return timePartitionInterval; - } - - public void setTimePartitionInterval(long timePartitionInterval) { - this.timePartitionInterval = timePartitionInterval; - } - - public int getRpcMaxConcurrentClientNum() { - return rpcMaxConcurrentClientNum; - } - - public void setRpcMaxConcurrentClientNum(int rpcMaxConcurrentClientNum) { - this.rpcMaxConcurrentClientNum = rpcMaxConcurrentClientNum; - } - - public boolean isRpcThriftCompressionEnabled() { - return isRpcThriftCompressionEnabled; - } - - public void setRpcThriftCompressionEnabled(boolean rpcThriftCompressionEnabled) { - isRpcThriftCompressionEnabled = rpcThriftCompressionEnabled; - } - - public boolean isRpcAdvancedCompressionEnable() { - return rpcAdvancedCompressionEnable; - } - - public void setRpcAdvancedCompressionEnable(boolean rpcAdvancedCompressionEnable) { - this.rpcAdvancedCompressionEnable = rpcAdvancedCompressionEnable; - } - - public int getThriftMaxFrameSize() { - return thriftMaxFrameSize; - } - - public void setThriftMaxFrameSize(int thriftMaxFrameSize) { - this.thriftMaxFrameSize = thriftMaxFrameSize; - } - - public int getThriftDefaultBufferSize() { - return thriftDefaultBufferSize; - } - - public void setThriftDefaultBufferSize(int thriftDefaultBufferSize) { - this.thriftDefaultBufferSize = thriftDefaultBufferSize; - } - - public int getConnectionTimeoutInMS() { - return connectionTimeoutInMS; - } - - public ConfigNodeConf setConnectionTimeoutInMS(int connectionTimeoutInMS) { - this.connectionTimeoutInMS = connectionTimeoutInMS; - return this; - } - - public void setSelectorNumOfClientManager(int selectorNumOfClientManager) { - this.selectorNumOfClientManager = selectorNumOfClientManager; - } - - public String getConsensusDir() { - return consensusDir; - } - - public void setConsensusDir(String consensusDir) { - this.consensusDir = consensusDir; - } - - public String getConfigNodeConsensusProtocolClass() { - return configNodeConsensusProtocolClass; - } - - public String getDataNodeConsensusProtocolClass() { - return dataNodeConsensusProtocolClass; - } - - public void setDataNodeConsensusProtocolClass(String dataNodeConsensusProtocolClass) { - this.dataNodeConsensusProtocolClass = dataNodeConsensusProtocolClass; - } - - public int getThriftServerAwaitTimeForStopService() { - return thriftServerAwaitTimeForStopService; - } - - public void setThriftServerAwaitTimeForStopService(int thriftServerAwaitTimeForStopService) { - this.thriftServerAwaitTimeForStopService = thriftServerAwaitTimeForStopService; - } - - public String getSystemDir() { - return systemDir; - } - - public void setSystemDir(String systemDir) { - this.systemDir = systemDir; - } - - public int getSchemaReplicationFactor() { - return schemaReplicationFactor; - } - - public void setSchemaReplicationFactor(int schemaReplicationFactor) { - this.schemaReplicationFactor = schemaReplicationFactor; - } - - public int getDataReplicationFactor() { - return dataReplicationFactor; - } - - public void setDataReplicationFactor(int dataReplicationFactor) { - this.dataReplicationFactor = dataReplicationFactor; - } - - public int getMaximumSchemaRegionCount() { - return maximumSchemaRegionCount; - } - - public void setMaximumSchemaRegionCount(int maximumSchemaRegionCount) { - this.maximumSchemaRegionCount = maximumSchemaRegionCount; - } - - public int getMaximumDataRegionCount() { - return maximumDataRegionCount; - } - - public void setMaximumDataRegionCount(int maximumDataRegionCount) { - this.maximumDataRegionCount = maximumDataRegionCount; 
- } - - public int getProcedureCompletedEvictTTL() { - return procedureCompletedEvictTTL; - } - - public void setProcedureCompletedEvictTTL(int procedureCompletedEvictTTL) { - this.procedureCompletedEvictTTL = procedureCompletedEvictTTL; - } - - public int getProcedureCompletedCleanInterval() { - return procedureCompletedCleanInterval; - } - - public void setProcedureCompletedCleanInterval(int procedureCompletedCleanInterval) { - this.procedureCompletedCleanInterval = procedureCompletedCleanInterval; - } - - public int getProcedureCoreWorkerThreadsSize() { - return procedureCoreWorkerThreadsSize; - } - - public void setProcedureCoreWorkerThreadsSize(int procedureCoreWorkerThreadsSize) { - this.procedureCoreWorkerThreadsSize = procedureCoreWorkerThreadsSize; - } -} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java new file mode 100644 index 000000000000..a9ad34ead083 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConfig.java @@ -0,0 +1,422 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.confignode.conf; + +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.commons.conf.IoTDBConstant; +import org.apache.iotdb.consensus.ConsensusFactory; +import org.apache.iotdb.rpc.RpcUtils; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +public class ConfigNodeConfig { + + /** could set ip or hostname */ + private String rpcAddress = "0.0.0.0"; + + /** used for communication between data node and config node */ + private int rpcPort = 22277; + + /** used for communication between config node and config node */ + private int consensusPort = 22278; + + /** Used for connecting to the ConfigNodeGroup */ + private TEndPoint targetConfigNode = new TEndPoint("0.0.0.0", 22277); + + /** Mark if the ConfigNode needs to apply */ + private boolean needApply = false; + + // TODO: Read from iotdb-confignode.properties + private int partitionRegionId = 0; + + /** Used for building the PartitionRegion */ + private List configNodeList = new ArrayList<>(); + + /** Thrift socket and connection timeout between nodes */ + private int connectionTimeoutInMS = (int) TimeUnit.SECONDS.toMillis(20); + + /** ConfigNodeGroup consensus protocol */ + private String configNodeConsensusProtocolClass = ConsensusFactory.RatisConsensus; + + /** DataNode data region consensus protocol */ + private String dataRegionConsensusProtocolClass = ConsensusFactory.StandAloneConsensus; + + /** DataNode schema region consensus protocol */ + private String schemaRegionConsensusProtocolClass = ConsensusFactory.StandAloneConsensus; + + /** + * ClientManager will have so many selector threads (TAsyncClientManager) to distribute to its + * clients. + */ + private int selectorNumOfClientManager = + Runtime.getRuntime().availableProcessors() / 4 > 0 + ? Runtime.getRuntime().availableProcessors() / 4 + : 1; + + /** Number of SeriesPartitionSlots per StorageGroup */ + private int seriesPartitionSlotNum = 10000; + + /** SeriesPartitionSlot executor class */ + private String seriesPartitionExecutorClass = + "org.apache.iotdb.commons.partition.executor.hash.BKDRHashExecutor"; + + /** Max concurrent client number */ + private int rpcMaxConcurrentClientNum = 65535; + + /** whether to use thrift compression. */ + private boolean isRpcThriftCompressionEnabled = false; + + /** whether to use Snappy compression before sending data through the network */ + private boolean rpcAdvancedCompressionEnable = false; + + /** max frame size */ + private int thriftMaxFrameSize = 536870912; + + /** buffer size */ + private int thriftDefaultBufferSize = RpcUtils.THRIFT_DEFAULT_BUF_CAPACITY; + + /** just for test wait for 60 second by default. 
*/ + private int thriftServerAwaitTimeForStopService = 60; + + /** System directory, including version file for each storage group and metadata */ + private String systemDir = + ConfigNodeConstant.DATA_DIR + File.separator + IoTDBConstant.SYSTEM_FOLDER_NAME; + + /** Consensus directory, storage consensus protocol logs */ + private String consensusDir = + ConfigNodeConstant.DATA_DIR + File.separator + ConfigNodeConstant.CONSENSUS_FOLDER; + + /** External lib directory, stores user-uploaded JAR files */ + private String extLibDir = IoTDBConstant.EXT_FOLDER_NAME; + + /** External lib directory for UDF, stores user-uploaded JAR files */ + private String udfLibDir = + IoTDBConstant.EXT_FOLDER_NAME + File.separator + IoTDBConstant.UDF_FOLDER_NAME; + + /** External temporary lib directory for storing downloaded JAR files */ + private String temporaryLibDir = + IoTDBConstant.EXT_FOLDER_NAME + File.separator + IoTDBConstant.TMP_FOLDER_NAME; + + /** Time partition interval in seconds */ + private long timePartitionInterval = 604800; + + /** Default number of SchemaRegion replicas */ + private int schemaReplicationFactor = 1; + + /** Default number of DataRegion replicas */ + private int dataReplicationFactor = 1; + + /** Procedure Evict ttl */ + private int procedureCompletedEvictTTL = 800; + + /** Procedure completed clean interval */ + private int procedureCompletedCleanInterval = 30; + + /** Procedure core worker threads size */ + private int procedureCoreWorkerThreadsSize = + Math.max(Runtime.getRuntime().availableProcessors() / 4, 16); + + /** The heartbeat interval in milliseconds */ + private long heartbeatInterval = 1000; + + ConfigNodeConfig() { + // empty constructor + } + + public void updatePath() { + formulateFolders(); + } + + private void formulateFolders() { + systemDir = addHomeDir(systemDir); + consensusDir = addHomeDir(consensusDir); + extLibDir = addHomeDir(extLibDir); + udfLibDir = addHomeDir(udfLibDir); + temporaryLibDir = addHomeDir(temporaryLibDir); + } + + private String addHomeDir(String dir) { + String homeDir = System.getProperty(ConfigNodeConstant.CONFIGNODE_HOME, null); + if (!new File(dir).isAbsolute() && homeDir != null && homeDir.length() > 0) { + if (!homeDir.endsWith(File.separator)) { + dir = homeDir + File.separatorChar + dir; + } else { + dir = homeDir + dir; + } + } + return dir; + } + + public String getRpcAddress() { + return rpcAddress; + } + + public void setRpcAddress(String rpcAddress) { + this.rpcAddress = rpcAddress; + } + + public int getRpcPort() { + return rpcPort; + } + + public void setRpcPort(int rpcPort) { + this.rpcPort = rpcPort; + } + + public int getConsensusPort() { + return consensusPort; + } + + public void setConsensusPort(int consensusPort) { + this.consensusPort = consensusPort; + } + + public boolean isNeedApply() { + return needApply; + } + + public void setNeedApply(boolean needApply) { + this.needApply = needApply; + } + + public TEndPoint getTargetConfigNode() { + return targetConfigNode; + } + + public void setTargetConfigNode(TEndPoint targetConfigNode) { + this.targetConfigNode = targetConfigNode; + } + + public int getPartitionRegionId() { + return partitionRegionId; + } + + public void setPartitionRegionId(int partitionRegionId) { + this.partitionRegionId = partitionRegionId; + } + + public List getConfigNodeList() { + return configNodeList; + } + + public void setConfigNodeList(List configNodeList) { + this.configNodeList = configNodeList; + } + + public int getSeriesPartitionSlotNum() { + return seriesPartitionSlotNum; + } + 
+ public void setSeriesPartitionSlotNum(int seriesPartitionSlotNum) { + this.seriesPartitionSlotNum = seriesPartitionSlotNum; + } + + public String getSeriesPartitionExecutorClass() { + return seriesPartitionExecutorClass; + } + + public void setSeriesPartitionExecutorClass(String seriesPartitionExecutorClass) { + this.seriesPartitionExecutorClass = seriesPartitionExecutorClass; + } + + public int getSelectorNumOfClientManager() { + return selectorNumOfClientManager; + } + + public long getTimePartitionInterval() { + return timePartitionInterval; + } + + public void setTimePartitionInterval(long timePartitionInterval) { + this.timePartitionInterval = timePartitionInterval; + } + + public int getRpcMaxConcurrentClientNum() { + return rpcMaxConcurrentClientNum; + } + + public void setRpcMaxConcurrentClientNum(int rpcMaxConcurrentClientNum) { + this.rpcMaxConcurrentClientNum = rpcMaxConcurrentClientNum; + } + + public boolean isRpcThriftCompressionEnabled() { + return isRpcThriftCompressionEnabled; + } + + public void setRpcThriftCompressionEnabled(boolean rpcThriftCompressionEnabled) { + isRpcThriftCompressionEnabled = rpcThriftCompressionEnabled; + } + + public boolean isRpcAdvancedCompressionEnable() { + return rpcAdvancedCompressionEnable; + } + + public void setRpcAdvancedCompressionEnable(boolean rpcAdvancedCompressionEnable) { + this.rpcAdvancedCompressionEnable = rpcAdvancedCompressionEnable; + } + + public int getThriftMaxFrameSize() { + return thriftMaxFrameSize; + } + + public void setThriftMaxFrameSize(int thriftMaxFrameSize) { + this.thriftMaxFrameSize = thriftMaxFrameSize; + } + + public int getThriftDefaultBufferSize() { + return thriftDefaultBufferSize; + } + + public void setThriftDefaultBufferSize(int thriftDefaultBufferSize) { + this.thriftDefaultBufferSize = thriftDefaultBufferSize; + } + + public int getConnectionTimeoutInMS() { + return connectionTimeoutInMS; + } + + public ConfigNodeConfig setConnectionTimeoutInMS(int connectionTimeoutInMS) { + this.connectionTimeoutInMS = connectionTimeoutInMS; + return this; + } + + public void setSelectorNumOfClientManager(int selectorNumOfClientManager) { + this.selectorNumOfClientManager = selectorNumOfClientManager; + } + + public String getConsensusDir() { + return consensusDir; + } + + public void setConsensusDir(String consensusDir) { + this.consensusDir = consensusDir; + } + + public String getConfigNodeConsensusProtocolClass() { + return configNodeConsensusProtocolClass; + } + + public void setConfigNodeConsensusProtocolClass(String configNodeConsensusProtocolClass) { + this.configNodeConsensusProtocolClass = configNodeConsensusProtocolClass; + } + + public String getDataRegionConsensusProtocolClass() { + return dataRegionConsensusProtocolClass; + } + + public void setDataRegionConsensusProtocolClass(String dataRegionConsensusProtocolClass) { + this.dataRegionConsensusProtocolClass = dataRegionConsensusProtocolClass; + } + + public String getSchemaRegionConsensusProtocolClass() { + return schemaRegionConsensusProtocolClass; + } + + public void setSchemaRegionConsensusProtocolClass(String schemaRegionConsensusProtocolClass) { + this.schemaRegionConsensusProtocolClass = schemaRegionConsensusProtocolClass; + } + + public int getThriftServerAwaitTimeForStopService() { + return thriftServerAwaitTimeForStopService; + } + + public void setThriftServerAwaitTimeForStopService(int thriftServerAwaitTimeForStopService) { + this.thriftServerAwaitTimeForStopService = thriftServerAwaitTimeForStopService; + } + + public String getSystemDir() 
{ + return systemDir; + } + + public void setSystemDir(String systemDir) { + this.systemDir = systemDir; + } + + public String getSystemUdfDir() { + return getSystemDir() + File.separator + "udf" + File.separator; + } + + public String getUdfLibDir() { + return udfLibDir; + } + + public void setUdfLibDir(String udfLibDir) { + this.udfLibDir = udfLibDir; + } + + public String getTemporaryLibDir() { + return temporaryLibDir; + } + + public void setTemporaryLibDir(String temporaryLibDir) { + this.temporaryLibDir = temporaryLibDir; + } + + public int getSchemaReplicationFactor() { + return schemaReplicationFactor; + } + + public void setSchemaReplicationFactor(int schemaReplicationFactor) { + this.schemaReplicationFactor = schemaReplicationFactor; + } + + public int getDataReplicationFactor() { + return dataReplicationFactor; + } + + public void setDataReplicationFactor(int dataReplicationFactor) { + this.dataReplicationFactor = dataReplicationFactor; + } + + public int getProcedureCompletedEvictTTL() { + return procedureCompletedEvictTTL; + } + + public void setProcedureCompletedEvictTTL(int procedureCompletedEvictTTL) { + this.procedureCompletedEvictTTL = procedureCompletedEvictTTL; + } + + public int getProcedureCompletedCleanInterval() { + return procedureCompletedCleanInterval; + } + + public void setProcedureCompletedCleanInterval(int procedureCompletedCleanInterval) { + this.procedureCompletedCleanInterval = procedureCompletedCleanInterval; + } + + public int getProcedureCoreWorkerThreadsSize() { + return procedureCoreWorkerThreadsSize; + } + + public void setProcedureCoreWorkerThreadsSize(int procedureCoreWorkerThreadsSize) { + this.procedureCoreWorkerThreadsSize = procedureCoreWorkerThreadsSize; + } + + public long getHeartbeatInterval() { + return heartbeatInterval; + } + + public void setHeartbeatInterval(long heartbeatInterval) { + this.heartbeatInterval = heartbeatInterval; + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConstant.java b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConstant.java index 6725351fd437..da179fc8a71a 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConstant.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeConstant.java @@ -42,6 +42,7 @@ public class ConfigNodeConstant { public static final String DATA_DIR = "data"; public static final String CONF_DIR = "conf"; public static final String CONSENSUS_FOLDER = "consensus"; + public static final String UDF_FOLDER = "udf"; public static final int MIN_SUPPORTED_JDK_VERSION = 8; diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java index b1c4e49328fd..dd887c39dad2 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeDescriptor.java @@ -21,6 +21,7 @@ import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.exception.BadNodeUrlException; import org.apache.iotdb.commons.utils.NodeUrlUtils; +import org.apache.iotdb.metrics.config.MetricConfigDescriptor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,13 +38,13 @@ public class ConfigNodeDescriptor { private final CommonDescriptor commonDescriptor = CommonDescriptor.getInstance(); - private final ConfigNodeConf conf = new ConfigNodeConf(); + private final 
ConfigNodeConfig conf = new ConfigNodeConfig(); private ConfigNodeDescriptor() { loadProps(); } - public ConfigNodeConf getConf() { + public ConfigNodeConfig getConf() { return conf; } @@ -91,7 +92,6 @@ else if (!urlString.endsWith(".properties")) { } private void loadProps() { - commonDescriptor.initCommonConfigDir(conf.getSystemDir()); URL url = getPropsUrl(); if (url == null) { LOGGER.warn( @@ -115,7 +115,7 @@ private void loadProps() { Integer.parseInt( properties.getProperty("consensus_port", String.valueOf(conf.getConsensusPort())))); - String targetConfigNode = properties.getProperty("target_confignode", null); + String targetConfigNode = properties.getProperty("config_nodes", null); if (targetConfigNode != null) { conf.setTargetConfigNode(NodeUrlUtils.parseTEndPointUrl(targetConfigNode)); } @@ -129,9 +129,18 @@ private void loadProps() { properties.getProperty( "series_partition_executor_class", conf.getSeriesPartitionExecutorClass())); - conf.setDataNodeConsensusProtocolClass( + conf.setConfigNodeConsensusProtocolClass( properties.getProperty( - "data_node_consensus_protocol_class", conf.getDataNodeConsensusProtocolClass())); + "config_node_consensus_protocol_class", conf.getConfigNodeConsensusProtocolClass())); + + conf.setDataRegionConsensusProtocolClass( + properties.getProperty( + "data_region_consensus_protocol_class", conf.getDataRegionConsensusProtocolClass())); + + conf.setSchemaRegionConsensusProtocolClass( + properties.getProperty( + "schema_region_consensus_protocol_class", + conf.getSchemaRegionConsensusProtocolClass())); conf.setRpcAdvancedCompressionEnable( Boolean.parseBoolean( @@ -176,6 +185,11 @@ private void loadProps() { conf.setConsensusDir(properties.getProperty("consensus_dir", conf.getConsensusDir())); + conf.setUdfLibDir(properties.getProperty("udf_lib_dir", conf.getUdfLibDir())); + + conf.setTemporaryLibDir( + properties.getProperty("temporary_lib_dir", conf.getTemporaryLibDir())); + conf.setTimePartitionInterval( Long.parseLong( properties.getProperty( @@ -191,16 +205,10 @@ private void loadProps() { properties.getProperty( "data_replication_factor", String.valueOf(conf.getDataReplicationFactor())))); - conf.setMaximumSchemaRegionCount( - Integer.parseInt( - properties.getProperty( - "maximum_schema_region_count", - String.valueOf(conf.getMaximumSchemaRegionCount())))); - - conf.setMaximumDataRegionCount( - Integer.parseInt( + conf.setHeartbeatInterval( + Long.parseLong( properties.getProperty( - "maximum_data_region_count", String.valueOf(conf.getMaximumDataRegionCount())))); + "heartbeat_interval", String.valueOf(conf.getHeartbeatInterval())))); // commons commonDescriptor.loadCommonProps(properties); @@ -231,6 +239,9 @@ private void loadProps() { commonDescriptor .getConfig() .updatePath(System.getProperty(ConfigNodeConstant.CONFIGNODE_HOME, null)); + MetricConfigDescriptor.getInstance() + .getMetricConfig() + .updateRpcInstance(conf.getRpcAddress(), conf.getRpcPort()); } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeRemoveCheck.java b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeRemoveCheck.java new file mode 100644 index 000000000000..169e1509f5d9 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeRemoveCheck.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.conf; + +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.exception.BadNodeUrlException; +import org.apache.iotdb.commons.utils.NodeUrlUtils; +import org.apache.iotdb.confignode.client.SyncConfigNodeClientPool; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.util.List; +import java.util.Properties; + +public class ConfigNodeRemoveCheck { + private static final Logger LOGGER = LoggerFactory.getLogger(ConfigNodeRemoveCheck.class); + + private static final ConfigNodeConfig conf = ConfigNodeDescriptor.getInstance().getConf(); + + private final File systemPropertiesFile; + private final Properties systemProperties; + + public ConfigNodeRemoveCheck() { + systemPropertiesFile = + new File(conf.getSystemDir() + File.separator + ConfigNodeConstant.SYSTEM_FILE_NAME); + systemProperties = new Properties(); + } + + public TConfigNodeLocation removeCheck(TEndPoint endPoint) { + TConfigNodeLocation nodeLocation = new TConfigNodeLocation(); + if (!systemPropertiesFile.exists()) { + LOGGER.error("The system properties file does not exist. 
IoTDB-ConfigNode is shut down."); + return nodeLocation; + } + try (FileInputStream inputStream = new FileInputStream(systemPropertiesFile)) { + systemProperties.load(inputStream); + nodeLocation = + getConfigNodeList().stream() + .filter(e -> e.getInternalEndPoint().equals(endPoint)) + .findFirst() + .orElse(nodeLocation); + } catch (IOException | BadNodeUrlException e) { + LOGGER.error("Load system properties file failed.", e); + } + + return nodeLocation; + } + + public void removeConfigNode(TConfigNodeLocation nodeLocation) + throws BadNodeUrlException, IOException { + TSStatus status = + SyncConfigNodeClientPool.getInstance().removeConfigNode(getConfigNodeList(), nodeLocation); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + LOGGER.error(status.getMessage()); + throw new IOException("Remove ConfigNode failed: " + status.getMessage()); + } + } + + public List<TConfigNodeLocation> getConfigNodeList() throws BadNodeUrlException { + return NodeUrlUtils.parseTConfigNodeUrls(systemProperties.getProperty("confignode_list")); + } + + public int getConsensusPort() { + return Integer.parseInt(systemProperties.getProperty("consensus_port")); + } + + private static class ConfigNodeConfRemoveCheckHolder { + + private static final ConfigNodeRemoveCheck INSTANCE = new ConfigNodeRemoveCheck(); + + private ConfigNodeConfRemoveCheckHolder() { + // Empty constructor + } + } + + public static ConfigNodeRemoveCheck getInstance() { + return ConfigNodeRemoveCheck.ConfigNodeConfRemoveCheckHolder.INSTANCE; + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java index ca4713adf34a..7a826d20dc5f 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/conf/ConfigNodeStartupCheck.java @@ -18,6 +18,7 @@ */ package org.apache.iotdb.confignode.conf; +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.exception.BadNodeUrlException; @@ -25,9 +26,9 @@ import org.apache.iotdb.commons.exception.StartupException; import org.apache.iotdb.commons.utils.NodeUrlUtils; import org.apache.iotdb.confignode.client.SyncConfigNodeClientPool; -import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterReq; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterResp; +import org.apache.iotdb.consensus.ConsensusFactory; import org.apache.iotdb.rpc.TSStatusCode; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,7 +50,7 @@ public class ConfigNodeStartupCheck { private static final Logger LOGGER = LoggerFactory.getLogger(ConfigNodeStartupCheck.class); - private static final ConfigNodeConf conf = ConfigNodeDescriptor.getInstance().getConf(); + private static final ConfigNodeConfig conf = ConfigNodeDescriptor.getInstance().getConf(); private final File systemPropertiesFile; private final Properties systemProperties; @@ -82,33 +83,45 @@ public void startUpCheck() throws StartupException, IOException, ConfigurationEx /** Check whether the global configuration of the cluster is correct */ private void checkGlobalConfig() throws ConfigurationException { // When the ConfigNode consensus protocol is set to StandAlone, - // the target_configNode needs to point to itself - if (conf.getConfigNodeConsensusProtocolClass() - 
.equals("org.apache.iotdb.consensus.standalone.StandAloneConsensus") + // the config_nodes needs to point to itself + if (conf.getConfigNodeConsensusProtocolClass().equals(ConsensusFactory.StandAloneConsensus) && (!conf.getRpcAddress().equals(conf.getTargetConfigNode().getIp()) || conf.getRpcPort() != conf.getTargetConfigNode().getPort())) { throw new ConfigurationException( - "target_confignode", + "config_nodes", conf.getTargetConfigNode().getIp() + ":" + conf.getTargetConfigNode().getPort(), conf.getRpcAddress() + ":" + conf.getRpcPort()); } - // When the DataNode consensus protocol is set to StandAlone, - // the replication factor must be 1 - if (conf.getDataNodeConsensusProtocolClass() - .equals("org.apache.iotdb.consensus.standalone.StandAloneConsensus")) { - if (conf.getSchemaReplicationFactor() != 1) { - throw new ConfigurationException( - "schema_replication_factor", - String.valueOf(conf.getSchemaReplicationFactor()), - String.valueOf(1)); - } - if (conf.getDataReplicationFactor() != 1) { - throw new ConfigurationException( - "data_replication_factor", - String.valueOf(conf.getDataReplicationFactor()), - String.valueOf(1)); - } + // When the data region consensus protocol is set to StandAlone, + // the data replication factor must be 1 + if (conf.getDataRegionConsensusProtocolClass().equals(ConsensusFactory.StandAloneConsensus) + && conf.getDataReplicationFactor() != 1) { + throw new ConfigurationException( + "data_replication_factor", + String.valueOf(conf.getDataReplicationFactor()), + String.valueOf(1)); + } + + // When the schema region consensus protocol is set to StandAlone, + // the schema replication factor must be 1 + if (conf.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.StandAloneConsensus) + && conf.getSchemaReplicationFactor() != 1) { + throw new ConfigurationException( + "schema_replication_factor", + String.valueOf(conf.getSchemaReplicationFactor()), + String.valueOf(1)); + } + + // When the schema region consensus protocol is set to MultiLeaderConsensus, + // we should report an error + if (conf.getSchemaRegionConsensusProtocolClass() + .equals(ConsensusFactory.MultiLeaderConsensus)) { + throw new ConfigurationException( + "schema_region_consensus_protocol_class", + String.valueOf(conf.getSchemaRegionConsensusProtocolClass()), + String.format( + "%s or %s", ConsensusFactory.StandAloneConsensus, ConsensusFactory.RatisConsensus)); } } @@ -118,7 +131,7 @@ private void checkGlobalConfig() throws ConfigurationException { * * @return True if confignode-system.properties doesn't exist. */ - private boolean isFirstStart() throws IOException, StartupException { + private boolean isFirstStart() throws IOException { // If systemDir does not exist, create systemDir File systemDir = new File(conf.getSystemDir()); createDirIfEmpty(systemDir); @@ -130,18 +143,6 @@ private boolean isFirstStart() throws IOException, StartupException { // Check if system properties file exists boolean isFirstStart; if (!systemPropertiesFile.exists()) { - // Create the system properties file when first start the ConfigNode - if (systemPropertiesFile.createNewFile()) { - LOGGER.info( - "System properties file {} for ConfigNode is created.", - systemPropertiesFile.getAbsolutePath()); - } else { - LOGGER.error( - "Can't create the system properties file {} for ConfigNode. 
IoTDB-ConfigNode is shutdown.", - systemPropertiesFile.getAbsolutePath()); - throw new StartupException("Can't create system properties file"); - } - isFirstStart = true; } else { // Load system properties file @@ -159,7 +160,7 @@ private boolean isFirstStart() throws IOException, StartupException { * Check if the current ConfigNode is SeedConfigNode. If true, do the SeedConfigNode configuration * as well. * - * @return True if the target_confignode points to itself + * @return True if the config_nodes points to itself */ private boolean isSeedConfigNode() { boolean result = @@ -170,6 +171,7 @@ private boolean isSeedConfigNode() { conf.setConfigNodeList( Collections.singletonList( new TConfigNodeLocation( + 0, new TEndPoint(conf.getRpcAddress(), conf.getRpcPort()), new TEndPoint(conf.getRpcAddress(), conf.getConsensusPort())))); } @@ -181,9 +183,11 @@ private void registerConfigNode() throws StartupException { TConfigNodeRegisterReq req = new TConfigNodeRegisterReq( new TConfigNodeLocation( + -1, new TEndPoint(conf.getRpcAddress(), conf.getRpcPort()), new TEndPoint(conf.getRpcAddress(), conf.getConsensusPort())), - conf.getDataNodeConsensusProtocolClass(), + conf.getDataRegionConsensusProtocolClass(), + conf.getSchemaRegionConsensusProtocolClass(), conf.getSeriesPartitionSlotNum(), conf.getSeriesPartitionExecutorClass(), CommonDescriptor.getInstance().getConfig().getDefaultTTL(), @@ -191,13 +195,28 @@ private void registerConfigNode() throws StartupException { conf.getSchemaReplicationFactor(), conf.getDataReplicationFactor()); - TConfigNodeRegisterResp resp = - SyncConfigNodeClientPool.getInstance().registerConfigNode(conf.getTargetConfigNode(), req); - if (resp.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - conf.setPartitionRegionId(resp.getPartitionRegionId().getId()); - conf.setConfigNodeList(resp.getConfigNodeList()); - } else { - throw new StartupException("Register ConfigNode failed!"); + TEndPoint targetConfigNode = conf.getTargetConfigNode(); + while (true) { + TConfigNodeRegisterResp resp = + SyncConfigNodeClientPool.getInstance().registerConfigNode(targetConfigNode, req); + if (resp.getStatus().getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + conf.setPartitionRegionId(resp.getPartitionRegionId().getId()); + conf.setConfigNodeList(resp.getConfigNodeList()); + LOGGER.info("ConfigNode registered successfully."); + break; + } else if (resp.getStatus().getCode() == TSStatusCode.NEED_REDIRECTION.getStatusCode()) { + targetConfigNode = resp.getStatus().getRedirectNode(); + LOGGER.info("ConfigNode need redirect to {}.", targetConfigNode); + } else if (resp.getStatus().getCode() == TSStatusCode.ERROR_GLOBAL_CONFIG.getStatusCode()) { + LOGGER.error("Configuration may not be consistent, {}", req); + throw new StartupException("Configuration may not be consistent!"); + } + + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + throw new StartupException("Register ConfigNode failed!"); + } } } @@ -205,7 +224,21 @@ private void registerConfigNode() throws StartupException { * There are some special parameters that can't be changed after a ConfigNode first started. 
* Therefore, store them in confignode-system.properties during the first startup */ - private void writeSystemProperties() { + private void writeSystemProperties() throws IOException, StartupException { + // Create the system properties file if necessary + if (!systemPropertiesFile.exists()) { + if (systemPropertiesFile.createNewFile()) { + LOGGER.info( + "System properties file {} for ConfigNode is created.", + systemPropertiesFile.getAbsolutePath()); + } else { + LOGGER.error( + "Can't create the system properties file {} for ConfigNode. IoTDB-ConfigNode is shutdown.", + systemPropertiesFile.getAbsolutePath()); + throw new StartupException("Can't create system properties file"); + } + } + // Startup configuration systemProperties.setProperty("rpc_address", String.valueOf(conf.getRpcAddress())); systemProperties.setProperty("rpc_port", String.valueOf(conf.getRpcPort())); @@ -215,7 +248,9 @@ private void writeSystemProperties() { systemProperties.setProperty( "config_node_consensus_protocol_class", conf.getConfigNodeConsensusProtocolClass()); systemProperties.setProperty( - "data_node_consensus_protocol_class", conf.getDataNodeConsensusProtocolClass()); + "data_region_consensus_protocol_class", conf.getDataRegionConsensusProtocolClass()); + systemProperties.setProperty( + "schema_region_consensus_protocol_class", conf.getSchemaRegionConsensusProtocolClass()); // PartitionSlot configuration systemProperties.setProperty( @@ -227,16 +262,24 @@ private void writeSystemProperties() { systemProperties.setProperty( "confignode_list", NodeUrlUtils.convertTConfigNodeUrls(conf.getConfigNodeList())); - try { - systemProperties.store(new FileOutputStream(systemPropertiesFile), ""); + try (FileOutputStream fileOutputStream = new FileOutputStream(systemPropertiesFile)) { + systemProperties.store(fileOutputStream, ""); } catch (IOException e) { + if (!systemPropertiesFile.delete()) { + LOGGER.error( + "Automatically deleting {} failed, please remove it manually.", + systemPropertiesFile.getAbsolutePath()); + } + LOGGER.error( "Can't store system properties file {}.", systemPropertiesFile.getAbsolutePath()); + throw e; } } /** Ensure that special parameters are consistent with each startup except the first one */ - private void checkSystemProperties() throws ConfigurationException { + private void checkSystemProperties() + throws ConfigurationException, IOException, StartupException { boolean needReWrite = false; // Startup configuration @@ -282,15 +325,28 @@ private void checkSystemProperties() throws ConfigurationException { configNodeConsensusProtocolClass); } - String dataNodeConsensusProtocolClass = - systemProperties.getProperty("data_node_consensus_protocol_class", null); - if (dataNodeConsensusProtocolClass == null) { + String dataRegionConsensusProtocolClass = + systemProperties.getProperty("data_region_consensus_protocol_class", null); + if (dataRegionConsensusProtocolClass == null) { + needReWrite = true; + } else if (!dataRegionConsensusProtocolClass.equals( + conf.getDataRegionConsensusProtocolClass())) { + throw new ConfigurationException( + "data_region_consensus_protocol_class", + conf.getDataRegionConsensusProtocolClass(), + dataRegionConsensusProtocolClass); + } + + String schemaRegionConsensusProtocolClass = + systemProperties.getProperty("schema_region_consensus_protocol_class", null); + if (schemaRegionConsensusProtocolClass == null) { needReWrite = true; - } else if (!dataNodeConsensusProtocolClass.equals(conf.getDataNodeConsensusProtocolClass())) { + } else if 
(!schemaRegionConsensusProtocolClass.equals( + conf.getSchemaRegionConsensusProtocolClass())) { throw new ConfigurationException( - "data_node_consensus_protocol_class", - conf.getDataNodeConsensusProtocolClass(), - dataNodeConsensusProtocolClass); + "schema_region_consensus_protocol_class", + conf.getSchemaRegionConsensusProtocolClass(), + schemaRegionConsensusProtocolClass); } // PartitionSlot configuration @@ -328,7 +384,7 @@ private void checkSystemProperties() throws ConfigurationException { /** Only load ConfigNodeList from confignode-system.properties when restart */ private void loadConfigNodeList() throws StartupException { String addresses = systemProperties.getProperty("confignode_list", null); - if (addresses != null) { + if (addresses != null && !addresses.isEmpty()) { try { conf.setConfigNodeList(NodeUrlUtils.parseTConfigNodeUrls(addresses)); } catch (BadNodeUrlException e) { diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigRequest.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigRequest.java index dcef6a91f826..6918470bf056 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigRequest.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigRequest.java @@ -22,18 +22,25 @@ import org.apache.iotdb.confignode.consensus.request.read.CountStorageGroupReq; import org.apache.iotdb.confignode.consensus.request.read.GetDataNodeInfoReq; import org.apache.iotdb.confignode.consensus.request.read.GetDataPartitionReq; +import org.apache.iotdb.confignode.consensus.request.read.GetNodePathsPartitionReq; import org.apache.iotdb.confignode.consensus.request.read.GetOrCreateDataPartitionReq; import org.apache.iotdb.confignode.consensus.request.read.GetOrCreateSchemaPartitionReq; +import org.apache.iotdb.confignode.consensus.request.read.GetRegionInfoListReq; import org.apache.iotdb.confignode.consensus.request.read.GetSchemaPartitionReq; import org.apache.iotdb.confignode.consensus.request.read.GetStorageGroupReq; +import org.apache.iotdb.confignode.consensus.request.write.AdjustMaxRegionGroupCountReq; import org.apache.iotdb.confignode.consensus.request.write.ApplyConfigNodeReq; import org.apache.iotdb.confignode.consensus.request.write.CreateDataPartitionReq; +import org.apache.iotdb.confignode.consensus.request.write.CreateFunctionReq; import org.apache.iotdb.confignode.consensus.request.write.CreateRegionsReq; import org.apache.iotdb.confignode.consensus.request.write.CreateSchemaPartitionReq; import org.apache.iotdb.confignode.consensus.request.write.DeleteProcedureReq; import org.apache.iotdb.confignode.consensus.request.write.DeleteRegionsReq; import org.apache.iotdb.confignode.consensus.request.write.DeleteStorageGroupReq; +import org.apache.iotdb.confignode.consensus.request.write.DropFunctionReq; +import org.apache.iotdb.confignode.consensus.request.write.PreDeleteStorageGroupReq; import org.apache.iotdb.confignode.consensus.request.write.RegisterDataNodeReq; +import org.apache.iotdb.confignode.consensus.request.write.RemoveConfigNodeReq; import org.apache.iotdb.confignode.consensus.request.write.SetDataReplicationFactorReq; import org.apache.iotdb.confignode.consensus.request.write.SetSchemaReplicationFactorReq; import org.apache.iotdb.confignode.consensus.request.write.SetStorageGroupReq; @@ -41,12 +48,14 @@ import org.apache.iotdb.confignode.consensus.request.write.SetTimePartitionIntervalReq; import 
org.apache.iotdb.confignode.consensus.request.write.UpdateProcedureReq; import org.apache.iotdb.consensus.common.request.IConsensusRequest; +import org.apache.iotdb.db.exception.runtime.SerializationRunTimeException; +import org.apache.iotdb.tsfile.utils.PublicBAOS; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.DataOutputStream; import java.io.IOException; -import java.nio.BufferOverflowException; import java.nio.ByteBuffer; public abstract class ConfigRequest implements IConsensusRequest { @@ -64,29 +73,18 @@ public ConfigRequestType getType() { } @Override - public void serializeRequest(ByteBuffer buffer) { - serialize(buffer); - } - - public final void serialize(ByteBuffer buffer) { - buffer.mark(); - try { - serializeImpl(buffer); - } catch (UnsupportedOperationException e) { - // ignore and throw - throw e; - } catch (BufferOverflowException e) { - buffer.reset(); - throw e; - } catch (Exception e) { - LOGGER.error( - "Rollback buffer entry because error occurs when serializing this physical plan.", e); - buffer.reset(); - throw e; + public ByteBuffer serializeToByteBuffer() { + try (PublicBAOS byteArrayOutputStream = new PublicBAOS(); + DataOutputStream outputStream = new DataOutputStream(byteArrayOutputStream)) { + serializeImpl(outputStream); + return ByteBuffer.wrap(byteArrayOutputStream.getBuf(), 0, byteArrayOutputStream.size()); + } catch (IOException e) { + LOGGER.error("Unexpected error occurs when serializing this ConfigRequest.", e); + throw new SerializationRunTimeException(e); } } - protected abstract void serializeImpl(ByteBuffer buffer); + protected abstract void serializeImpl(DataOutputStream stream) throws IOException; protected abstract void deserializeImpl(ByteBuffer buffer) throws IOException; @@ -121,13 +119,16 @@ public static ConfigRequest create(ByteBuffer buffer) throws IOException { case SetTimePartitionInterval: req = new SetTimePartitionIntervalReq(); break; + case AdjustMaxRegionGroupCount: + req = new AdjustMaxRegionGroupCountReq(); + break; case CountStorageGroup: req = new CountStorageGroupReq(); break; case GetStorageGroup: req = new GetStorageGroupReq(); break; - case CreateRegions: + case CreateRegionGroups: req = new CreateRegionsReq(); break; case DeleteRegions: @@ -157,6 +158,9 @@ public static ConfigRequest create(ByteBuffer buffer) throws IOException { case UpdateProcedure: req = new UpdateProcedureReq(); break; + case PreDeleteStorageGroup: + req = new PreDeleteStorageGroupReq(); + break; case DeleteStorageGroup: req = new DeleteStorageGroupReq(); break; @@ -182,6 +186,21 @@ public static ConfigRequest create(ByteBuffer buffer) throws IOException { case ApplyConfigNode: req = new ApplyConfigNodeReq(); break; + case RemoveConfigNode: + req = new RemoveConfigNodeReq(); + break; + case CreateFunction: + req = new CreateFunctionReq(); + break; + case DropFunction: + req = new DropFunctionReq(); + break; + case GetNodePathsPartition: + req = new GetNodePathsPartitionReq(); + break; + case GetRegionInfoList: + req = new GetRegionInfoListReq(); + break; default: throw new IOException("unknown PhysicalPlan type: " + typeNum); } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigRequestType.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigRequestType.java index d0fd464f4f57..8566e117a79d 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigRequestType.java +++ 
b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/ConfigRequestType.java @@ -26,10 +26,12 @@ public enum ConfigRequestType { SetSchemaReplicationFactor, SetDataReplicationFactor, SetTimePartitionInterval, + AdjustMaxRegionGroupCount, DeleteStorageGroup, + PreDeleteStorageGroup, GetStorageGroup, CountStorageGroup, - CreateRegions, + CreateRegionGroups, DeleteRegions, GetSchemaPartition, CreateSchemaPartition, @@ -57,5 +59,10 @@ public enum ConfigRequestType { ListRolePrivilege, ListUserRoles, ListRoleUsers, - ApplyConfigNode + ApplyConfigNode, + RemoveConfigNode, + CreateFunction, + DropFunction, + GetNodePathsPartition, + GetRegionInfoList; } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/auth/AuthorReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/auth/AuthorReq.java index d8f905ea9939..989218565505 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/auth/AuthorReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/auth/AuthorReq.java @@ -23,6 +23,8 @@ import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import java.io.DataOutputStream; +import java.io.IOException; import java.nio.ByteBuffer; import java.util.HashSet; import java.util.Objects; @@ -131,22 +133,22 @@ public void setUserName(String userName) { } @Override - protected void serializeImpl(ByteBuffer buffer) { - BasicStructureSerDeUtil.write(getPlanTypeOrdinal(authorType), buffer); - BasicStructureSerDeUtil.write(userName, buffer); - BasicStructureSerDeUtil.write(roleName, buffer); - BasicStructureSerDeUtil.write(password, buffer); - BasicStructureSerDeUtil.write(newPassword, buffer); + protected void serializeImpl(DataOutputStream stream) throws IOException { + BasicStructureSerDeUtil.write(getPlanTypeOrdinal(authorType), stream); + BasicStructureSerDeUtil.write(userName, stream); + BasicStructureSerDeUtil.write(roleName, stream); + BasicStructureSerDeUtil.write(password, stream); + BasicStructureSerDeUtil.write(newPassword, stream); if (permissions == null) { - buffer.put((byte) 0); + stream.write((byte) 0); } else { - buffer.put((byte) 1); - buffer.putInt(permissions.size()); + stream.write((byte) 1); + stream.writeInt(permissions.size()); for (int permission : permissions) { - buffer.putInt(permission); + stream.writeInt(permission); } } - BasicStructureSerDeUtil.write(nodeName, buffer); + BasicStructureSerDeUtil.write(nodeName, stream); } @Override @@ -245,4 +247,10 @@ public boolean equals(Object o) { && Objects.equals(permissions, that.permissions) && Objects.equals(nodeName, that.nodeName); } + + @Override + public int hashCode() { + return Objects.hash( + authorType, userName, roleName, password, newPassword, permissions, nodeName); + } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/CountStorageGroupReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/CountStorageGroupReq.java index 5b788d65199c..11dbe355752c 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/CountStorageGroupReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/CountStorageGroupReq.java @@ -22,6 +22,8 @@ import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import 
java.io.DataOutputStream; +import java.io.IOException; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.List; @@ -53,12 +55,12 @@ public String[] getStorageGroupPattern() { } @Override - protected void serializeImpl(ByteBuffer buffer) { - buffer.putInt(getType().ordinal()); + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(getType().ordinal()); - buffer.putInt(storageGroupPattern.length); + stream.writeInt(storageGroupPattern.length); for (String node : storageGroupPattern) { - BasicStructureSerDeUtil.write(node, buffer); + BasicStructureSerDeUtil.write(node, stream); } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetDataNodeInfoReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetDataNodeInfoReq.java index 8436814bff91..d48adf21356b 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetDataNodeInfoReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetDataNodeInfoReq.java @@ -21,6 +21,8 @@ import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import java.io.DataOutputStream; +import java.io.IOException; import java.nio.ByteBuffer; import java.util.Objects; @@ -43,9 +45,9 @@ public Integer getDataNodeID() { } @Override - protected void serializeImpl(ByteBuffer buffer) { - buffer.putInt(ConfigRequestType.GetDataNodeInfo.ordinal()); - buffer.putInt(dataNodeID); + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(ConfigRequestType.GetDataNodeInfo.ordinal()); + stream.writeInt(dataNodeID); } @Override diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetDataPartitionReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetDataPartitionReq.java index 9547e32c0ef6..293974b94998 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetDataPartitionReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetDataPartitionReq.java @@ -27,11 +27,14 @@ import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionReq; +import java.io.DataOutputStream; +import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Objects; /** Get or create DataPartition by the specific partitionSlotsMap. 
*/ @@ -89,24 +92,28 @@ public void convertFromRpcTDataPartitionReq(TDataPartitionReq req) { } @Override - protected void serializeImpl(ByteBuffer buffer) { - buffer.putInt(getType().ordinal()); - - buffer.putInt(partitionSlotsMap.size()); - partitionSlotsMap.forEach( - ((storageGroup, seriesPartitionTimePartitionSlots) -> { - BasicStructureSerDeUtil.write(storageGroup, buffer); - buffer.putInt(seriesPartitionTimePartitionSlots.size()); - seriesPartitionTimePartitionSlots.forEach( - ((seriesPartitionSlot, timePartitionSlots) -> { - ThriftCommonsSerDeUtils.serializeTSeriesPartitionSlot(seriesPartitionSlot, buffer); - buffer.putInt(timePartitionSlots.size()); - timePartitionSlots.forEach( - timePartitionSlot -> - ThriftCommonsSerDeUtils.serializeTTimePartitionSlot( - timePartitionSlot, buffer)); - })); - })); + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(getType().ordinal()); + + stream.writeInt(partitionSlotsMap.size()); + for (Entry>> entry : + partitionSlotsMap.entrySet()) { + String storageGroup = entry.getKey(); + Map> seriesPartitionTimePartitionSlots = + entry.getValue(); + BasicStructureSerDeUtil.write(storageGroup, stream); + stream.writeInt(seriesPartitionTimePartitionSlots.size()); + for (Entry> e : + seriesPartitionTimePartitionSlots.entrySet()) { + TSeriesPartitionSlot seriesPartitionSlot = e.getKey(); + List timePartitionSlots = e.getValue(); + ThriftCommonsSerDeUtils.serializeTSeriesPartitionSlot(seriesPartitionSlot, stream); + stream.writeInt(timePartitionSlots.size()); + timePartitionSlots.forEach( + timePartitionSlot -> + ThriftCommonsSerDeUtils.serializeTTimePartitionSlot(timePartitionSlot, stream)); + } + } } @Override diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetNodePathsPartitionReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetNodePathsPartitionReq.java new file mode 100644 index 000000000000..16246d13ed8b --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetNodePathsPartitionReq.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.consensus.request.read; + +import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.confignode.consensus.request.ConfigRequest; +import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import org.apache.iotdb.db.metadata.path.PathDeserializeUtil; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; + +public class GetNodePathsPartitionReq extends ConfigRequest { + private PartialPath partialPath; + private int level = -1; + + public GetNodePathsPartitionReq() { + super(ConfigRequestType.GetNodePathsPartition); + } + + public PartialPath getPartialPath() { + return partialPath; + } + + public void setPartialPath(PartialPath partialPath) { + this.partialPath = partialPath; + } + + public int getLevel() { + return level; + } + + public void setLevel(int level) { + this.level = level; + } + + @Override + protected void serializeImpl(DataOutputStream stream) throws IOException { + partialPath.serialize(stream); + stream.writeInt(level); + } + + @Override + protected void deserializeImpl(ByteBuffer buffer) throws IOException { + partialPath = (PartialPath) PathDeserializeUtil.deserialize(buffer); + level = buffer.getInt(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + GetNodePathsPartitionReq that = (GetNodePathsPartitionReq) o; + return level == that.level && Objects.equals(partialPath, that.partialPath); + } + + @Override + public int hashCode() { + return Objects.hash(partialPath, level); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetRegionInfoListReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetRegionInfoListReq.java new file mode 100644 index 000000000000..3174a16ddce2 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetRegionInfoListReq.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.consensus.request.read; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; +import org.apache.iotdb.confignode.consensus.request.ConfigRequest; +import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; + +public class GetRegionInfoListReq extends ConfigRequest { + + private TConsensusGroupType regionType; + + public GetRegionInfoListReq() { + super(ConfigRequestType.GetRegionInfoList); + } + + public GetRegionInfoListReq(TConsensusGroupType regionType) { + super(ConfigRequestType.GetRegionInfoList); + this.regionType = regionType; + } + + public TConsensusGroupType getRegionType() { + return regionType; + } + + public void setRegionType(TConsensusGroupType regionType) { + this.regionType = regionType; + } + + @Override + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(getType().ordinal()); + ReadWriteIOUtils.write(regionType.ordinal(), stream); + } + + @Override + protected void deserializeImpl(ByteBuffer buffer) throws IOException { + regionType = TConsensusGroupType.values()[ReadWriteIOUtils.readInt(buffer)]; + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetSchemaPartitionReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetSchemaPartitionReq.java index 76b957934f83..0cedb15d197c 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetSchemaPartitionReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/read/GetSchemaPartitionReq.java @@ -24,12 +24,14 @@ import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Objects; /** Get or create SchemaPartition by the specific partitionSlotsMap. 
*/
@@ -57,19 +59,19 @@ public Map<String, List<TSeriesPartitionSlot>> getPartitionSlotsMap() {
   }
 
   @Override
-  protected void serializeImpl(ByteBuffer buffer) {
-    buffer.putInt(getType().ordinal());
+  protected void serializeImpl(DataOutputStream stream) throws IOException {
+    stream.writeInt(getType().ordinal());
 
-    buffer.putInt(partitionSlotsMap.size());
-    partitionSlotsMap.forEach(
-        (storageGroup, seriesPartitionSlots) -> {
-          BasicStructureSerDeUtil.write(storageGroup, buffer);
-          buffer.putInt(seriesPartitionSlots.size());
-          seriesPartitionSlots.forEach(
-              seriesPartitionSlot ->
-                  ThriftCommonsSerDeUtils.serializeTSeriesPartitionSlot(
-                      seriesPartitionSlot, buffer));
-        });
+    stream.writeInt(partitionSlotsMap.size());
+    for (Entry<String, List<TSeriesPartitionSlot>> entry : partitionSlotsMap.entrySet()) {
+      String storageGroup = entry.getKey();
+      List<TSeriesPartitionSlot> seriesPartitionSlots = entry.getValue();
+      BasicStructureSerDeUtil.write(storageGroup, stream);
+      stream.writeInt(seriesPartitionSlots.size());
+      seriesPartitionSlots.forEach(
+          seriesPartitionSlot ->
+              ThriftCommonsSerDeUtils.serializeTSeriesPartitionSlot(seriesPartitionSlot, stream));
+    }
   }
 
   @Override
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/AdjustMaxRegionGroupCountReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/AdjustMaxRegionGroupCountReq.java
new file mode 100644
index 000000000000..d8c3b9201001
--- /dev/null
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/AdjustMaxRegionGroupCountReq.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iotdb.confignode.consensus.request.write;
+
+import org.apache.iotdb.confignode.consensus.request.ConfigRequest;
+import org.apache.iotdb.confignode.consensus.request.ConfigRequestType;
+import org.apache.iotdb.tsfile.utils.Pair;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+public class AdjustMaxRegionGroupCountReq extends ConfigRequest {
+
+  // Map<StorageGroup, Pair<maxSchemaRegionGroupCount, maxDataRegionGroupCount>>
+  public final Map<String, Pair<Integer, Integer>> maxRegionGroupCountMap;
+
+  public AdjustMaxRegionGroupCountReq() {
+    super(ConfigRequestType.AdjustMaxRegionGroupCount);
+    this.maxRegionGroupCountMap = new HashMap<>();
+  }
+
+  public void putEntry(String storageGroup, Pair<Integer, Integer> maxRegionGroupCount) {
+    maxRegionGroupCountMap.put(storageGroup, maxRegionGroupCount);
+  }
+
+  public Map<String, Pair<Integer, Integer>> getMaxRegionGroupCountMap() {
+    return maxRegionGroupCountMap;
+  }
+
+  @Override
+  protected void serializeImpl(DataOutputStream stream) throws IOException {
+    ReadWriteIOUtils.write(ConfigRequestType.AdjustMaxRegionGroupCount.ordinal(), stream);
+
+    ReadWriteIOUtils.write(maxRegionGroupCountMap.size(), stream);
+    for (Map.Entry<String, Pair<Integer, Integer>> maxRegionGroupCountEntry :
+        maxRegionGroupCountMap.entrySet()) {
+      ReadWriteIOUtils.write(maxRegionGroupCountEntry.getKey(), stream);
+      ReadWriteIOUtils.write(maxRegionGroupCountEntry.getValue().getLeft(), stream);
+      ReadWriteIOUtils.write(maxRegionGroupCountEntry.getValue().getRight(), stream);
+    }
+  }
+
+  @Override
+  protected void deserializeImpl(ByteBuffer buffer) throws IOException {
+    int storageGroupNum = buffer.getInt();
+
+    for (int i = 0; i < storageGroupNum; i++) {
+      String storageGroup = ReadWriteIOUtils.readString(buffer);
+      int maxSchemaRegionGroupCount = buffer.getInt();
+      int maxDataRegionGroupCount = buffer.getInt();
+      maxRegionGroupCountMap.put(
+          storageGroup, new Pair<>(maxSchemaRegionGroupCount, maxDataRegionGroupCount));
+    }
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    AdjustMaxRegionGroupCountReq that = (AdjustMaxRegionGroupCountReq) o;
+    return maxRegionGroupCountMap.equals(that.maxRegionGroupCountMap);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(maxRegionGroupCountMap);
+  }
+}
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/ApplyConfigNodeReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/ApplyConfigNodeReq.java
index 0a00beff6e01..00e55b636983 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/ApplyConfigNodeReq.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/ApplyConfigNodeReq.java
@@ -18,11 +18,13 @@
  */
 package org.apache.iotdb.confignode.consensus.request.write;
 
+import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation;
 import org.apache.iotdb.commons.utils.ThriftConfigNodeSerDeUtils;
 import org.apache.iotdb.confignode.consensus.request.ConfigRequest;
 import org.apache.iotdb.confignode.consensus.request.ConfigRequestType;
-import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeLocation;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
 
+import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Objects;
@@ -45,10 +47,10 @@ public TConfigNodeLocation getConfigNodeLocation() {
   }
 
   @Override
-  protected
void serializeImpl(ByteBuffer buffer) { - buffer.putInt(ConfigRequestType.ApplyConfigNode.ordinal()); + protected void serializeImpl(DataOutputStream stream) throws IOException { + ReadWriteIOUtils.write(ConfigRequestType.ApplyConfigNode.ordinal(), stream); - ThriftConfigNodeSerDeUtils.serializeTConfigNodeLocation(configNodeLocation, buffer); + ThriftConfigNodeSerDeUtils.serializeTConfigNodeLocation(configNodeLocation, stream); } @Override diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateDataPartitionReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateDataPartitionReq.java index 7169656c6faf..5ceb01f22b2b 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateDataPartitionReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateDataPartitionReq.java @@ -18,100 +18,69 @@ */ package org.apache.iotdb.confignode.consensus.request.write; -import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; -import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; -import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.partition.DataPartitionTable; import org.apache.iotdb.commons.utils.BasicStructureSerDeUtil; -import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import org.apache.thrift.TException; +import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.transport.TIOStreamTransport; +import org.apache.thrift.transport.TTransport; + +import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; -import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Objects; /** Create DataPartition by assignedDataPartition */ public class CreateDataPartitionReq extends ConfigRequest { - private Map>>> - assignedDataPartition; + private Map assignedDataPartition; public CreateDataPartitionReq() { super(ConfigRequestType.CreateDataPartition); } - public Map>>> - getAssignedDataPartition() { + public Map getAssignedDataPartition() { return assignedDataPartition; } - public void setAssignedDataPartition( - Map>>> - assignedDataPartition) { + public void setAssignedDataPartition(Map assignedDataPartition) { this.assignedDataPartition = assignedDataPartition; } @Override - protected void serializeImpl(ByteBuffer buffer) { - buffer.putInt(ConfigRequestType.CreateDataPartition.ordinal()); + protected void serializeImpl(DataOutputStream stream) throws IOException { + try { + TTransport transport = new TIOStreamTransport(stream); + TBinaryProtocol protocol = new TBinaryProtocol(transport); + + stream.writeInt(ConfigRequestType.CreateDataPartition.ordinal()); - buffer.putInt(assignedDataPartition.size()); - for (Map.Entry< - String, Map>>> - seriesPartitionTimePartitionEntry : assignedDataPartition.entrySet()) { - BasicStructureSerDeUtil.write(seriesPartitionTimePartitionEntry.getKey(), buffer); - buffer.putInt(seriesPartitionTimePartitionEntry.getValue().size()); - for (Map.Entry>> - timePartitionEntry : seriesPartitionTimePartitionEntry.getValue().entrySet()) { - ThriftCommonsSerDeUtils.serializeTSeriesPartitionSlot(timePartitionEntry.getKey(), buffer); - buffer.putInt(timePartitionEntry.getValue().size()); - for (Map.Entry> regionReplicaSetEntry : - 
timePartitionEntry.getValue().entrySet()) { - ThriftCommonsSerDeUtils.serializeTTimePartitionSlot( - regionReplicaSetEntry.getKey(), buffer); - buffer.putInt(regionReplicaSetEntry.getValue().size()); - for (TRegionReplicaSet regionReplicaSet : regionReplicaSetEntry.getValue()) { - ThriftCommonsSerDeUtils.serializeTRegionReplicaSet(regionReplicaSet, buffer); - } - } + stream.writeInt(assignedDataPartition.size()); + for (Map.Entry dataPartitionTableEntry : + assignedDataPartition.entrySet()) { + BasicStructureSerDeUtil.write(dataPartitionTableEntry.getKey(), stream); + dataPartitionTableEntry.getValue().serialize(stream, protocol); } + } catch (TException e) { + throw new IOException(e); } } @Override protected void deserializeImpl(ByteBuffer buffer) throws IOException { assignedDataPartition = new HashMap<>(); + int storageGroupNum = buffer.getInt(); for (int i = 0; i < storageGroupNum; i++) { - String storageGroupName = BasicStructureSerDeUtil.readString(buffer); - assignedDataPartition.put(storageGroupName, new HashMap<>()); - int seriesPartitionSlotNum = buffer.getInt(); - for (int j = 0; j < seriesPartitionSlotNum; j++) { - TSeriesPartitionSlot seriesPartitionSlot = - ThriftCommonsSerDeUtils.deserializeTSeriesPartitionSlot(buffer); - assignedDataPartition.get(storageGroupName).put(seriesPartitionSlot, new HashMap<>()); - int timePartitionSlotNum = buffer.getInt(); - for (int k = 0; k < timePartitionSlotNum; k++) { - TTimePartitionSlot timePartitionSlot = - ThriftCommonsSerDeUtils.deserializeTTimePartitionSlot(buffer); - assignedDataPartition - .get(storageGroupName) - .get(seriesPartitionSlot) - .put(timePartitionSlot, new ArrayList<>()); - int regionReplicaSetNum = buffer.getInt(); - for (int l = 0; l < regionReplicaSetNum; l++) { - assignedDataPartition - .get(storageGroupName) - .get(seriesPartitionSlot) - .get(timePartitionSlot) - .add(ThriftCommonsSerDeUtils.deserializeTRegionReplicaSet(buffer)); - } - } - } + String storageGroup = BasicStructureSerDeUtil.readString(buffer); + DataPartitionTable dataPartitionTable = new DataPartitionTable(); + dataPartitionTable.deserialize(buffer); + assignedDataPartition.put(storageGroup, dataPartitionTable); } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateFunctionReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateFunctionReq.java new file mode 100644 index 000000000000..32f23a18787b --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateFunctionReq.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.consensus.request.write; + +import org.apache.iotdb.confignode.consensus.request.ConfigRequest; +import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +public class CreateFunctionReq extends ConfigRequest { + + private String functionName; + private String className; + private List uris; + + public CreateFunctionReq() { + super(ConfigRequestType.CreateFunction); + } + + public CreateFunctionReq(String functionName, String className, List uris) { + super(ConfigRequestType.CreateFunction); + this.functionName = functionName; + this.className = className; + this.uris = uris; + } + + public String getFunctionName() { + return functionName; + } + + public String getClassName() { + return className; + } + + public List getUris() { + return uris; + } + + @Override + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(getType().ordinal()); + + ReadWriteIOUtils.write(functionName, stream); + ReadWriteIOUtils.write(className, stream); + + final int size = uris.size(); + ReadWriteIOUtils.write(size, stream); + for (String uri : uris) { + ReadWriteIOUtils.write(uri, stream); + } + } + + @Override + protected void deserializeImpl(ByteBuffer buffer) throws IOException { + functionName = ReadWriteIOUtils.readString(buffer); + className = ReadWriteIOUtils.readString(buffer); + + final int size = ReadWriteIOUtils.readInt(buffer); + uris = new ArrayList<>(size); + for (int i = 0; i < size; ++i) { + uris.add(ReadWriteIOUtils.readString(buffer)); + } + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateRegionsReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateRegionsReq.java index cc8b142721f0..25abedc11fe7 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateRegionsReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateRegionsReq.java @@ -24,50 +24,66 @@ import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Objects; import java.util.TreeMap; /** Create regions for specific StorageGroups */ public class CreateRegionsReq extends ConfigRequest { - private final Map regionMap; + // Map> + private final Map> regionGroupMap; public CreateRegionsReq() { - super(ConfigRequestType.CreateRegions); - this.regionMap = new TreeMap<>(); + super(ConfigRequestType.CreateRegionGroups); + this.regionGroupMap = new TreeMap<>(); } - public Map getRegionMap() { - return regionMap; + public Map> getRegionGroupMap() { + return regionGroupMap; } - public void addRegion(String storageGroup, TRegionReplicaSet regionReplicaSet) { - regionMap.put(storageGroup, regionReplicaSet); + public void addRegionGroup(String storageGroup, TRegionReplicaSet regionReplicaSet) { + regionGroupMap + .computeIfAbsent(storageGroup, regionReplicaSets -> new ArrayList<>()) + .add(regionReplicaSet); } @Override - protected void serializeImpl(ByteBuffer buffer) { - 
buffer.putInt(ConfigRequestType.CreateRegions.ordinal()); + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(ConfigRequestType.CreateRegionGroups.ordinal()); - buffer.putInt(regionMap.size()); - regionMap.forEach( - (storageGroup, regionReplicaSet) -> { - BasicStructureSerDeUtil.write(storageGroup, buffer); - ThriftCommonsSerDeUtils.serializeTRegionReplicaSet(regionReplicaSet, buffer); - }); + stream.writeInt(regionGroupMap.size()); + for (Entry> entry : regionGroupMap.entrySet()) { + String storageGroup = entry.getKey(); + List regionReplicaSets = entry.getValue(); + BasicStructureSerDeUtil.write(storageGroup, stream); + stream.writeInt(regionReplicaSets.size()); + regionReplicaSets.forEach( + regionReplicaSet -> + ThriftCommonsSerDeUtils.serializeTRegionReplicaSet(regionReplicaSet, stream)); + } } @Override protected void deserializeImpl(ByteBuffer buffer) throws IOException { - int length = buffer.getInt(); - for (int i = 0; i < length; i++) { + int storageGroupNum = buffer.getInt(); + for (int i = 0; i < storageGroupNum; i++) { String storageGroup = BasicStructureSerDeUtil.readString(buffer); - TRegionReplicaSet regionReplicaSet = - ThriftCommonsSerDeUtils.deserializeTRegionReplicaSet(buffer); - regionMap.put(storageGroup, regionReplicaSet); + regionGroupMap.put(storageGroup, new ArrayList<>()); + + int regionReplicaSetNum = buffer.getInt(); + for (int j = 0; j < regionReplicaSetNum; j++) { + TRegionReplicaSet regionReplicaSet = + ThriftCommonsSerDeUtils.deserializeTRegionReplicaSet(buffer); + regionGroupMap.get(storageGroup).add(regionReplicaSet); + } } } @@ -76,11 +92,11 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; CreateRegionsReq that = (CreateRegionsReq) o; - return regionMap.equals(that.regionMap); + return regionGroupMap.equals(that.regionGroupMap); } @Override public int hashCode() { - return Objects.hash(regionMap); + return Objects.hash(regionGroupMap); } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateSchemaPartitionReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateSchemaPartitionReq.java index 859ac98efd38..f92f0de95209 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateSchemaPartitionReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/CreateSchemaPartitionReq.java @@ -18,13 +18,17 @@ */ package org.apache.iotdb.confignode.consensus.request.write; -import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; -import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; +import org.apache.iotdb.commons.partition.SchemaPartitionTable; import org.apache.iotdb.commons.utils.BasicStructureSerDeUtil; -import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import org.apache.thrift.TException; +import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.transport.TIOStreamTransport; +import org.apache.thrift.transport.TTransport; + +import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.util.HashMap; @@ -34,36 +38,39 @@ /** Create SchemaPartition by assignedSchemaPartition */ public class CreateSchemaPartitionReq extends ConfigRequest { - private Map> 
assignedSchemaPartition;
+  // TODO: Replace this field whit new SchemaPartition
+  private Map<String, SchemaPartitionTable> assignedSchemaPartition;
 
   public CreateSchemaPartitionReq() {
     super(ConfigRequestType.CreateSchemaPartition);
   }
 
-  public Map<String, Map<TSeriesPartitionSlot, TRegionReplicaSet>> getAssignedSchemaPartition() {
+  public Map<String, SchemaPartitionTable> getAssignedSchemaPartition() {
     return assignedSchemaPartition;
   }
 
   public void setAssignedSchemaPartition(
-      Map<String, Map<TSeriesPartitionSlot, TRegionReplicaSet>> assignedSchemaPartition) {
+      Map<String, SchemaPartitionTable> assignedSchemaPartition) {
     this.assignedSchemaPartition = assignedSchemaPartition;
   }
 
   @Override
-  protected void serializeImpl(ByteBuffer buffer) {
-    buffer.putInt(ConfigRequestType.CreateSchemaPartition.ordinal());
+  protected void serializeImpl(DataOutputStream stream) throws IOException {
+    try {
+      TTransport transport = new TIOStreamTransport(stream);
+      TBinaryProtocol protocol = new TBinaryProtocol(transport);
+
+      stream.writeInt(ConfigRequestType.CreateSchemaPartition.ordinal());
 
-    buffer.putInt(assignedSchemaPartition.size());
-    assignedSchemaPartition.forEach(
-        (storageGroup, partitionSlots) -> {
-          BasicStructureSerDeUtil.write(storageGroup, buffer);
-          buffer.putInt(partitionSlots.size());
-          partitionSlots.forEach(
-              (seriesPartitionSlot, regionReplicaSet) -> {
-                ThriftCommonsSerDeUtils.serializeTSeriesPartitionSlot(seriesPartitionSlot, buffer);
-                ThriftCommonsSerDeUtils.serializeTRegionReplicaSet(regionReplicaSet, buffer);
-              });
-        });
+      stream.writeInt(assignedSchemaPartition.size());
+      for (Map.Entry<String, SchemaPartitionTable> schemaPartitionTableEntry :
+          assignedSchemaPartition.entrySet()) {
+        BasicStructureSerDeUtil.write(schemaPartitionTableEntry.getKey(), stream);
+        schemaPartitionTableEntry.getValue().serialize(stream, protocol);
+      }
+    } catch (TException e) {
+      throw new IOException(e);
+    }
   }
 
   @Override
@@ -73,15 +80,9 @@ protected void deserializeImpl(ByteBuffer buffer) throws IOException {
     int storageGroupNum = buffer.getInt();
     for (int i = 0; i < storageGroupNum; i++) {
       String storageGroup = BasicStructureSerDeUtil.readString(buffer);
-      assignedSchemaPartition.put(storageGroup, new HashMap<>());
-      int seriesPartitionSlotNum = buffer.getInt();
-      for (int j = 0; j < seriesPartitionSlotNum; j++) {
-        TSeriesPartitionSlot seriesPartitionSlot =
-            ThriftCommonsSerDeUtils.deserializeTSeriesPartitionSlot(buffer);
-        assignedSchemaPartition
-            .get(storageGroup)
-            .put(seriesPartitionSlot, ThriftCommonsSerDeUtils.deserializeTRegionReplicaSet(buffer));
+      SchemaPartitionTable schemaPartitionTable = new SchemaPartitionTable();
+      schemaPartitionTable.deserialize(buffer);
+      assignedSchemaPartition.put(storageGroup, schemaPartitionTable);
     }
   }
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DeleteProcedureReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DeleteProcedureReq.java
index 16b6b9fca828..5a160bb757a1 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DeleteProcedureReq.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DeleteProcedureReq.java
@@ -22,6 +22,7 @@
 import org.apache.iotdb.confignode.consensus.request.ConfigRequest;
 import org.apache.iotdb.confignode.consensus.request.ConfigRequestType;
 
+import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Objects;
@@ -43,9 +44,9 @@ public DeleteProcedureReq() {
   }
 
   @Override
-  protected void serializeImpl(ByteBuffer buffer) {
-    buffer.putInt(ConfigRequestType.DeleteProcedure.ordinal());
-    buffer.putLong(procId);
+  protected void serializeImpl(DataOutputStream stream) throws IOException {
+    stream.writeInt(ConfigRequestType.DeleteProcedure.ordinal());
+    stream.writeLong(procId);
   }
 
   @Override
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DeleteRegionsReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DeleteRegionsReq.java
index 31436bd53abe..ffe2e22a7023 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DeleteRegionsReq.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DeleteRegionsReq.java
@@ -19,40 +19,49 @@
 package org.apache.iotdb.confignode.consensus.request.write;
 
 import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId;
+import org.apache.iotdb.commons.utils.BasicStructureSerDeUtil;
 import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils;
 import org.apache.iotdb.confignode.consensus.request.ConfigRequest;
 import org.apache.iotdb.confignode.consensus.request.ConfigRequestType;
 
+import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 
 public class DeleteRegionsReq extends ConfigRequest {
 
-  private final List<TConsensusGroupId> consensusGroupIds;
+  private final Map<String, List<TConsensusGroupId>> deleteRegionMap;
 
   public DeleteRegionsReq() {
     super(ConfigRequestType.DeleteRegions);
-    this.consensusGroupIds = new ArrayList<>();
+    this.deleteRegionMap = new HashMap<>();
   }
 
-  public void addConsensusGroupId(TConsensusGroupId consensusGroupId) {
-    consensusGroupIds.add(consensusGroupId);
+  public void addDeleteRegion(String name, TConsensusGroupId consensusGroupId) {
+    deleteRegionMap.computeIfAbsent(name, empty -> new ArrayList<>()).add(consensusGroupId);
   }
 
-  public List<TConsensusGroupId> getConsensusGroupIds() {
-    return consensusGroupIds;
+  public Map<String, List<TConsensusGroupId>> getDeleteRegionMap() {
+    return deleteRegionMap;
   }
 
   @Override
-  protected void serializeImpl(ByteBuffer buffer) {
-    buffer.putInt(ConfigRequestType.DeleteRegions.ordinal());
+  protected void serializeImpl(DataOutputStream stream) throws IOException {
+    stream.writeInt(ConfigRequestType.DeleteRegions.ordinal());
 
-    buffer.putInt(consensusGroupIds.size());
-    for (TConsensusGroupId consensusGroupId : consensusGroupIds) {
-      ThriftCommonsSerDeUtils.serializeTConsensusGroupId(consensusGroupId, buffer);
+    stream.writeInt(deleteRegionMap.size());
+    for (Map.Entry<String, List<TConsensusGroupId>> consensusGroupIdsEntry :
+        deleteRegionMap.entrySet()) {
+      BasicStructureSerDeUtil.write(consensusGroupIdsEntry.getKey(), stream);
+      stream.writeInt(consensusGroupIdsEntry.getValue().size());
+      for (TConsensusGroupId consensusGroupId : consensusGroupIdsEntry.getValue()) {
+        ThriftCommonsSerDeUtils.serializeTConsensusGroupId(consensusGroupId, stream);
+      }
     }
   }
 
@@ -60,7 +69,12 @@ protected void serializeImpl(ByteBuffer buffer) {
   protected void deserializeImpl(ByteBuffer buffer) throws IOException {
     int length = buffer.getInt();
     for (int i = 0; i < length; i++) {
-      consensusGroupIds.add(ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(buffer));
+      String name = BasicStructureSerDeUtil.readString(buffer);
+      deleteRegionMap.put(name, new ArrayList<>());
+      int regionNum = buffer.getInt();
+      for (int j = 0; j < regionNum; j++) {
+        deleteRegionMap.get(name).add(ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(buffer));
+      }
     }
   }
 
@@ -69,11 +83,11 @@ public boolean equals(Object o) {
     if (this == o) return true;
     if (o == null || getClass() != o.getClass()) return false;
DeleteRegionsReq that = (DeleteRegionsReq) o; - return consensusGroupIds.equals(that.consensusGroupIds); + return deleteRegionMap.equals(that.deleteRegionMap); } @Override public int hashCode() { - return Objects.hash(consensusGroupIds); + return Objects.hash(deleteRegionMap); } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DeleteStorageGroupReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DeleteStorageGroupReq.java index 4855a2dff2f5..0baddb62bf9a 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DeleteStorageGroupReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DeleteStorageGroupReq.java @@ -18,40 +18,41 @@ */ package org.apache.iotdb.confignode.consensus.request.write; -import org.apache.iotdb.commons.utils.ThriftConfigNodeSerDeUtils; +import org.apache.iotdb.commons.utils.BasicStructureSerDeUtil; import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; -import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; +import java.io.DataOutputStream; +import java.io.IOException; import java.nio.ByteBuffer; import java.util.Objects; public class DeleteStorageGroupReq extends ConfigRequest { - private TStorageGroupSchema storageGroup; + private String name; public DeleteStorageGroupReq() { super(ConfigRequestType.DeleteStorageGroup); } - public DeleteStorageGroupReq(TStorageGroupSchema storageGroup) { + public DeleteStorageGroupReq(String name) { this(); - this.storageGroup = storageGroup; + this.name = name; } - public TStorageGroupSchema getStorageGroup() { - return storageGroup; + public String getName() { + return name; } @Override - protected void serializeImpl(ByteBuffer buffer) { - buffer.putInt(ConfigRequestType.DeleteStorageGroup.ordinal()); - ThriftConfigNodeSerDeUtils.serializeTStorageGroupSchema(storageGroup, buffer); + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(ConfigRequestType.DeleteStorageGroup.ordinal()); + BasicStructureSerDeUtil.write(name, stream); } @Override protected void deserializeImpl(ByteBuffer buffer) { - storageGroup = ThriftConfigNodeSerDeUtils.deserializeTStorageGroupSchema(buffer); + name = BasicStructureSerDeUtil.readString(buffer); } @Override @@ -59,11 +60,11 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DeleteStorageGroupReq that = (DeleteStorageGroupReq) o; - return storageGroup.equals(that.storageGroup); + return name.equals(that.name); } @Override public int hashCode() { - return Objects.hash(storageGroup); + return Objects.hash(name); } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DropFunctionReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DropFunctionReq.java new file mode 100644 index 000000000000..192d395698c6 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/DropFunctionReq.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.consensus.request.write; + +import org.apache.iotdb.confignode.consensus.request.ConfigRequest; +import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; + +public class DropFunctionReq extends ConfigRequest { + + private String functionName; + + public DropFunctionReq() { + super(ConfigRequestType.DropFunction); + } + + public DropFunctionReq(String functionName) { + super(ConfigRequestType.DropFunction); + this.functionName = functionName; + } + + public String getFunctionName() { + return functionName; + } + + @Override + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(getType().ordinal()); + ReadWriteIOUtils.write(functionName, stream); + } + + @Override + protected void deserializeImpl(ByteBuffer buffer) throws IOException { + functionName = ReadWriteIOUtils.readString(buffer); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/PreDeleteStorageGroupReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/PreDeleteStorageGroupReq.java new file mode 100644 index 000000000000..e1dbe5609dc9 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/PreDeleteStorageGroupReq.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.consensus.request.write; + +import org.apache.iotdb.commons.utils.BasicStructureSerDeUtil; +import org.apache.iotdb.confignode.consensus.request.ConfigRequest; +import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; + +public class PreDeleteStorageGroupReq extends ConfigRequest { + private String storageGroup; + private PreDeleteType preDeleteType; + + public PreDeleteStorageGroupReq() { + super(ConfigRequestType.PreDeleteStorageGroup); + } + + public PreDeleteStorageGroupReq(String storageGroup, PreDeleteType preDeleteType) { + this(); + this.storageGroup = storageGroup; + this.preDeleteType = preDeleteType; + } + + public String getStorageGroup() { + return storageGroup; + } + + public void setStorageGroup(String storageGroup) { + this.storageGroup = storageGroup; + } + + public PreDeleteType getPreDeleteType() { + return preDeleteType; + } + + public void setPreDeleteType(PreDeleteType preDeleteType) { + this.preDeleteType = preDeleteType; + } + + @Override + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(ConfigRequestType.PreDeleteStorageGroup.ordinal()); + BasicStructureSerDeUtil.write(storageGroup, stream); + stream.write(preDeleteType.getType()); + } + + @Override + protected void deserializeImpl(ByteBuffer buffer) throws IOException { + this.storageGroup = BasicStructureSerDeUtil.readString(buffer); + this.preDeleteType = buffer.get() == (byte) 1 ? PreDeleteType.ROLLBACK : PreDeleteType.EXECUTE; + } + + public enum PreDeleteType { + EXECUTE((byte) 0), + ROLLBACK((byte) 1); + + private final byte type; + + PreDeleteType(byte type) { + this.type = type; + } + + public byte getType() { + return type; + } + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/RegisterDataNodeReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/RegisterDataNodeReq.java index b03fef96a4a5..9017e032b73c 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/RegisterDataNodeReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/RegisterDataNodeReq.java @@ -18,52 +18,54 @@ */ package org.apache.iotdb.confignode.consensus.request.write; -import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import java.io.DataOutputStream; +import java.io.IOException; import java.nio.ByteBuffer; import java.util.Objects; public class RegisterDataNodeReq extends ConfigRequest { - private TDataNodeLocation location; + private TDataNodeInfo info; public RegisterDataNodeReq() { super(ConfigRequestType.RegisterDataNode); } - public RegisterDataNodeReq(TDataNodeLocation location) { + public RegisterDataNodeReq(TDataNodeInfo info) { this(); - this.location = location; + this.info = info; } - public TDataNodeLocation getLocation() { - return location; + public TDataNodeInfo getInfo() { + return info; } @Override - protected void serializeImpl(ByteBuffer buffer) { - buffer.putInt(ConfigRequestType.RegisterDataNode.ordinal()); - ThriftCommonsSerDeUtils.serializeTDataNodeLocation(location, buffer); + protected void 
serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(ConfigRequestType.RegisterDataNode.ordinal()); + ThriftCommonsSerDeUtils.serializeTDataNodeInfo(info, stream); } @Override protected void deserializeImpl(ByteBuffer buffer) { - location = ThriftCommonsSerDeUtils.deserializeTDataNodeLocation(buffer); + info = ThriftCommonsSerDeUtils.deserializeTDataNodeInfo(buffer); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - RegisterDataNodeReq plan = (RegisterDataNodeReq) o; - return location.equals(plan.location); + RegisterDataNodeReq that = (RegisterDataNodeReq) o; + return info.equals(that.info); } @Override public int hashCode() { - return Objects.hash(location); + return Objects.hash(info); } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/RemoveConfigNodeReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/RemoveConfigNodeReq.java new file mode 100644 index 000000000000..dc5f5c6058f7 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/RemoveConfigNodeReq.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.confignode.consensus.request.write; + +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.commons.utils.ThriftConfigNodeSerDeUtils; +import org.apache.iotdb.confignode.consensus.request.ConfigRequest; +import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Objects; + +public class RemoveConfigNodeReq extends ConfigRequest { + + private TConfigNodeLocation configNodeLocation; + + public RemoveConfigNodeReq() { + super(ConfigRequestType.RemoveConfigNode); + } + + public RemoveConfigNodeReq(TConfigNodeLocation configNodeLocation) { + this(); + this.configNodeLocation = configNodeLocation; + } + + public TConfigNodeLocation getConfigNodeLocation() { + return configNodeLocation; + } + + @Override + protected void serializeImpl(DataOutputStream stream) throws IOException { + ReadWriteIOUtils.write(ConfigRequestType.RemoveConfigNode.ordinal(), stream); + + ThriftConfigNodeSerDeUtils.serializeTConfigNodeLocation(configNodeLocation, stream); + } + + @Override + protected void deserializeImpl(ByteBuffer buffer) throws IOException { + configNodeLocation = ThriftConfigNodeSerDeUtils.deserializeTConfigNodeLocation(buffer); + } + + @Override + public int hashCode() { + return Objects.hash(configNodeLocation); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + RemoveConfigNodeReq that = (RemoveConfigNodeReq) o; + return configNodeLocation.equals(that.configNodeLocation); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetDataReplicationFactorReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetDataReplicationFactorReq.java index 971d925d2ce7..a5997a613bda 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetDataReplicationFactorReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetDataReplicationFactorReq.java @@ -22,6 +22,7 @@ import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Objects; @@ -51,11 +52,11 @@ public int getDataReplicationFactor() { } @Override - protected void serializeImpl(ByteBuffer buffer) { - buffer.putInt(getType().ordinal()); + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(getType().ordinal()); - BasicStructureSerDeUtil.write(storageGroup, buffer); - buffer.putInt(dataReplicationFactor); + BasicStructureSerDeUtil.write(storageGroup, stream); + stream.writeInt(dataReplicationFactor); } @Override diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetSchemaReplicationFactorReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetSchemaReplicationFactorReq.java index 95166663d134..c1910e8313be 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetSchemaReplicationFactorReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetSchemaReplicationFactorReq.java @@ -22,6 +22,7 @@ import 
org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Objects; @@ -51,11 +52,11 @@ public int getSchemaReplicationFactor() { } @Override - protected void serializeImpl(ByteBuffer buffer) { - buffer.putInt(getType().ordinal()); + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(getType().ordinal()); - BasicStructureSerDeUtil.write(storageGroup, buffer); - buffer.putInt(schemaReplicationFactor); + BasicStructureSerDeUtil.write(storageGroup, stream); + stream.writeInt(schemaReplicationFactor); } @Override diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetStorageGroupReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetStorageGroupReq.java index c546f07676c6..8b0b554414be 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetStorageGroupReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetStorageGroupReq.java @@ -23,6 +23,7 @@ import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; +import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Objects; @@ -46,9 +47,9 @@ public TStorageGroupSchema getSchema() { } @Override - protected void serializeImpl(ByteBuffer buffer) { - buffer.putInt(ConfigRequestType.SetStorageGroup.ordinal()); - ThriftConfigNodeSerDeUtils.serializeTStorageGroupSchema(schema, buffer); + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(ConfigRequestType.SetStorageGroup.ordinal()); + ThriftConfigNodeSerDeUtils.serializeTStorageGroupSchema(schema, stream); } @Override diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetTTLReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetTTLReq.java index 8e11cc58b194..cf632a8d1fdf 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetTTLReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetTTLReq.java @@ -22,6 +22,7 @@ import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Objects; @@ -51,11 +52,11 @@ public long getTTL() { } @Override - protected void serializeImpl(ByteBuffer buffer) { - buffer.putInt(getType().ordinal()); + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(getType().ordinal()); - BasicStructureSerDeUtil.write(storageGroup, buffer); - buffer.putLong(TTL); + BasicStructureSerDeUtil.write(storageGroup, stream); + stream.writeLong(TTL); } @Override diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetTimePartitionIntervalReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetTimePartitionIntervalReq.java index 97c99583f858..1394163bec9b 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetTimePartitionIntervalReq.java +++ 
b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/SetTimePartitionIntervalReq.java @@ -22,6 +22,7 @@ import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; +import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Objects; @@ -51,11 +52,11 @@ public long getTimePartitionInterval() { } @Override - protected void serializeImpl(ByteBuffer buffer) { - buffer.putInt(getType().ordinal()); + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(getType().ordinal()); - BasicStructureSerDeUtil.write(storageGroup, buffer); - buffer.putLong(timePartitionInterval); + BasicStructureSerDeUtil.write(storageGroup, stream); + stream.writeLong(timePartitionInterval); } @Override diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/UpdateProcedureReq.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/UpdateProcedureReq.java index ba2e47cf1ca2..66a74afe4e6e 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/UpdateProcedureReq.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/request/write/UpdateProcedureReq.java @@ -21,9 +21,10 @@ import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; -import org.apache.iotdb.confignode.procedure.ProcedureFactory; -import org.apache.iotdb.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.store.ProcedureFactory; +import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Objects; @@ -45,10 +46,10 @@ public UpdateProcedureReq() { } @Override - protected void serializeImpl(ByteBuffer buffer) { - buffer.putInt(ConfigRequestType.UpdateProcedure.ordinal()); + protected void serializeImpl(DataOutputStream stream) throws IOException { + stream.writeInt(ConfigRequestType.UpdateProcedure.ordinal()); if (procedure != null) { - procedure.serialize(buffer); + procedure.serialize(stream); } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/DataNodeConfigurationResp.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/DataNodeConfigurationResp.java index f21371b46588..c4672cf4b0ac 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/DataNodeConfigurationResp.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/DataNodeConfigurationResp.java @@ -18,8 +18,8 @@ */ package org.apache.iotdb.confignode.consensus.response; +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.confignode.rpc.thrift.TDataNodeRegisterResp; import org.apache.iotdb.confignode.rpc.thrift.TGlobalConfig; import org.apache.iotdb.consensus.common.DataSet; diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/DataNodeInfosResp.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/DataNodeInfosResp.java new file mode 100644 index 000000000000..7718d4713118 --- /dev/null +++ 
b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/DataNodeInfosResp.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.consensus.response; + +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.confignode.rpc.thrift.TDataNodeInfoResp; +import org.apache.iotdb.consensus.common.DataSet; +import org.apache.iotdb.rpc.TSStatusCode; + +import java.util.Map; + +public class DataNodeInfosResp implements DataSet { + + private TSStatus status; + private Map dataNodeInfoMap; + + public DataNodeInfosResp() { + // empty constructor + } + + public void setStatus(TSStatus status) { + this.status = status; + } + + public TSStatus getStatus() { + return status; + } + + public void setDataNodeInfoMap(Map dataNodeInfoMap) { + this.dataNodeInfoMap = dataNodeInfoMap; + } + + public void convertToRpcDataNodeLocationResp(TDataNodeInfoResp resp) { + resp.setStatus(status); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + resp.setDataNodeInfoMap(dataNodeInfoMap); + } + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/DataNodeLocationsResp.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/DataNodeLocationsResp.java deleted file mode 100644 index 0331e40371a6..000000000000 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/DataNodeLocationsResp.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.confignode.consensus.response; - -import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.confignode.rpc.thrift.TDataNodeLocationResp; -import org.apache.iotdb.consensus.common.DataSet; -import org.apache.iotdb.rpc.TSStatusCode; - -import java.util.Map; - -public class DataNodeLocationsResp implements DataSet { - - private TSStatus status; - private Map dataNodeLocationMap; - - public DataNodeLocationsResp() { - // empty constructor - } - - public void setStatus(TSStatus status) { - this.status = status; - } - - public TSStatus getStatus() { - return status; - } - - public void setDataNodeLocations(Map dataNodeLocationMap) { - this.dataNodeLocationMap = dataNodeLocationMap; - } - - public void convertToRpcDataNodeLocationResp(TDataNodeLocationResp resp) { - resp.setStatus(status); - if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - resp.setDataNodeLocationMap(dataNodeLocationMap); - } - } -} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/DataPartitionResp.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/DataPartitionResp.java index 7b1e01407b5a..2107d8a0d94b 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/DataPartitionResp.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/DataPartitionResp.java @@ -19,20 +19,34 @@ package org.apache.iotdb.confignode.consensus.response; +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.partition.DataPartition; +import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; +import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.partition.DataPartitionTable; import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionResp; import org.apache.iotdb.consensus.common.DataSet; import org.apache.iotdb.rpc.TSStatusCode; +import java.util.List; +import java.util.Map; +import java.util.Vector; +import java.util.concurrent.ConcurrentHashMap; + public class DataPartitionResp implements DataSet { private TSStatus status; - private DataPartition dataPartition; + private final boolean allPartitionsExist; + + private final Map dataPartition; - public DataPartitionResp() { - // Empty constructor + public DataPartitionResp( + TSStatus status, boolean allPartitionsExist, Map dataPartition) { + this.status = status; + this.allPartitionsExist = allPartitionsExist; + this.dataPartition = dataPartition; } public TSStatus getStatus() { @@ -43,20 +57,53 @@ public void setStatus(TSStatus status) { this.status = status; } - public void setDataPartition(DataPartition dataPartition) { - this.dataPartition = dataPartition; + public boolean isAllPartitionsExist() { + return allPartitionsExist; } - /** - * Convert DataPartitionDataSet to TDataPartitionResp - * - * @param resp TDataPartitionResp - */ - public void convertToRpcDataPartitionResp(TDataPartitionResp resp) { + public TDataPartitionResp convertToTDataPartitionResp( + Map replicaSetMap) { + TDataPartitionResp resp = new TDataPartitionResp(); resp.setStatus(status); if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - resp.setDataPartitionMap(dataPartition.getDataPartitionMap()); + Map>>> + dataPartitionMap = new ConcurrentHashMap<>(); + + 
dataPartition.forEach( + (storageGroup, dataPartitionTable) -> { + Map>> + seriesPartitionSlotMap = new ConcurrentHashMap<>(); + + dataPartitionTable + .getDataPartitionMap() + .forEach( + (seriesPartitionSlot, seriesPartitionTable) -> { + Map> timePartitionSlotMap = + new ConcurrentHashMap<>(); + + seriesPartitionTable + .getSeriesPartitionMap() + .forEach( + (timePartitionSlot, consensusGroupIds) -> { + List regionReplicaSets = new Vector<>(); + + consensusGroupIds.forEach( + consensusGroupId -> + regionReplicaSets.add(replicaSetMap.get(consensusGroupId))); + + timePartitionSlotMap.put(timePartitionSlot, regionReplicaSets); + }); + + seriesPartitionSlotMap.put(seriesPartitionSlot, timePartitionSlotMap); + }); + + dataPartitionMap.put(storageGroup, seriesPartitionSlotMap); + }); + + resp.setDataPartitionMap(dataPartitionMap); } + + return resp; } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/RegionInfoListResp.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/RegionInfoListResp.java new file mode 100644 index 000000000000..045f4fdce8e7 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/RegionInfoListResp.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.consensus.response; + +import org.apache.iotdb.common.rpc.thrift.TRegionInfo; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.consensus.common.DataSet; + +import java.util.List; + +public class RegionInfoListResp implements DataSet { + + private TSStatus status; + + private List regionInfoList; + + public TSStatus getStatus() { + return status; + } + + public void setStatus(TSStatus status) { + this.status = status; + } + + public List getRegionInfoList() { + return regionInfoList; + } + + public void setRegionInfoList(List regionInfoList) { + this.regionInfoList = regionInfoList; + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/SchemaNodeManagementResp.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/SchemaNodeManagementResp.java new file mode 100644 index 000000000000..4706636171af --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/SchemaNodeManagementResp.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.consensus.response; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; +import org.apache.iotdb.commons.partition.SchemaPartitionTable; +import org.apache.iotdb.confignode.rpc.thrift.TSchemaNodeManagementResp; +import org.apache.iotdb.consensus.common.DataSet; +import org.apache.iotdb.rpc.TSStatusCode; + +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +public class SchemaNodeManagementResp implements DataSet { + + private TSStatus status; + + // Map + // TODO: Replace this map with new SchemaPartition + private Map schemaPartition; + + private Set matchedNode; + + public SchemaNodeManagementResp() { + // empty constructor + } + + public TSStatus getStatus() { + return status; + } + + public void setStatus(TSStatus status) { + this.status = status; + } + + public void setSchemaPartition(Map schemaPartition) { + this.schemaPartition = schemaPartition; + } + + public void setMatchedNode(Set matchedNode) { + this.matchedNode = matchedNode; + } + + public TSchemaNodeManagementResp convertToRpcSchemaNodeManagementPartitionResp( + Map replicaSetMap) { + TSchemaNodeManagementResp resp = new TSchemaNodeManagementResp(); + resp.setStatus(status); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + resp.setMatchedNode(matchedNode); + + Map> schemaPartitionMap = + new ConcurrentHashMap<>(); + + schemaPartition.forEach( + (storageGroup, schemaPartitionTable) -> { + Map seriesPartitionSlotMap = + new ConcurrentHashMap<>(); + + schemaPartitionTable + .getSchemaPartitionMap() + .forEach( + (seriesPartitionSlot, consensusGroupId) -> + seriesPartitionSlotMap.put( + seriesPartitionSlot, replicaSetMap.get(consensusGroupId))); + + schemaPartitionMap.put(storageGroup, seriesPartitionSlotMap); + }); + + resp.setSchemaRegionMap(schemaPartitionMap); + } + + return resp; + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/SchemaPartitionResp.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/SchemaPartitionResp.java index 2b99ea664f49..9c3d593567a0 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/SchemaPartitionResp.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/response/SchemaPartitionResp.java @@ -19,20 +19,35 @@ package org.apache.iotdb.confignode.consensus.response; +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.partition.SchemaPartition; +import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; +import org.apache.iotdb.commons.partition.SchemaPartitionTable; import org.apache.iotdb.confignode.rpc.thrift.TSchemaPartitionResp; import org.apache.iotdb.consensus.common.DataSet; import 
org.apache.iotdb.rpc.TSStatusCode; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + public class SchemaPartitionResp implements DataSet { private TSStatus status; - private SchemaPartition schemaPartition; + private final boolean allPartitionsExist; + + // Map + // TODO: Replace this map with new SchemaPartition + private final Map schemaPartition; - public SchemaPartitionResp() { - // empty constructor + public SchemaPartitionResp( + TSStatus status, + boolean allPartitionsExist, + Map schemaPartition) { + this.status = status; + this.allPartitionsExist = allPartitionsExist; + this.schemaPartition = schemaPartition; } public TSStatus getStatus() { @@ -43,15 +58,37 @@ public void setStatus(TSStatus status) { this.status = status; } - public void setSchemaPartition(SchemaPartition schemaPartition) { - this.schemaPartition = schemaPartition; + public boolean isAllPartitionsExist() { + return allPartitionsExist; } - public void convertToRpcSchemaPartitionResp(TSchemaPartitionResp resp) { + public TSchemaPartitionResp convertToRpcSchemaPartitionResp( + Map replicaSetMap) { + TSchemaPartitionResp resp = new TSchemaPartitionResp(); resp.setStatus(status); if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - resp.setSchemaRegionMap(schemaPartition.getSchemaPartitionMap()); + Map> schemaPartitionMap = + new ConcurrentHashMap<>(); + + schemaPartition.forEach( + (storageGroup, schemaPartitionTable) -> { + Map seriesPartitionSlotMap = + new ConcurrentHashMap<>(); + + schemaPartitionTable + .getSchemaPartitionMap() + .forEach( + (seriesPartitionSlot, consensusGroupId) -> + seriesPartitionSlotMap.put( + seriesPartitionSlot, replicaSetMap.get(consensusGroupId))); + + schemaPartitionMap.put(storageGroup, seriesPartitionSlotMap); + }); + + resp.setSchemaRegionMap(schemaPartitionMap); } + + return resp; } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/PartitionRegionStateMachine.java b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/PartitionRegionStateMachine.java index 1c87318a6338..398f4db1a257 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/PartitionRegionStateMachine.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/consensus/statemachine/PartitionRegionStateMachine.java @@ -45,10 +45,10 @@ public class PartitionRegionStateMachine implements IStateMachine, IStateMachine private static final Logger LOGGER = LoggerFactory.getLogger(PartitionRegionStateMachine.class); private final ConfigRequestExecutor executor; private ConfigManager configManager; - private TEndPoint currentNode; + private final TEndPoint currentNode; - public PartitionRegionStateMachine(ConfigManager configManager) { - this.executor = new ConfigRequestExecutor(); + public PartitionRegionStateMachine(ConfigManager configManager, ConfigRequestExecutor executor) { + this.executor = executor; this.configManager = configManager; this.currentNode = new TEndPoint() @@ -69,7 +69,7 @@ public TSStatus write(IConsensusRequest request) { ConfigRequest plan; if (request instanceof ByteBufferConsensusRequest) { try { - plan = ConfigRequest.Factory.create(((ByteBufferConsensusRequest) request).getContent()); + plan = ConfigRequest.Factory.create(request.serializeToByteBuffer()); } catch (IOException e) { LOGGER.error("Deserialization error for write plan : {}", request, e); return new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()); @@ -87,7 +87,7 @@ public TSStatus 
write(IConsensusRequest request) { protected TSStatus write(ConfigRequest plan) { TSStatus result; try { - result = executor.executorNonQueryPlan(plan); + result = executor.executeNonQueryPlan(plan); } catch (UnknownPhysicalPlanTypeException | AuthException e) { LOGGER.error(e.getMessage()); result = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()); @@ -100,7 +100,7 @@ public DataSet read(IConsensusRequest request) { ConfigRequest plan; if (request instanceof ByteBufferConsensusRequest) { try { - plan = ConfigRequest.Factory.create(((ByteBufferConsensusRequest) request).getContent()); + plan = ConfigRequest.Factory.create(request.serializeToByteBuffer()); } catch (IOException e) { LOGGER.error("Deserialization error for write plan : {}", request); return null; @@ -128,7 +128,7 @@ public void loadSnapshot(File latestSnapshotRootDir) { protected DataSet read(ConfigRequest plan) { DataSet result; try { - result = executor.executorQueryPlan(plan); + result = executor.executeQueryPlan(plan); } catch (UnknownPhysicalPlanTypeException | AuthException e) { LOGGER.error(e.getMessage()); result = null; @@ -141,8 +141,10 @@ public void notifyLeaderChanged(ConsensusGroupId groupId, TEndPoint newLeader) { if (currentNode.equals(newLeader)) { LOGGER.info("Current node {} is Leader, start procedure manager.", newLeader); configManager.getProcedureManager().shiftExecutor(true); + configManager.getLoadManager().start(); } else { configManager.getProcedureManager().shiftExecutor(false); + configManager.getLoadManager().stop(); } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/exception/StorageGroupNotExistsException.java b/confignode/src/main/java/org/apache/iotdb/confignode/exception/StorageGroupNotExistsException.java new file mode 100644 index 000000000000..aa1a198496eb --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/exception/StorageGroupNotExistsException.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.exception; + +public class StorageGroupNotExistsException extends ConfigNodeException { + + public StorageGroupNotExistsException(String storageGroup) { + super(String.format("StorageGroup: %s doesn't exist.", storageGroup)); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/exception/TimeoutException.java b/confignode/src/main/java/org/apache/iotdb/confignode/exception/TimeoutException.java new file mode 100644 index 000000000000..f8e895d191e6 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/exception/TimeoutException.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.exception; + +public class TimeoutException extends ConfigNodeException { + + public TimeoutException(String message) { + super(message); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java index 7c0ece755393..a92e3c2b4927 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ClusterSchemaManager.java @@ -18,43 +18,49 @@ */ package org.apache.iotdb.confignode.manager; -import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.exception.MetadataException; import org.apache.iotdb.confignode.consensus.request.read.CountStorageGroupReq; import org.apache.iotdb.confignode.consensus.request.read.GetStorageGroupReq; +import org.apache.iotdb.confignode.consensus.request.write.AdjustMaxRegionGroupCountReq; import org.apache.iotdb.confignode.consensus.request.write.DeleteStorageGroupReq; import org.apache.iotdb.confignode.consensus.request.write.SetDataReplicationFactorReq; import org.apache.iotdb.confignode.consensus.request.write.SetSchemaReplicationFactorReq; import org.apache.iotdb.confignode.consensus.request.write.SetStorageGroupReq; import org.apache.iotdb.confignode.consensus.request.write.SetTTLReq; import org.apache.iotdb.confignode.consensus.request.write.SetTimePartitionIntervalReq; -import org.apache.iotdb.confignode.consensus.response.CountStorageGroupResp; -import org.apache.iotdb.confignode.consensus.response.StorageGroupSchemaResp; +import org.apache.iotdb.confignode.exception.StorageGroupNotExistsException; import org.apache.iotdb.confignode.persistence.ClusterSchemaInfo; import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; -import org.apache.iotdb.consensus.common.response.ConsensusReadResponse; +import org.apache.iotdb.consensus.common.DataSet; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.iotdb.tsfile.utils.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.List; +import java.util.Map; /** The ClusterSchemaManager Manages cluster schema read and write requests. 
*/ public class ClusterSchemaManager { private static final Logger LOGGER = LoggerFactory.getLogger(ClusterSchemaManager.class); - private static final ClusterSchemaInfo clusterSchemaInfo = ClusterSchemaInfo.getInstance(); + private final IManager configManager; + private final ClusterSchemaInfo clusterSchemaInfo; - private final Manager configManager; - - public ClusterSchemaManager(Manager configManager) { + public ClusterSchemaManager(IManager configManager, ClusterSchemaInfo clusterSchemaInfo) { this.configManager = configManager; + this.clusterSchemaInfo = clusterSchemaInfo; } + // ====================================================== + // Consensus read/write interfaces + // ====================================================== + /** * Set StorageGroup * @@ -64,46 +70,50 @@ public ClusterSchemaManager(Manager configManager) { */ public TSStatus setStorageGroup(SetStorageGroupReq setStorageGroupReq) { TSStatus result; - if (clusterSchemaInfo.containsStorageGroup(setStorageGroupReq.getSchema().getName())) { + try { + clusterSchemaInfo.checkContainsStorageGroup(setStorageGroupReq.getSchema().getName()); + } catch (MetadataException metadataException) { // Reject if StorageGroup already set - result = new TSStatus(TSStatusCode.STORAGE_GROUP_ALREADY_EXISTS.getStatusCode()); - result.setMessage( - String.format( - "StorageGroup %s is already set.", setStorageGroupReq.getSchema().getName())); - } else { - // Persist StorageGroupSchema - result = getConsensusManager().write(setStorageGroupReq).getStatus(); + if (metadataException instanceof IllegalPathException) { + result = new TSStatus(TSStatusCode.PATH_ILLEGAL.getStatusCode()); + } else { + result = new TSStatus(TSStatusCode.STORAGE_GROUP_ALREADY_EXISTS.getStatusCode()); + } + result.setMessage(metadataException.getMessage()); + return result; } + + // Cache StorageGroupSchema + result = getConsensusManager().write(setStorageGroupReq).getStatus(); + + // Adjust the maximum RegionGroup number of each StorageGroup + adjustMaxRegionGroupCount(); + return result; } public TSStatus deleteStorageGroup(DeleteStorageGroupReq deleteStorageGroupReq) { + // Adjust the maximum RegionGroup number of each StorageGroup + adjustMaxRegionGroupCount(); return getConsensusManager().write(deleteStorageGroupReq).getStatus(); } /** - * Only leader use this interface. Get the SchemaRegionGroupIds or DataRegionGroupIds from the - * specific StorageGroup. + * Count StorageGroups by specific path pattern * - * @param storageGroup StorageGroupName - * @param type SchemaRegion or DataRegion - * @return All SchemaRegionGroupIds when type is SchemaRegion, and all DataRegionGroupIds when - * type is DataRegion + * @return CountStorageGroupResp */ - public List getRegionGroupIds(String storageGroup, TConsensusGroupType type) { - return clusterSchemaInfo.getRegionGroupIds(storageGroup, type); + public DataSet countMatchedStorageGroups(CountStorageGroupReq countStorageGroupReq) { + return getConsensusManager().read(countStorageGroupReq).getDataset(); } /** - * Only leader use this interface. 
+ * Get StorageGroupSchemas by specific path pattern * - * @param storageGroup StorageGroupName - * @return the matched StorageGroupSchema - * @throws MetadataException when the specific StorageGroup doesn't exist + * @return StorageGroupSchemaDataSet */ - public TStorageGroupSchema getStorageGroupSchemaByName(String storageGroup) - throws MetadataException { - return clusterSchemaInfo.getMatchedStorageGroupSchemaByName(storageGroup); + public DataSet getMatchedStorageGroupSchema(GetStorageGroupReq getStorageGroupReq) { + return getConsensusManager().read(getStorageGroupReq).getDataset(); } public TSStatus setTTL(SetTTLReq setTTLReq) { @@ -130,31 +140,113 @@ public TSStatus setTimePartitionInterval( } /** - * Count StorageGroups by specific path pattern + * Only leader use this interface. Adjust the maxSchemaRegionGroupCount and + * maxDataRegionGroupCount of each StorageGroup bases on existing cluster resources + */ + public synchronized void adjustMaxRegionGroupCount() { + // Get all StorageGroupSchemas + Map storageGroupSchemaMap = + getMatchedStorageGroupSchemasByName(getStorageGroupNames()); + int dataNodeNum = getNodeManager().getOnlineDataNodeCount(); + int totalCpuCoreNum = getNodeManager().getTotalCpuCoreCount(); + int storageGroupNum = storageGroupSchemaMap.size(); + + AdjustMaxRegionGroupCountReq adjustMaxRegionGroupCountReq = new AdjustMaxRegionGroupCountReq(); + for (TStorageGroupSchema storageGroupSchema : storageGroupSchemaMap.values()) { + try { + // Adjust maxSchemaRegionGroupCount. + // All StorageGroups share the DataNodes equally. + // Allocated SchemaRegionGroups are not shrunk. + int allocatedSchemaRegionGroupCount = + getPartitionManager() + .getRegionCount(storageGroupSchema.getName(), TConsensusGroupType.SchemaRegion); + int maxSchemaRegionGroupCount = + Math.max( + 1, + Math.max( + dataNodeNum + / (storageGroupNum * storageGroupSchema.getSchemaReplicationFactor()), + allocatedSchemaRegionGroupCount)); + + // Adjust maxDataRegionGroupCount. + // All StorageGroups divide one-third of the total cpu cores equally. + // Allocated DataRegionGroups are not shrunk. + int allocatedDataRegionGroupCount = + getPartitionManager() + .getRegionCount(storageGroupSchema.getName(), TConsensusGroupType.DataRegion); + int maxDataRegionGroupCount = + Math.max( + 2, + Math.max( + totalCpuCoreNum + / (3 * storageGroupNum * storageGroupSchema.getDataReplicationFactor()), + allocatedDataRegionGroupCount)); + + adjustMaxRegionGroupCountReq.putEntry( + storageGroupSchema.getName(), + new Pair<>(maxSchemaRegionGroupCount, maxDataRegionGroupCount)); + } catch (StorageGroupNotExistsException e) { + LOGGER.warn("Adjust maxRegionGroupCount failed because StorageGroup doesn't exist", e); + } + } + getConsensusManager().write(adjustMaxRegionGroupCountReq); + } + + // ====================================================== + // Leader scheduling interfaces + // ====================================================== + + /** + * Only leader use this interface. 
* - * @return CountStorageGroupResp + * @param storageGroup StorageGroupName + * @return The specific StorageGroupSchema + * @throws StorageGroupNotExistsException When the specific StorageGroup doesn't exist */ - public CountStorageGroupResp countMatchedStorageGroups( - CountStorageGroupReq countStorageGroupReq) { - ConsensusReadResponse readResponse = getConsensusManager().read(countStorageGroupReq); - return (CountStorageGroupResp) readResponse.getDataset(); + public TStorageGroupSchema getStorageGroupSchemaByName(String storageGroup) + throws StorageGroupNotExistsException { + return clusterSchemaInfo.getMatchedStorageGroupSchemaByName(storageGroup); } /** - * Get StorageGroupSchemas by specific path pattern + * Only leader use this interface. * - * @return StorageGroupSchemaDataSet + * @param rawPathList List + * @return the matched StorageGroupSchemas */ - public StorageGroupSchemaResp getMatchedStorageGroupSchema( - GetStorageGroupReq getStorageGroupReq) { - ConsensusReadResponse readResponse = getConsensusManager().read(getStorageGroupReq); - return (StorageGroupSchemaResp) readResponse.getDataset(); + public Map getMatchedStorageGroupSchemasByName( + List rawPathList) { + return clusterSchemaInfo.getMatchedStorageGroupSchemasByName(rawPathList); } + /** + * Only leader use this interface. + * + * @return List, all storageGroups' name + */ public List getStorageGroupNames() { return clusterSchemaInfo.getStorageGroupNames(); } + /** + * Only leader use this interface. Get the maxRegionGroupCount of specific StorageGroup. + * + * @param storageGroup StorageGroupName + * @param consensusGroupType SchemaRegion or DataRegion + * @return maxSchemaRegionGroupCount or maxDataRegionGroupCount + */ + public int getMaxRegionGroupCount(String storageGroup, TConsensusGroupType consensusGroupType) { + return clusterSchemaInfo.getMaxRegionGroupCount(storageGroup, consensusGroupType); + } + + private NodeManager getNodeManager() { + return configManager.getNodeManager(); + } + + private PartitionManager getPartitionManager() { + return configManager.getPartitionManager(); + } + private ConsensusManager getConsensusManager() { return configManager.getConsensusManager(); } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java index 130eff010a09..69f5325db07a 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConfigManager.java @@ -19,23 +19,31 @@ package org.apache.iotdb.confignode.manager; -import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TFlushReq; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.commons.conf.CommonDescriptor; -import org.apache.iotdb.confignode.conf.ConfigNodeConf; +import org.apache.iotdb.commons.conf.IoTDBConstant; +import org.apache.iotdb.commons.exception.IllegalPathException; +import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.commons.utils.AuthUtils; +import org.apache.iotdb.commons.utils.PathUtils; +import org.apache.iotdb.commons.utils.StatusUtils; +import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; -import 
org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.auth.AuthorReq; import org.apache.iotdb.confignode.consensus.request.read.CountStorageGroupReq; import org.apache.iotdb.confignode.consensus.request.read.GetDataNodeInfoReq; import org.apache.iotdb.confignode.consensus.request.read.GetDataPartitionReq; +import org.apache.iotdb.confignode.consensus.request.read.GetNodePathsPartitionReq; import org.apache.iotdb.confignode.consensus.request.read.GetOrCreateDataPartitionReq; import org.apache.iotdb.confignode.consensus.request.read.GetOrCreateSchemaPartitionReq; +import org.apache.iotdb.confignode.consensus.request.read.GetRegionInfoListReq; import org.apache.iotdb.confignode.consensus.request.read.GetSchemaPartitionReq; import org.apache.iotdb.confignode.consensus.request.read.GetStorageGroupReq; -import org.apache.iotdb.confignode.consensus.request.write.ApplyConfigNodeReq; import org.apache.iotdb.confignode.consensus.request.write.RegisterDataNodeReq; +import org.apache.iotdb.confignode.consensus.request.write.RemoveConfigNodeReq; import org.apache.iotdb.confignode.consensus.request.write.SetDataReplicationFactorReq; import org.apache.iotdb.confignode.consensus.request.write.SetSchemaReplicationFactorReq; import org.apache.iotdb.confignode.consensus.request.write.SetStorageGroupReq; @@ -43,18 +51,32 @@ import org.apache.iotdb.confignode.consensus.request.write.SetTimePartitionIntervalReq; import org.apache.iotdb.confignode.consensus.response.CountStorageGroupResp; import org.apache.iotdb.confignode.consensus.response.DataNodeConfigurationResp; -import org.apache.iotdb.confignode.consensus.response.DataNodeLocationsResp; +import org.apache.iotdb.confignode.consensus.response.DataNodeInfosResp; import org.apache.iotdb.confignode.consensus.response.DataPartitionResp; import org.apache.iotdb.confignode.consensus.response.PermissionInfoResp; +import org.apache.iotdb.confignode.consensus.response.RegionInfoListResp; +import org.apache.iotdb.confignode.consensus.response.SchemaNodeManagementResp; import org.apache.iotdb.confignode.consensus.response.SchemaPartitionResp; import org.apache.iotdb.confignode.consensus.response.StorageGroupSchemaResp; +import org.apache.iotdb.confignode.consensus.statemachine.PartitionRegionStateMachine; +import org.apache.iotdb.confignode.manager.load.LoadManager; +import org.apache.iotdb.confignode.persistence.AuthorInfo; import org.apache.iotdb.confignode.persistence.ClusterSchemaInfo; import org.apache.iotdb.confignode.persistence.NodeInfo; +import org.apache.iotdb.confignode.persistence.ProcedureInfo; +import org.apache.iotdb.confignode.persistence.UDFInfo; +import org.apache.iotdb.confignode.persistence.executor.ConfigRequestExecutor; +import org.apache.iotdb.confignode.persistence.partition.PartitionInfo; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterReq; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterResp; +import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionResp; +import org.apache.iotdb.confignode.rpc.thrift.TPermissionInfoResp; +import org.apache.iotdb.confignode.rpc.thrift.TSchemaNodeManagementResp; +import org.apache.iotdb.confignode.rpc.thrift.TSchemaPartitionResp; import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; import org.apache.iotdb.consensus.common.DataSet; import org.apache.iotdb.db.mpp.common.schematree.PathPatternTree; +import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.TSStatusCode; import org.slf4j.Logger; 
@@ -62,14 +84,18 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; + +import static org.apache.iotdb.commons.conf.IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD; /** Entry of all management, AssignPartitionManager,AssignRegionManager. */ -public class ConfigManager implements Manager { +public class ConfigManager implements IManager { private static final Logger LOGGER = LoggerFactory.getLogger(ConfigManager.class); @@ -93,18 +119,38 @@ public class ConfigManager implements Manager { /** Manage procedure */ private final ProcedureManager procedureManager; + /** UDF */ + private final UDFManager udfManager; + public ConfigManager() throws IOException { - this.nodeManager = new NodeManager(this); - this.partitionManager = new PartitionManager(this); - this.clusterSchemaManager = new ClusterSchemaManager(this); - this.permissionManager = new PermissionManager(this); + // Build the persistence module + NodeInfo nodeInfo = new NodeInfo(); + ClusterSchemaInfo clusterSchemaInfo = new ClusterSchemaInfo(); + PartitionInfo partitionInfo = new PartitionInfo(); + AuthorInfo authorInfo = new AuthorInfo(); + ProcedureInfo procedureInfo = new ProcedureInfo(); + UDFInfo udfInfo = new UDFInfo(); + + // Build state machine and executor + ConfigRequestExecutor executor = + new ConfigRequestExecutor( + nodeInfo, clusterSchemaInfo, partitionInfo, authorInfo, procedureInfo, udfInfo); + PartitionRegionStateMachine stateMachine = new PartitionRegionStateMachine(this, executor); + + // Build the manager module + this.nodeManager = new NodeManager(this, nodeInfo); + this.clusterSchemaManager = new ClusterSchemaManager(this, clusterSchemaInfo); + this.partitionManager = new PartitionManager(this, partitionInfo); + this.permissionManager = new PermissionManager(this, authorInfo); + this.procedureManager = new ProcedureManager(this, procedureInfo); + this.udfManager = new UDFManager(this, udfInfo); this.loadManager = new LoadManager(this); - this.procedureManager = new ProcedureManager(this); - this.consensusManager = new ConsensusManager(this); + this.consensusManager = new ConsensusManager(this, stateMachine); } public void close() throws IOException { consensusManager.close(); + partitionManager.getRegionCleaner().shutdown(); procedureManager.shiftExecutor(false); } @@ -121,7 +167,7 @@ public DataSet registerDataNode(RegisterDataNodeReq registerDataNodeReq) { } else { DataNodeConfigurationResp dataSet = new DataNodeConfigurationResp(); dataSet.setStatus(status); - dataSet.setConfigNodeList(NodeInfo.getInstance().getOnlineConfigNodes()); + dataSet.setConfigNodeList(nodeManager.getOnlineConfigNodes()); return dataSet; } } @@ -132,7 +178,7 @@ public DataSet getDataNodeInfo(GetDataNodeInfoReq getDataNodeInfoReq) { if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { return nodeManager.getDataNodeInfo(getDataNodeInfoReq); } else { - DataNodeLocationsResp dataSet = new DataNodeLocationsResp(); + DataNodeInfosResp dataSet = new DataNodeInfosResp(); dataSet.setStatus(status); return dataSet; } @@ -221,16 +267,7 @@ public TSStatus deleteStorageGroups(List deletedPaths) { if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { // remove wild Map deleteStorageSchemaMap = - ClusterSchemaInfo.getInstance().getDeleteStorageGroups(deletedPaths); - for (Map.Entry storageGroupSchemaEntry : - 
deleteStorageSchemaMap.entrySet()) { - String sgName = storageGroupSchemaEntry.getKey(); - TStorageGroupSchema deleteStorageSchema = storageGroupSchemaEntry.getValue(); - deleteStorageSchema.setSchemaRegionGroupIds( - getClusterSchemaManager().getRegionGroupIds(sgName, TConsensusGroupType.SchemaRegion)); - deleteStorageSchema.setDataRegionGroupIds( - getClusterSchemaManager().getRegionGroupIds(sgName, TConsensusGroupType.DataRegion)); - } + getClusterSchemaManager().getMatchedStorageGroupSchemasByName(deletedPaths); ArrayList parsedDeleteStorageGroups = new ArrayList<>(deleteStorageSchemaMap.values()); return procedureManager.deleteStorageGroups(parsedDeleteStorageGroups); @@ -239,77 +276,98 @@ public TSStatus deleteStorageGroups(List deletedPaths) { } } + private List calculateRelatedSlot( + PartialPath path, PartialPath storageGroup) { + // The path contains `**` + if (path.getFullPath().contains(IoTDBConstant.MULTI_LEVEL_PATH_WILDCARD)) { + return new ArrayList<>(); + } + // path doesn't contain * so the size of innerPathList should be 1 + PartialPath innerPath = path.alterPrefixPath(storageGroup).get(0); + // The innerPath contains `*` and the only `*` is not in last level + if (innerPath.getDevice().contains(IoTDBConstant.ONE_LEVEL_PATH_WILDCARD)) { + return new ArrayList<>(); + } + return Collections.singletonList( + getPartitionManager().getSeriesPartitionSlot(innerPath.getDevice())); + } + @Override - public DataSet getSchemaPartition(PathPatternTree patternTree) { + public TSchemaPartitionResp getSchemaPartition(PathPatternTree patternTree) { TSStatus status = confirmLeader(); if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - List devicePaths = patternTree.findAllDevicePaths(); - List storageGroups = getClusterSchemaManager().getStorageGroupNames(); GetSchemaPartitionReq getSchemaPartitionReq = new GetSchemaPartitionReq(); - Map> partitionSlotsMap = new HashMap<>(); - - boolean getAll = false; - Set getAllSet = new HashSet<>(); - for (String devicePath : devicePaths) { - boolean matchStorageGroup = false; - for (String storageGroup : storageGroups) { - if (devicePath.startsWith(storageGroup + ".")) { - matchStorageGroup = true; - if (devicePath.contains("*")) { - // Get all SchemaPartitions of this StorageGroup if the devicePath contains "*" - getAllSet.add(storageGroup); - } else { - // Get the specific SchemaPartition - partitionSlotsMap - .computeIfAbsent(storageGroup, key -> new ArrayList<>()) - .add(getPartitionManager().getSeriesPartitionSlot(devicePath)); + Map> partitionSlotsMap = new HashMap<>(); + List relatedPaths = patternTree.getAllPathPatterns(); + List allStorageGroups = getClusterSchemaManager().getStorageGroupNames(); + Map scanAllRegions = new HashMap<>(); + for (PartialPath path : relatedPaths) { + for (String storageGroup : allStorageGroups) { + try { + PartialPath storageGroupPath = new PartialPath(storageGroup); + if (path.overlapWith(storageGroupPath.concatNode(MULTI_LEVEL_PATH_WILDCARD)) + && !scanAllRegions.containsKey(storageGroup)) { + List relatedSlot = calculateRelatedSlot(path, storageGroupPath); + if (relatedSlot.isEmpty()) { + scanAllRegions.put(storageGroup, true); + partitionSlotsMap.put(storageGroup, new HashSet<>()); + } else { + partitionSlotsMap + .computeIfAbsent(storageGroup, k -> new HashSet<>()) + .addAll(relatedSlot); + } } - break; + } catch (IllegalPathException e) { + // this line won't be reached in general + throw new RuntimeException(e); } } - if (!matchStorageGroup && devicePath.contains("**")) { - // Get all 
SchemaPartitions if there exists one devicePath that contains "**" - getAll = true; - } } - if (getAll) { - partitionSlotsMap = new HashMap<>(); - } else { - for (String storageGroup : getAllSet) { - if (partitionSlotsMap.containsKey(storageGroup)) { - partitionSlotsMap.replace(storageGroup, new ArrayList<>()); - } else { - partitionSlotsMap.put(storageGroup, new ArrayList<>()); - } - } + // return empty partition + if (partitionSlotsMap.isEmpty()) { + TSchemaPartitionResp resp = new TSchemaPartitionResp(); + resp.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); + resp.setSchemaRegionMap(new HashMap<>()); + return resp; } - getSchemaPartitionReq.setPartitionSlotsMap(partitionSlotsMap); - return partitionManager.getSchemaPartition(getSchemaPartitionReq); + getSchemaPartitionReq.setPartitionSlotsMap( + partitionSlotsMap.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> new ArrayList<>(e.getValue())))); + + SchemaPartitionResp resp = + (SchemaPartitionResp) partitionManager.getSchemaPartition(getSchemaPartitionReq); + TSchemaPartitionResp result = + resp.convertToRpcSchemaPartitionResp(getLoadManager().genRealTimeRoutingPolicy()); + + // TODO: Delete or hide this LOGGER before officially release. + LOGGER.info( + "GetSchemaPartition receive paths: {}, return TSchemaPartitionResp: {}", + relatedPaths, + result); + + return result; } else { - SchemaPartitionResp dataSet = new SchemaPartitionResp(); - dataSet.setStatus(status); - return dataSet; + return new TSchemaPartitionResp().setStatus(status); } } @Override - public DataSet getOrCreateSchemaPartition(PathPatternTree patternTree) { + public TSchemaPartitionResp getOrCreateSchemaPartition(PathPatternTree patternTree) { TSStatus status = confirmLeader(); if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - List devicePaths = patternTree.findAllDevicePaths(); + List devicePaths = patternTree.getAllDevicePatterns(); List storageGroups = getClusterSchemaManager().getStorageGroupNames(); GetOrCreateSchemaPartitionReq getOrCreateSchemaPartitionReq = new GetOrCreateSchemaPartitionReq(); Map> partitionSlotsMap = new HashMap<>(); - for (String devicePath : devicePaths) { if (!devicePath.contains("*")) { // Only check devicePaths that without "*" for (String storageGroup : storageGroups) { - if (devicePath.startsWith(storageGroup + ".")) { + if (PathUtils.isStartWith(devicePath, storageGroup)) { partitionSlotsMap .computeIfAbsent(storageGroup, key -> new ArrayList<>()) .add(getPartitionManager().getSeriesPartitionSlot(devicePath)); @@ -318,47 +376,117 @@ public DataSet getOrCreateSchemaPartition(PathPatternTree patternTree) { } } } - getOrCreateSchemaPartitionReq.setPartitionSlotsMap(partitionSlotsMap); - return partitionManager.getOrCreateSchemaPartition(getOrCreateSchemaPartitionReq); + + SchemaPartitionResp resp = + (SchemaPartitionResp) + partitionManager.getOrCreateSchemaPartition(getOrCreateSchemaPartitionReq); + TSchemaPartitionResp result = + resp.convertToRpcSchemaPartitionResp(getLoadManager().genRealTimeRoutingPolicy()); + + // TODO: Delete or hide this LOGGER before officially release. 
+ LOGGER.info( + "GetOrCreateSchemaPartition receive devicePaths: {}, return TSchemaPartitionResp: {}", + devicePaths, + result); + + return result; } else { - SchemaPartitionResp dataSet = new SchemaPartitionResp(); - dataSet.setStatus(status); - return dataSet; + return new TSchemaPartitionResp().setStatus(status); } } @Override - public DataSet getDataPartition(GetDataPartitionReq getDataPartitionReq) { + public TSchemaNodeManagementResp getNodePathsPartition(PartialPath partialPath, Integer level) { TSStatus status = confirmLeader(); if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - return partitionManager.getDataPartition(getDataPartitionReq); + GetNodePathsPartitionReq getNodePathsPartitionReq = new GetNodePathsPartitionReq(); + getNodePathsPartitionReq.setPartialPath(partialPath); + if (null != level) { + getNodePathsPartitionReq.setLevel(level); + } + SchemaNodeManagementResp resp = + (SchemaNodeManagementResp) + partitionManager.getNodePathsPartition(getNodePathsPartitionReq); + TSchemaNodeManagementResp result = + resp.convertToRpcSchemaNodeManagementPartitionResp( + getLoadManager().genRealTimeRoutingPolicy()); + + // TODO: Delete or hide this LOGGER before officially release. + LOGGER.info( + "getNodePathsPartition receive devicePaths: {}, level: {}, return TSchemaNodeManagementResp: {}", + partialPath, + level, + result); + + return result; } else { - DataPartitionResp dataSet = new DataPartitionResp(); - dataSet.setStatus(status); - return dataSet; + return new TSchemaNodeManagementResp().setStatus(status); } } @Override - public DataSet getOrCreateDataPartition(GetOrCreateDataPartitionReq getOrCreateDataPartitionReq) { + public TDataPartitionResp getDataPartition(GetDataPartitionReq getDataPartitionReq) { TSStatus status = confirmLeader(); if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - return partitionManager.getOrCreateDataPartition(getOrCreateDataPartitionReq); + DataPartitionResp resp = + (DataPartitionResp) partitionManager.getDataPartition(getDataPartitionReq); + + TDataPartitionResp result = + resp.convertToTDataPartitionResp(getLoadManager().genRealTimeRoutingPolicy()); + + // TODO: Delete or hide this LOGGER before officially release. + LOGGER.info( + "GetDataPartition interface receive PartitionSlotsMap: {}, return TDataPartitionResp: {}", + getDataPartitionReq.getPartitionSlotsMap(), + result); + + return result; } else { - DataPartitionResp dataSet = new DataPartitionResp(); - dataSet.setStatus(status); - return dataSet; + return new TDataPartitionResp().setStatus(status); + } + } + + @Override + public TDataPartitionResp getOrCreateDataPartition( + GetOrCreateDataPartitionReq getOrCreateDataPartitionReq) { + TSStatus status = confirmLeader(); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + DataPartitionResp resp = + (DataPartitionResp) + partitionManager.getOrCreateDataPartition(getOrCreateDataPartitionReq); + + TDataPartitionResp result = + resp.convertToTDataPartitionResp(getLoadManager().genRealTimeRoutingPolicy()); + + // TODO: Delete or hide this LOGGER before officially release. + LOGGER.info( + "GetOrCreateDataPartition success. 
receive PartitionSlotsMap: {}, return TDataPartitionResp: {}", + getOrCreateDataPartitionReq.getPartitionSlotsMap(), + result); + + return result; + } else { + return new TDataPartitionResp().setStatus(status); } } private TSStatus confirmLeader() { + TSStatus result = new TSStatus(); + if (getConsensusManager().isLeader()) { - return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + return result.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } else { - return new TSStatus(TSStatusCode.NEED_REDIRECTION.getStatusCode()) - .setMessage( - "The current ConfigNode is not leader. And ConfigNodeGroup is in leader election. Please redirect with a random ConfigNode."); + result.setCode(TSStatusCode.NEED_REDIRECTION.getStatusCode()); + result.setMessage( + "The current ConfigNode is not leader, please redirect to a new ConfigNode."); + + TConfigNodeLocation leaderLocation = consensusManager.getLeader(); + if (leaderLocation != null) { + result.setRedirectNode(leaderLocation.getInternalEndPoint()); + } + + return result; } } @@ -388,20 +516,20 @@ public LoadManager getLoadManager() { } @Override - public TSStatus operatePermission(ConfigRequest configRequest) { + public TSStatus operatePermission(AuthorReq authorReq) { TSStatus status = confirmLeader(); if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - return permissionManager.operatePermission((AuthorReq) configRequest); + return permissionManager.operatePermission(authorReq); } else { return status; } } @Override - public DataSet queryPermission(ConfigRequest configRequest) { + public DataSet queryPermission(AuthorReq authorReq) { TSStatus status = confirmLeader(); if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - return permissionManager.queryPermission((AuthorReq) configRequest); + return permissionManager.queryPermission(authorReq); } else { PermissionInfoResp dataSet = new PermissionInfoResp(); dataSet.setStatus(status); @@ -410,36 +538,66 @@ public DataSet queryPermission(ConfigRequest configRequest) { } @Override - public TSStatus login(String username, String password) { + public TPermissionInfoResp login(String username, String password) { TSStatus status = confirmLeader(); if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { return permissionManager.login(username, password); } else { - return status; + TPermissionInfoResp resp = AuthUtils.generateEmptyPermissionInfoResp(); + resp.setStatus(status); + return resp; } } @Override - public TSStatus checkUserPrivileges(String username, List paths, int permission) { + public TPermissionInfoResp checkUserPrivileges( + String username, List paths, int permission) { TSStatus status = confirmLeader(); if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { return permissionManager.checkUserPrivileges(username, paths, permission); } else { - return status; + TPermissionInfoResp resp = AuthUtils.generateEmptyPermissionInfoResp(); + resp.setStatus(status); + return resp; } } @Override public TConfigNodeRegisterResp registerConfigNode(TConfigNodeRegisterReq req) { // Check global configuration - ConfigNodeConf conf = ConfigNodeDescriptor.getInstance().getConf(); + TSStatus status = confirmLeader(); + + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + TConfigNodeRegisterResp errorResp1 = checkConfigNodeRegisterResp(req); + if (errorResp1 != null) return errorResp1; + + procedureManager.addConfigNode(req); + return nodeManager.registerConfigNode(req); + } + + return new 
TConfigNodeRegisterResp().setStatus(status); + } + + private TConfigNodeRegisterResp checkConfigNodeRegisterResp(TConfigNodeRegisterReq req) { + ConfigNodeConfig conf = ConfigNodeDescriptor.getInstance().getConf(); TConfigNodeRegisterResp errorResp = new TConfigNodeRegisterResp(); errorResp.setStatus(new TSStatus(TSStatusCode.ERROR_GLOBAL_CONFIG.getStatusCode())); - if (!req.getDataNodeConsensusProtocolClass().equals(conf.getDataNodeConsensusProtocolClass())) { + if (!req.getDataRegionConsensusProtocolClass() + .equals(conf.getDataRegionConsensusProtocolClass())) { + errorResp + .getStatus() + .setMessage( + "Reject register, please ensure that the data_region_consensus_protocol_class " + + "are consistent."); + return errorResp; + } + if (!req.getSchemaRegionConsensusProtocolClass() + .equals(conf.getSchemaRegionConsensusProtocolClass())) { errorResp .getStatus() .setMessage( - "Reject register, please ensure that the data_node_consensus_protocol_class are consistent."); + "Reject register, please ensure that the schema_region_consensus_protocol_class " + + "are consistent."); return errorResp; } if (req.getSeriesPartitionSlotNum() != conf.getSeriesPartitionSlotNum()) { @@ -483,16 +641,90 @@ public TConfigNodeRegisterResp registerConfigNode(TConfigNodeRegisterReq req) { "Reject register, please ensure that the data_replication_factor are consistent."); return errorResp; } + return null; + } + + @Override + public TSStatus addConsensusGroup(List configNodeLocations) { + consensusManager.addConsensusGroup(configNodeLocations); + return StatusUtils.OK; + } + + @Override + public TSStatus removeConfigNode(RemoveConfigNodeReq removeConfigNodeReq) { + TSStatus status = confirmLeader(); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return nodeManager.removeConfigNode(removeConfigNodeReq); + } else { + return status; + } + } + + @Override + public TSStatus createFunction(String udfName, String className, List uris) { + TSStatus status = confirmLeader(); + return status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode() + ? udfManager.createFunction(udfName, className, uris) + : status; + } + + @Override + public TSStatus dropFunction(String udfName) { + TSStatus status = confirmLeader(); + return status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode() + ? udfManager.dropFunction(udfName) + : status; + } + + @Override + public TSStatus flush(TFlushReq req) { + TSStatus status = confirmLeader(); + return status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode() + ? 
RpcUtils.squashResponseStatusList(nodeManager.flush(req)) + : status; + } - return nodeManager.registerConfigNode(req); + @Override + public UDFManager getUDFManager() { + return udfManager; } @Override - public TSStatus applyConfigNode(ApplyConfigNodeReq applyConfigNodeReq) { - return nodeManager.applyConfigNode(applyConfigNodeReq); + public DataSet showRegion(GetRegionInfoListReq getRegionsinfoReq) { + TSStatus status = confirmLeader(); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return partitionManager.getRegionInfoList(getRegionsinfoReq); + } else { + RegionInfoListResp regionResp = new RegionInfoListResp(); + regionResp.setStatus(status); + return regionResp; + } } public ProcedureManager getProcedureManager() { return procedureManager; } + + /** + * @param storageGroups the storage groups to check + * @return List of PartialPath the storage groups that not exist + */ + public List checkStorageGroupExist(List storageGroups) { + List noExistSg = new ArrayList<>(); + if (storageGroups == null) { + return noExistSg; + } + for (PartialPath storageGroup : storageGroups) { + if (!clusterSchemaManager.getStorageGroupNames().contains(storageGroup.toString())) { + noExistSg.add(storageGroup); + } + } + return noExistSg; + } + + @Override + public void addMetrics() { + partitionManager.addMetrics(); + nodeManager.addMetrics(); + } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConsensusManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConsensusManager.java index 8d093e326c48..496c92172a0c 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConsensusManager.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ConsensusManager.java @@ -18,44 +18,47 @@ */ package org.apache.iotdb.confignode.manager; +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.common.rpc.thrift.TEndPoint; -import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.consensus.ConsensusGroupId; import org.apache.iotdb.commons.consensus.PartitionRegionId; -import org.apache.iotdb.confignode.client.SyncConfigNodeClientPool; -import org.apache.iotdb.confignode.conf.ConfigNodeConf; +import org.apache.iotdb.commons.utils.TestOnly; +import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.write.ApplyConfigNodeReq; +import org.apache.iotdb.confignode.consensus.request.write.RemoveConfigNodeReq; import org.apache.iotdb.confignode.consensus.statemachine.PartitionRegionStateMachine; -import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.consensus.ConsensusFactory; import org.apache.iotdb.consensus.IConsensus; import org.apache.iotdb.consensus.common.Peer; import org.apache.iotdb.consensus.common.response.ConsensusReadResponse; import org.apache.iotdb.consensus.common.response.ConsensusWriteResponse; -import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.iotdb.consensus.config.ConsensusConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.TimeUnit; /** ConsensusManager maintains consensus class, request will redirect to consensus layer */ public class ConsensusManager { private static final 
Logger LOGGER = LoggerFactory.getLogger(ConsensusManager.class); - private static final ConfigNodeConf conf = ConfigNodeDescriptor.getInstance().getConf(); - private ConfigManager configManager; + private static final ConfigNodeConfig conf = ConfigNodeDescriptor.getInstance().getConf(); + + private final IManager configManager; + + private ConsensusGroupId consensusGroupId; private IConsensus consensusImpl; - public ConsensusManager(ConfigManager configManager) throws IOException { + public ConsensusManager(IManager configManager, PartitionRegionStateMachine stateMachine) + throws IOException { this.configManager = configManager; - setConsensusLayer(); + setConsensusLayer(stateMachine); } public void close() throws IOException { @@ -63,7 +66,7 @@ public void close() throws IOException { } /** Build ConfigNodeGroup ConsensusLayer */ - private void setConsensusLayer() throws IOException { + private void setConsensusLayer(PartitionRegionStateMachine stateMachine) throws IOException { // There is only one ConfigNodeGroup consensusGroupId = new PartitionRegionId(conf.getPartitionRegionId()); @@ -71,9 +74,11 @@ private void setConsensusLayer() throws IOException { consensusImpl = ConsensusFactory.getConsensusImpl( conf.getConfigNodeConsensusProtocolClass(), - new TEndPoint(conf.getRpcAddress(), conf.getConsensusPort()), - new File(conf.getConsensusDir()), - gid -> new PartitionRegionStateMachine(configManager)) + ConsensusConfig.newBuilder() + .setThisNode(new TEndPoint(conf.getRpcAddress(), conf.getConsensusPort())) + .setStorageDir(conf.getConsensusDir()) + .build(), + gid -> stateMachine) .orElseThrow( () -> new IllegalArgumentException( @@ -82,28 +87,32 @@ private void setConsensusLayer() throws IOException { conf.getConfigNodeConsensusProtocolClass()))); consensusImpl.start(); - // Build consensus group from iotdb-confignode.properties - LOGGER.info("Set ConfigNode consensus group {}...", conf.getConfigNodeList()); + // If this is not the first startup, or this node is the seed ConfigNode, add the ConsensusGroup directly. + if (!conf.isNeedApply()) { + addConsensusGroup(conf.getConfigNodeList()); + } + } + + /** + * After a new ConfigNode registers, the leader calls addConsensusGroup remotely.
It is executed on the new node. + * + * @param configNodeLocations locations of all ConfigNodes in the group + */ + public void addConsensusGroup(List configNodeLocations) { + if (configNodeLocations.size() == 0) { + LOGGER.warn("configNodeLocations is empty"); + return; + } + + LOGGER.info("Set ConfigNode consensus group {}...", configNodeLocations); List peerList = new ArrayList<>(); - for (TConfigNodeLocation configNodeLocation : conf.getConfigNodeList()) { + for (TConfigNodeLocation configNodeLocation : configNodeLocations) { peerList.add(new Peer(consensusGroupId, configNodeLocation.getConsensusEndPoint())); } consensusImpl.addConsensusGroup(consensusGroupId, peerList); - // Apply ConfigNode if necessary - if (conf.isNeedApply()) { - TSStatus status = - SyncConfigNodeClientPool.getInstance() - .applyConfigNode( - conf.getTargetConfigNode(), - new TConfigNodeLocation( - new TEndPoint(conf.getRpcAddress(), conf.getRpcPort()), - new TEndPoint(conf.getRpcAddress(), conf.getConsensusPort()))); - if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - LOGGER.error(status.getMessage()); - throw new IOException("Apply ConfigNode failed:"); - } - } + // Set config node list + conf.setConfigNodeList(configNodeLocations); } /** @@ -123,6 +132,23 @@ public boolean addConfigNodePeer(ApplyConfigNodeReq applyConfigNodeReq) { .isSuccess(); } + /** + * Remove a ConfigNode Peer out of PartitionRegion + * + * @param removeConfigNodeReq RemoveConfigNodeReq + * @return True if removePeer succeeds. False if another ConfigNode is being removed from the + * PartitionRegion + */ + public boolean removeConfigNodePeer(RemoveConfigNodeReq removeConfigNodeReq) { + return consensusImpl + .removePeer( + consensusGroupId, + new Peer( + consensusGroupId, + removeConfigNodeReq.getConfigNodeLocation().getConsensusEndPoint())) + .isSuccess(); + } + /** Transmit PhysicalPlan to confignode.consensus.statemachine */ public ConsensusWriteResponse write(ConfigRequest req) { return consensusImpl.write(consensusGroupId, req); @@ -137,9 +163,58 @@ public boolean isLeader() { return consensusImpl.isLeader(consensusGroupId); } + /** @return ConfigNode-leader's location if leader exists, null otherwise.
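 * <p>The retry loop below polls the consensus layer up to 50 times at 100 ms intervals (roughly
 * five seconds) before giving up, so callers should treat a {@code null} result as "no leader
 * elected yet" rather than a permanent failure. A minimal caller-side sketch, where
 * {@code consensusManager} and {@code target} are hypothetical names that are not part of this
 * patch:
 * <pre>{@code
 * TConfigNodeLocation leader = consensusManager.getLeader();
 * if (leader == null) {
 *   // the ConfigNodeGroup is still electing a leader: ask the caller to retry later
 * } else if (leader.getInternalEndPoint().equals(target.getInternalEndPoint())) {
 *   // the target node currently holds leadership, e.g. transfer it before removing the node
 * }
 * }</pre>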
*/ + public TConfigNodeLocation getLeader() { + for (int retry = 0; retry < 50; retry++) { + Peer leaderPeer = consensusImpl.getLeader(consensusGroupId); + if (leaderPeer != null) { + List onlineConfigNodes = getNodeManager().getOnlineConfigNodes(); + TConfigNodeLocation leaderLocation = + onlineConfigNodes.stream() + .filter(leader -> leader.getConsensusEndPoint().equals(leaderPeer.getEndpoint())) + .findFirst() + .orElse(null); + if (leaderLocation != null) { + return leaderLocation; + } + } + + try { + TimeUnit.MILLISECONDS.sleep(100); + } catch (InterruptedException e) { + LOGGER.warn("ConsensusManager getLeader been interrupted, ", e); + } + } + return null; + } + public ConsensusGroupId getConsensusGroupId() { return consensusGroupId; } - // TODO: Interfaces for LoadBalancer control + public IConsensus getConsensusImpl() { + return consensusImpl; + } + + private NodeManager getNodeManager() { + return configManager.getNodeManager(); + } + + @TestOnly + public void singleCopyMayWaitUntilLeaderReady() { + if (conf.getConfigNodeList().size() == 1) { + long startTime = System.currentTimeMillis(); + long maxWaitTime = 1000 * 60; // milliseconds, which is 60s + try { + while (!consensusImpl.isLeader(consensusGroupId)) { + TimeUnit.MILLISECONDS.sleep(100); + long elapsed = System.currentTimeMillis() - startTime; + if (elapsed > maxWaitTime) { + return; + } + } + } catch (InterruptedException ignored) { + } + } + } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java new file mode 100644 index 000000000000..65ce70ade361 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/IManager.java @@ -0,0 +1,244 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.confignode.manager; + +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TFlushReq; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.confignode.consensus.request.auth.AuthorReq; +import org.apache.iotdb.confignode.consensus.request.read.CountStorageGroupReq; +import org.apache.iotdb.confignode.consensus.request.read.GetDataNodeInfoReq; +import org.apache.iotdb.confignode.consensus.request.read.GetDataPartitionReq; +import org.apache.iotdb.confignode.consensus.request.read.GetOrCreateDataPartitionReq; +import org.apache.iotdb.confignode.consensus.request.read.GetRegionInfoListReq; +import org.apache.iotdb.confignode.consensus.request.read.GetStorageGroupReq; +import org.apache.iotdb.confignode.consensus.request.write.RegisterDataNodeReq; +import org.apache.iotdb.confignode.consensus.request.write.RemoveConfigNodeReq; +import org.apache.iotdb.confignode.consensus.request.write.SetDataReplicationFactorReq; +import org.apache.iotdb.confignode.consensus.request.write.SetSchemaReplicationFactorReq; +import org.apache.iotdb.confignode.consensus.request.write.SetStorageGroupReq; +import org.apache.iotdb.confignode.consensus.request.write.SetTTLReq; +import org.apache.iotdb.confignode.consensus.request.write.SetTimePartitionIntervalReq; +import org.apache.iotdb.confignode.manager.load.LoadManager; +import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterReq; +import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterResp; +import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionResp; +import org.apache.iotdb.confignode.rpc.thrift.TPermissionInfoResp; +import org.apache.iotdb.confignode.rpc.thrift.TSchemaNodeManagementResp; +import org.apache.iotdb.confignode.rpc.thrift.TSchemaPartitionResp; +import org.apache.iotdb.consensus.common.DataSet; +import org.apache.iotdb.db.mpp.common.schematree.PathPatternTree; + +import java.util.List; + +/** + * a subset of services provided by {@ConfigManager}. For use internally only, passed to Managers, + * services. 
+ */ +public interface IManager { + + /** + * if a service stop + * + * @return true if service stopped + */ + boolean isStopped(); + + /** + * Get DataManager + * + * @return DataNodeManager instance + */ + NodeManager getNodeManager(); + + /** + * Get ConsensusManager + * + * @return ConsensusManager instance + */ + ConsensusManager getConsensusManager(); + + /** + * Get ClusterSchemaManager + * + * @return ClusterSchemaManager instance + */ + ClusterSchemaManager getClusterSchemaManager(); + + /** + * Get PartitionManager + * + * @return PartitionManager instance + */ + PartitionManager getPartitionManager(); + + /** + * Get LoadManager + * + * @return LoadManager instance + */ + LoadManager getLoadManager(); + + /** + * Get UDFManager + * + * @return UDFManager instance + */ + UDFManager getUDFManager(); + + /** + * Register DataNode + * + * @return DataNodeConfigurationDataSet + */ + DataSet registerDataNode(RegisterDataNodeReq registerDataNodeReq); + + /** + * Get DataNode info + * + * @return DataNodesInfoDataSet + */ + DataSet getDataNodeInfo(GetDataNodeInfoReq getDataNodeInfoReq); + + TSStatus setTTL(SetTTLReq configRequest); + + TSStatus setSchemaReplicationFactor(SetSchemaReplicationFactorReq configRequest); + + TSStatus setDataReplicationFactor(SetDataReplicationFactorReq configRequest); + + TSStatus setTimePartitionInterval(SetTimePartitionIntervalReq configRequest); + + /** + * Count StorageGroups + * + * @return The number of matched StorageGroups + */ + DataSet countMatchedStorageGroups(CountStorageGroupReq countStorageGroupReq); + + /** + * Get StorageGroupSchemas + * + * @return StorageGroupSchemaDataSet + */ + DataSet getMatchedStorageGroupSchemas(GetStorageGroupReq getOrCountStorageGroupReq); + + /** + * Set StorageGroup + * + * @return status + */ + TSStatus setStorageGroup(SetStorageGroupReq setStorageGroupReq); + + /** + * Delete StorageGroups + * + * @param deletedPaths List + * @return status + */ + TSStatus deleteStorageGroups(List deletedPaths); + + /** + * Get SchemaPartition + * + * @return TSchemaPartitionResp + */ + TSchemaPartitionResp getSchemaPartition(PathPatternTree patternTree); + + /** + * Get or create SchemaPartition + * + * @return TSchemaPartitionResp + */ + TSchemaPartitionResp getOrCreateSchemaPartition(PathPatternTree patternTree); + + /** + * create SchemaNodeManagementPartition for child paths node management + * + * @return TSchemaNodeManagementResp + */ + TSchemaNodeManagementResp getNodePathsPartition(PartialPath partialPath, Integer level); + + /** + * Get DataPartition + * + * @return TDataPartitionResp + */ + TDataPartitionResp getDataPartition(GetDataPartitionReq getDataPartitionReq); + + /** + * Get or create DataPartition + * + * @return TDataPartitionResp + */ + TDataPartitionResp getOrCreateDataPartition( + GetOrCreateDataPartitionReq getOrCreateDataPartitionReq); + + /** + * Operate Permission + * + * @return status + */ + TSStatus operatePermission(AuthorReq authorReq); + + /** + * Query Permission + * + * @return PermissionInfoDataSet + */ + DataSet queryPermission(AuthorReq authorReq); + + /** login */ + TPermissionInfoResp login(String username, String password); + + /** Check User Privileges */ + TPermissionInfoResp checkUserPrivileges(String username, List paths, int permission); + + /** + * Register ConfigNode when it is first startup + * + * @return TConfigNodeRegisterResp + */ + TConfigNodeRegisterResp registerConfigNode(TConfigNodeRegisterReq req); + + /** + * Add Consensus Group in new node. 
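 * <p>Based on the ConsensusManager javadoc in this patch, this method is expected to be invoked
 * remotely by the ConfigNode leader on a newly registered node, right after registerConfigNode
 * succeeds, so that the new node joins the PartitionRegion consensus group. A rough sketch of
 * the intended call order (node names are illustrative only):
 * <pre>{@code
 * // new ConfigNode B joining a cluster whose current leader is A
 * // B -> A : registerConfigNode(req)            handled on the leader A
 * // A -> B : addConsensusGroup(configNodeList)  executed on the new node B
 * }</pre>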
+ * + * @return status + */ + TSStatus addConsensusGroup(List configNodeLocations); + + /** + * Remove ConfigNode + * + * @return status + */ + TSStatus removeConfigNode(RemoveConfigNodeReq removeConfigNodeReq); + + TSStatus createFunction(String udfName, String className, List uris); + + TSStatus dropFunction(String udfName); + + TSStatus flush(TFlushReq req); + + void addMetrics(); + + /** Show (data/schema) regions */ + DataSet showRegion(GetRegionInfoListReq getRegionsinfoReq); +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/LoadManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/LoadManager.java deleted file mode 100644 index 67aa1e7ffb9f..000000000000 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/LoadManager.java +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.confignode.manager; - -import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; -import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; -import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; -import org.apache.iotdb.common.rpc.thrift.TEndPoint; -import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; -import org.apache.iotdb.commons.exception.MetadataException; -import org.apache.iotdb.confignode.client.AsyncDataNodeClientPool; -import org.apache.iotdb.confignode.client.handlers.CreateRegionHandler; -import org.apache.iotdb.confignode.consensus.request.write.CreateRegionsReq; -import org.apache.iotdb.confignode.exception.NotEnoughDataNodeException; -import org.apache.iotdb.confignode.manager.allocator.CopySetRegionAllocator; -import org.apache.iotdb.confignode.manager.allocator.IRegionAllocator; -import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; -import org.apache.iotdb.mpp.rpc.thrift.TCreateDataRegionReq; -import org.apache.iotdb.mpp.rpc.thrift.TCreateSchemaRegionReq; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.BitSet; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CountDownLatch; - -/** - * The LoadManager at ConfigNodeGroup-Leader is active. It proactively implements the cluster - * dynamic load balancing policy and passively accepts the PartitionTable expansion request. 
- */ -public class LoadManager { - - private static final Logger LOGGER = LoggerFactory.getLogger(LoadManager.class); - - private final Manager configManager; - - private final IRegionAllocator regionAllocator; - - // TODO: Interfaces for active, interrupt and reset LoadBalancer - - public LoadManager(Manager configManager) { - this.configManager = configManager; - this.regionAllocator = new CopySetRegionAllocator(); - } - - /** - * Allocate and create one Region on DataNode for each StorageGroup. - * - * @param storageGroups List - * @param consensusGroupType TConsensusGroupType of Region to be allocated - */ - public void allocateAndCreateRegions( - List storageGroups, TConsensusGroupType consensusGroupType) - throws NotEnoughDataNodeException { - CreateRegionsReq createRegionsReq = null; - - // TODO: use procedure to protect create Regions process - try { - createRegionsReq = allocateRegions(storageGroups, consensusGroupType); - createRegionsOnDataNodes(createRegionsReq); - } catch (MetadataException e) { - LOGGER.error("Meet error when create Regions", e); - } - - getConsensusManager().write(createRegionsReq); - } - - private CreateRegionsReq allocateRegions( - List storageGroups, TConsensusGroupType consensusGroupType) - throws NotEnoughDataNodeException, MetadataException { - CreateRegionsReq createRegionsReq = new CreateRegionsReq(); - - List onlineDataNodes = getNodeManager().getOnlineDataNodes(); - List allocatedRegions = getPartitionManager().getAllocatedRegions(); - - for (String storageGroup : storageGroups) { - TStorageGroupSchema storageGroupSchema = - getClusterSchemaManager().getStorageGroupSchemaByName(storageGroup); - int replicationFactor = - consensusGroupType == TConsensusGroupType.SchemaRegion - ? storageGroupSchema.getSchemaReplicationFactor() - : storageGroupSchema.getDataReplicationFactor(); - - if (onlineDataNodes.size() < replicationFactor) { - throw new NotEnoughDataNodeException(); - } - - TRegionReplicaSet newRegion = - regionAllocator.allocateRegion( - onlineDataNodes, - allocatedRegions, - replicationFactor, - new TConsensusGroupId( - consensusGroupType, getPartitionManager().generateNextRegionGroupId())); - createRegionsReq.addRegion(storageGroup, newRegion); - } - - return createRegionsReq; - } - - private void createRegionsOnDataNodes(CreateRegionsReq createRegionsReq) - throws MetadataException { - // Index of each Region - int index = 0; - // Number of regions to be created - int regionNum = 0; - Map> indexMap = new HashMap<>(); - Map ttlMap = new HashMap<>(); - for (Map.Entry entry : createRegionsReq.getRegionMap().entrySet()) { - regionNum += entry.getValue().getDataNodeLocationsSize(); - ttlMap.put( - entry.getKey(), - getClusterSchemaManager().getStorageGroupSchemaByName(entry.getKey()).getTTL()); - for (TDataNodeLocation dataNodeLocation : entry.getValue().getDataNodeLocations()) { - indexMap - .computeIfAbsent(entry.getKey(), sg -> new HashMap<>()) - .put(dataNodeLocation.getDataNodeId(), index); - index += 1; - } - } - - BitSet bitSet = new BitSet(regionNum); - - for (int retry = 0; retry < 3; retry++) { - CountDownLatch latch = new CountDownLatch(regionNum - bitSet.cardinality()); - - createRegionsReq - .getRegionMap() - .forEach( - (storageGroup, regionReplicaSet) -> { - // Enumerate each Region - regionReplicaSet - .getDataNodeLocations() - .forEach( - dataNodeLocation -> { - // Skip those created successfully - if (!bitSet.get( - indexMap.get(storageGroup).get(dataNodeLocation.getDataNodeId()))) { - TEndPoint endPoint = 
dataNodeLocation.getInternalEndPoint(); - CreateRegionHandler handler = - new CreateRegionHandler( - indexMap - .get(storageGroup) - .get(dataNodeLocation.getDataNodeId()), - bitSet, - latch, - regionReplicaSet.getRegionId(), - dataNodeLocation); - - switch (regionReplicaSet.getRegionId().getType()) { - case SchemaRegion: - AsyncDataNodeClientPool.getInstance() - .createSchemaRegion( - endPoint, - genCreateSchemaRegionReq(storageGroup, regionReplicaSet), - handler); - break; - case DataRegion: - AsyncDataNodeClientPool.getInstance() - .createDataRegion( - endPoint, - genCreateDataRegionReq( - storageGroup, - regionReplicaSet, - ttlMap.get(storageGroup)), - handler); - } - } - }); - }); - - try { - latch.await(); - } catch (InterruptedException e) { - LOGGER.error("ClusterSchemaManager was interrupted during create Regions on DataNodes", e); - } - - if (bitSet.cardinality() == regionNum) { - break; - } - } - - if (bitSet.cardinality() < regionNum) { - LOGGER.error( - "Failed to create some SchemaRegions or DataRegions on DataNodes. Please check former logs."); - } - } - - private TCreateSchemaRegionReq genCreateSchemaRegionReq( - String storageGroup, TRegionReplicaSet regionReplicaSet) { - TCreateSchemaRegionReq req = new TCreateSchemaRegionReq(); - req.setStorageGroup(storageGroup); - req.setRegionReplicaSet(regionReplicaSet); - return req; - } - - private TCreateDataRegionReq genCreateDataRegionReq( - String storageGroup, TRegionReplicaSet regionReplicaSet, long TTL) { - TCreateDataRegionReq req = new TCreateDataRegionReq(); - req.setStorageGroup(storageGroup); - req.setRegionReplicaSet(regionReplicaSet); - req.setTtl(TTL); - return req; - } - - private ConsensusManager getConsensusManager() { - return configManager.getConsensusManager(); - } - - private NodeManager getNodeManager() { - return configManager.getNodeManager(); - } - - private ClusterSchemaManager getClusterSchemaManager() { - return configManager.getClusterSchemaManager(); - } - - private PartitionManager getPartitionManager() { - return configManager.getPartitionManager(); - } -} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/Manager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/Manager.java deleted file mode 100644 index 32020b0575f9..000000000000 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/Manager.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.iotdb.confignode.manager; - -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.confignode.consensus.request.ConfigRequest; -import org.apache.iotdb.confignode.consensus.request.read.CountStorageGroupReq; -import org.apache.iotdb.confignode.consensus.request.read.GetDataNodeInfoReq; -import org.apache.iotdb.confignode.consensus.request.read.GetDataPartitionReq; -import org.apache.iotdb.confignode.consensus.request.read.GetOrCreateDataPartitionReq; -import org.apache.iotdb.confignode.consensus.request.read.GetStorageGroupReq; -import org.apache.iotdb.confignode.consensus.request.write.ApplyConfigNodeReq; -import org.apache.iotdb.confignode.consensus.request.write.RegisterDataNodeReq; -import org.apache.iotdb.confignode.consensus.request.write.SetDataReplicationFactorReq; -import org.apache.iotdb.confignode.consensus.request.write.SetSchemaReplicationFactorReq; -import org.apache.iotdb.confignode.consensus.request.write.SetStorageGroupReq; -import org.apache.iotdb.confignode.consensus.request.write.SetTTLReq; -import org.apache.iotdb.confignode.consensus.request.write.SetTimePartitionIntervalReq; -import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterReq; -import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterResp; -import org.apache.iotdb.consensus.common.DataSet; -import org.apache.iotdb.db.mpp.common.schematree.PathPatternTree; - -import java.util.List; - -/** - * a subset of services provided by {@ConfigManager}. For use internally only, passed to Managers, - * services. - */ -public interface Manager { - - /** - * if a service stop - * - * @return true if service stopped - */ - boolean isStopped(); - - /** - * Get DataManager - * - * @return DataNodeManager instance - */ - NodeManager getNodeManager(); - - /** - * Get ConsensusManager - * - * @return ConsensusManager instance - */ - ConsensusManager getConsensusManager(); - - /** - * Get ClusterSchemaManager - * - * @return ClusterSchemaManager instance - */ - ClusterSchemaManager getClusterSchemaManager(); - - /** - * Get PartitionManager - * - * @return PartitionManager instance - */ - PartitionManager getPartitionManager(); - - /** - * Get LoadManager - * - * @return LoadManager instance - */ - LoadManager getLoadManager(); - - /** - * Register DataNode - * - * @return DataNodeConfigurationDataSet - */ - DataSet registerDataNode(RegisterDataNodeReq registerDataNodeReq); - - /** - * Get DataNode info - * - * @return DataNodesInfoDataSet - */ - DataSet getDataNodeInfo(GetDataNodeInfoReq getDataNodeInfoReq); - - TSStatus setTTL(SetTTLReq configRequest); - - TSStatus setSchemaReplicationFactor(SetSchemaReplicationFactorReq configRequest); - - TSStatus setDataReplicationFactor(SetDataReplicationFactorReq configRequest); - - TSStatus setTimePartitionInterval(SetTimePartitionIntervalReq configRequest); - - /** - * Count StorageGroups - * - * @return The number of matched StorageGroups - */ - DataSet countMatchedStorageGroups(CountStorageGroupReq countStorageGroupReq); - - /** - * Get StorageGroupSchemas - * - * @return StorageGroupSchemaDataSet - */ - DataSet getMatchedStorageGroupSchemas(GetStorageGroupReq getOrCountStorageGroupReq); - - /** - * Set StorageGroup - * - * @return status - */ - TSStatus setStorageGroup(SetStorageGroupReq setStorageGroupReq); - - /** - * Delete StorageGroup - * - * @param deleteStorageGroupsReq deleteStorageGroupReq - * @return status - */ - TSStatus deleteStorageGroups(List deletedPaths); - - /** - * Get SchemaPartition - * - * @return 
SchemaPartitionDataSet - */ - DataSet getSchemaPartition(PathPatternTree patternTree); - - /** - * Get or create SchemaPartition - * - * @return SchemaPartitionDataSet - */ - DataSet getOrCreateSchemaPartition(PathPatternTree patternTree); - - /** - * Get DataPartition - * - * @return DataPartitionDataSet - */ - DataSet getDataPartition(GetDataPartitionReq getDataPartitionReq); - - /** - * Get or create DataPartition - * - * @return DataPartitionDataSet - */ - DataSet getOrCreateDataPartition(GetOrCreateDataPartitionReq getOrCreateDataPartitionReq); - - /** - * Operate Permission - * - * @param configRequest AuthorPlan - * @return status - */ - TSStatus operatePermission(ConfigRequest configRequest); - - /** - * Query Permission - * - * @param configRequest AuthorPlan - * @return PermissionInfoDataSet - */ - DataSet queryPermission(ConfigRequest configRequest); - - /** login */ - TSStatus login(String username, String password); - - /** Check User Privileges */ - TSStatus checkUserPrivileges(String username, List paths, int permission); - - /** - * Register ConfigNode when it is first startup - * - * @return TConfigNodeRegisterResp - */ - TConfigNodeRegisterResp registerConfigNode(TConfigNodeRegisterReq req); - - /** - * Apply ConfigNode when it is first startup - * - * @return status - */ - TSStatus applyConfigNode(ApplyConfigNodeReq applyConfigNodeReq); -} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/NodeManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/NodeManager.java index b5d778e8b568..b448d10cca84 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/NodeManager.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/NodeManager.java @@ -18,51 +18,67 @@ */ package org.apache.iotdb.confignode.manager; +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TFlushReq; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.consensus.ConsensusGroupId; import org.apache.iotdb.confignode.client.AsyncDataNodeClientPool; +import org.apache.iotdb.confignode.client.handlers.FlushHandler; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.consensus.request.read.GetDataNodeInfoReq; import org.apache.iotdb.confignode.consensus.request.write.ApplyConfigNodeReq; import org.apache.iotdb.confignode.consensus.request.write.RegisterDataNodeReq; +import org.apache.iotdb.confignode.consensus.request.write.RemoveConfigNodeReq; import org.apache.iotdb.confignode.consensus.response.DataNodeConfigurationResp; -import org.apache.iotdb.confignode.consensus.response.DataNodeLocationsResp; +import org.apache.iotdb.confignode.consensus.response.DataNodeInfosResp; import org.apache.iotdb.confignode.persistence.NodeInfo; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterReq; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterResp; import org.apache.iotdb.confignode.rpc.thrift.TGlobalConfig; import org.apache.iotdb.consensus.common.DataSet; +import org.apache.iotdb.consensus.common.Peer; +import org.apache.iotdb.consensus.common.response.ConsensusGenericResponse; import org.apache.iotdb.consensus.common.response.ConsensusWriteResponse; import org.apache.iotdb.rpc.TSStatusCode; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import 
java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.locks.ReentrantLock; /** NodeManager manages cluster node addition and removal requests */ public class NodeManager { private static final Logger LOGGER = LoggerFactory.getLogger(NodeManager.class); - private static final NodeInfo nodeInfo = NodeInfo.getInstance(); + private final IManager configManager; + private final NodeInfo nodeInfo; - private final Manager configManager; + private final ReentrantLock removeConfigNodeLock; /** TODO:do some operate after add node or remove node */ private final List listeners = new CopyOnWriteArrayList<>(); - public NodeManager(Manager configManager) { + public NodeManager(IManager configManager, NodeInfo nodeInfo) { this.configManager = configManager; + this.nodeInfo = nodeInfo; + this.removeConfigNodeLock = new ReentrantLock(); } private void setGlobalConfig(DataNodeConfigurationResp dataSet) { // Set TGlobalConfig TGlobalConfig globalConfig = new TGlobalConfig(); - globalConfig.setDataNodeConsensusProtocolClass( - ConfigNodeDescriptor.getInstance().getConf().getDataNodeConsensusProtocolClass()); + globalConfig.setDataRegionConsensusProtocolClass( + ConfigNodeDescriptor.getInstance().getConf().getDataRegionConsensusProtocolClass()); + globalConfig.setSchemaRegionConsensusProtocolClass( + ConfigNodeDescriptor.getInstance().getConf().getSchemaRegionConsensusProtocolClass()); globalConfig.setSeriesPartitionSlotNum( ConfigNodeDescriptor.getInstance().getConf().getSeriesPartitionSlotNum()); globalConfig.setSeriesPartitionExecutorClass( @@ -82,21 +98,25 @@ private void setGlobalConfig(DataNodeConfigurationResp dataSet) { public DataSet registerDataNode(RegisterDataNodeReq req) { DataNodeConfigurationResp dataSet = new DataNodeConfigurationResp(); - if (NodeInfo.getInstance().containsValue(req.getLocation())) { + if (nodeInfo.isOnlineDataNode(req.getInfo().getLocation())) { // Reset client - AsyncDataNodeClientPool.getInstance().resetClient(req.getLocation().getInternalEndPoint()); + AsyncDataNodeClientPool.getInstance() + .resetClient(req.getInfo().getLocation().getInternalEndPoint()); TSStatus status = new TSStatus(TSStatusCode.DATANODE_ALREADY_REGISTERED.getStatusCode()); status.setMessage("DataNode already registered."); dataSet.setStatus(status); } else { // Persist DataNodeInfo - req.getLocation().setDataNodeId(NodeInfo.getInstance().generateNextDataNodeId()); + req.getInfo().getLocation().setDataNodeId(nodeInfo.generateNextNodeId()); ConsensusWriteResponse resp = getConsensusManager().write(req); dataSet.setStatus(resp.getStatus()); + + // Adjust the maximum RegionGroup number of each StorageGroup + getClusterSchemaManager().adjustMaxRegionGroupCount(); } - dataSet.setDataNodeId(req.getLocation().getDataNodeId()); + dataSet.setDataNodeId(req.getInfo().getLocation().getDataNodeId()); dataSet.setConfigNodeList(nodeInfo.getOnlineConfigNodes()); setGlobalConfig(dataSet); return dataSet; @@ -109,31 +129,37 @@ public DataSet registerDataNode(RegisterDataNodeReq req) { * @return The specific DataNode's info or all DataNode info if dataNodeId in * QueryDataNodeInfoPlan is -1 */ - public DataNodeLocationsResp getDataNodeInfo(GetDataNodeInfoReq req) { - return (DataNodeLocationsResp) getConsensusManager().read(req).getDataset(); + public DataNodeInfosResp getDataNodeInfo(GetDataNodeInfoReq req) { + return (DataNodeInfosResp) 
getConsensusManager().read(req).getDataset(); } + /** + * Only leader use this interface + * + * @return The number of online DataNodes + */ public int getOnlineDataNodeCount() { return nodeInfo.getOnlineDataNodeCount(); } /** - * Only leader use this interface. + * Only leader use this interface * - * @return all online DataNodes + * @return The number of total cpu cores in online DataNodes */ - public List getOnlineDataNodes() { - return nodeInfo.getOnlineDataNodes(); + public int getTotalCpuCoreCount() { + return nodeInfo.getTotalCpuCoreCount(); } /** - * Only leader use this interface. + * Only leader use this interface * - * @param dataNodeId the specific DataNodeId - * @return the specific DataNodeLocation + * @param dataNodeId Specific DataNodeId + * @return All online DataNodes if dataNodeId equals -1. And return the specific DataNode + * otherwise. */ - public TDataNodeLocation getOnlineDataNode(int dataNodeId) { - return nodeInfo.getOnlineDataNode(dataNodeId); + public List getOnlineDataNodes(int dataNodeId) { + return nodeInfo.getOnlineDataNodes(dataNodeId); } /** @@ -149,17 +175,16 @@ public TConfigNodeRegisterResp registerConfigNode(TConfigNodeRegisterReq req) { // Return PartitionRegionId resp.setPartitionRegionId( - ConsensusGroupId.convertToTConsensusGroupId(getConsensusManager().getConsensusGroupId())); + getConsensusManager().getConsensusGroupId().convertToTConsensusGroupId()); - // Return online ConfigNodes resp.setConfigNodeList(nodeInfo.getOnlineConfigNodes()); - resp.getConfigNodeList().add(req.getConfigNodeLocation()); - return resp; } public TSStatus applyConfigNode(ApplyConfigNodeReq applyConfigNodeReq) { if (getConsensusManager().addConfigNodePeer(applyConfigNodeReq)) { + // Generate new ConfigNode's index + applyConfigNodeReq.getConfigNodeLocation().setConfigNodeId(nodeInfo.generateNextNodeId()); return getConsensusManager().write(applyConfigNodeReq).getStatus(); } else { return new TSStatus(TSStatusCode.APPLY_CONFIGNODE_FAILED.getStatusCode()) @@ -167,10 +192,93 @@ public TSStatus applyConfigNode(ApplyConfigNodeReq applyConfigNodeReq) { } } + public void addMetrics() { + nodeInfo.addMetrics(); + } + + public TSStatus removeConfigNode(RemoveConfigNodeReq removeConfigNodeReq) { + if (removeConfigNodeLock.tryLock()) { + try { + // Check ConfigNodes number + if (getOnlineConfigNodes().size() <= 1) { + return new TSStatus(TSStatusCode.REMOVE_CONFIGNODE_FAILED.getStatusCode()) + .setMessage( + "Remove ConfigNode failed because there is only one ConfigNode in current Cluster."); + } + + // Check whether the onlineConfigNodes contain the ConfigNode to be removed. 
+ if (!getOnlineConfigNodes().contains(removeConfigNodeReq.getConfigNodeLocation())) { + return new TSStatus(TSStatusCode.REMOVE_CONFIGNODE_FAILED.getStatusCode()) + .setMessage( + "Remove ConfigNode failed because the ConfigNode is not in the current Cluster."); + } + + // Check whether the ConfigNode to be removed is the current leader + TConfigNodeLocation leader = getConsensusManager().getLeader(); + if (leader == null) { + return new TSStatus(TSStatusCode.REMOVE_CONFIGNODE_FAILED.getStatusCode()) + .setMessage( + "Remove ConfigNode failed because the ConfigNodeGroup is undergoing leader election, please retry."); + } + if (leader + .getInternalEndPoint() + .equals(removeConfigNodeReq.getConfigNodeLocation().getInternalEndPoint())) { + // Transfer leadership before removing this node + return transferLeader(removeConfigNodeReq, getConsensusManager().getConsensusGroupId()); + } + + // Execute removePeer + if (getConsensusManager().removeConfigNodePeer(removeConfigNodeReq)) { + return getConsensusManager().write(removeConfigNodeReq).getStatus(); + } else { + return new TSStatus(TSStatusCode.REMOVE_CONFIGNODE_FAILED.getStatusCode()) + .setMessage( + "Remove ConfigNode failed because updating the ConsensusGroup peer information failed."); + } + } finally { + removeConfigNodeLock.unlock(); + } + } else { + return new TSStatus(TSStatusCode.REMOVE_CONFIGNODE_FAILED.getStatusCode()) + .setMessage("Another ConfigNode is being removed. Please wait or try again."); + } + } + + private TSStatus transferLeader( + RemoveConfigNodeReq removeConfigNodeReq, ConsensusGroupId groupId) { + TConfigNodeLocation newLeader = + getOnlineConfigNodes().stream() + .filter(e -> !e.equals(removeConfigNodeReq.getConfigNodeLocation())) + .findAny() + .get(); + ConsensusGenericResponse resp = + getConsensusManager() + .getConsensusImpl() + .transferLeader(groupId, new Peer(groupId, newLeader.getConsensusEndPoint())); + if (!resp.isSuccess()) { + return new TSStatus(TSStatusCode.REMOVE_CONFIGNODE_FAILED.getStatusCode()) + .setMessage("Remove ConfigNode failed because transferring the ConfigNode leadership failed."); + } + return new TSStatus(TSStatusCode.NEED_REDIRECTION.getStatusCode()) + .setRedirectNode(newLeader.getInternalEndPoint()) + .setMessage( + "The ConfigNode to be removed is the leader; leadership has already been transferred to " + + newLeader + + "."); + } + + public List getOnlineConfigNodes() { + return nodeInfo.getOnlineConfigNodes(); + } + private ConsensusManager getConsensusManager() { return configManager.getConsensusManager(); } + private ClusterSchemaManager getClusterSchemaManager() { + return configManager.getClusterSchemaManager(); + } + public void registerListener(final ChangeServerListener serverListener) { listeners.add(serverListener); } @@ -232,4 +340,26 @@ public interface ChangeServerListener { */ void removeDataNode(final TDataNodeLocation dataNodeInfo); } + + public List flush(TFlushReq req) { + List onlineDataNodes = + configManager.getNodeManager().getOnlineDataNodes(req.dataNodeId); + List dataNodeResponseStatus = + Collections.synchronizedList(new ArrayList<>(onlineDataNodes.size())); + CountDownLatch countDownLatch = new CountDownLatch(onlineDataNodes.size()); + for (TDataNodeInfo dataNodeInfo : onlineDataNodes) { + AsyncDataNodeClientPool.getInstance() + .flush( + dataNodeInfo.getLocation().getInternalEndPoint(), + req, + new FlushHandler(dataNodeInfo.getLocation(), countDownLatch, dataNodeResponseStatus)); + } + try { + countDownLatch.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + LOGGER.error("NodeManager was interrupted during flushing on data
nodes", e); + } + return dataNodeResponseStatus; + } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/PartitionManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/PartitionManager.java index e9ec2470070f..c3868cccc022 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/PartitionManager.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/PartitionManager.java @@ -24,23 +24,36 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; +import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil; +import org.apache.iotdb.commons.partition.DataPartitionTable; +import org.apache.iotdb.commons.partition.SchemaPartitionTable; import org.apache.iotdb.commons.partition.executor.SeriesPartitionExecutor; -import org.apache.iotdb.confignode.conf.ConfigNodeConf; +import org.apache.iotdb.confignode.client.SyncDataNodeClientPool; +import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.consensus.request.read.GetDataPartitionReq; +import org.apache.iotdb.confignode.consensus.request.read.GetNodePathsPartitionReq; import org.apache.iotdb.confignode.consensus.request.read.GetOrCreateDataPartitionReq; import org.apache.iotdb.confignode.consensus.request.read.GetOrCreateSchemaPartitionReq; +import org.apache.iotdb.confignode.consensus.request.read.GetRegionInfoListReq; import org.apache.iotdb.confignode.consensus.request.read.GetSchemaPartitionReq; import org.apache.iotdb.confignode.consensus.request.write.CreateDataPartitionReq; import org.apache.iotdb.confignode.consensus.request.write.CreateSchemaPartitionReq; -import org.apache.iotdb.confignode.consensus.request.write.DeleteRegionsReq; +import org.apache.iotdb.confignode.consensus.request.write.PreDeleteStorageGroupReq; import org.apache.iotdb.confignode.consensus.response.DataPartitionResp; +import org.apache.iotdb.confignode.consensus.response.SchemaNodeManagementResp; import org.apache.iotdb.confignode.consensus.response.SchemaPartitionResp; import org.apache.iotdb.confignode.exception.NotEnoughDataNodeException; -import org.apache.iotdb.confignode.persistence.PartitionInfo; +import org.apache.iotdb.confignode.exception.StorageGroupNotExistsException; +import org.apache.iotdb.confignode.exception.TimeoutException; +import org.apache.iotdb.confignode.manager.load.LoadManager; +import org.apache.iotdb.confignode.persistence.partition.PartitionInfo; +import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; import org.apache.iotdb.consensus.common.DataSet; import org.apache.iotdb.consensus.common.response.ConsensusReadResponse; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.iotdb.tsfile.utils.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,229 +62,453 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Random; +import java.util.Set; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; /** The PartitionManager Manages cluster PartitionTable read and write requests. 
*/ public class PartitionManager { private static final Logger LOGGER = LoggerFactory.getLogger(PartitionManager.class); - private static final PartitionInfo partitionInfo = PartitionInfo.getInstance(); - - private final Manager configManager; + private final IManager configManager; + private final PartitionInfo partitionInfo; + private static final int REGION_CLEANER_WORK_INTERVAL = 300; + private static final int REGION_CLEANER_WORK_INITIAL_DELAY = 10; private SeriesPartitionExecutor executor; + private final ScheduledExecutorService regionCleaner; - public PartitionManager(Manager configManager) { + public PartitionManager(IManager configManager, PartitionInfo partitionInfo) { this.configManager = configManager; + this.partitionInfo = partitionInfo; + this.regionCleaner = + IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor("IoTDB-Region-Cleaner"); + ScheduledExecutorUtil.safelyScheduleAtFixedRate( + regionCleaner, + this::clearDeletedRegions, + REGION_CLEANER_WORK_INITIAL_DELAY, + REGION_CLEANER_WORK_INTERVAL, + TimeUnit.SECONDS); setSeriesPartitionExecutor(); } + /** Construct SeriesPartitionExecutor by iotdb-confignode.propertis */ + private void setSeriesPartitionExecutor() { + ConfigNodeConfig conf = ConfigNodeDescriptor.getInstance().getConf(); + this.executor = + SeriesPartitionExecutor.getSeriesPartitionExecutor( + conf.getSeriesPartitionExecutorClass(), conf.getSeriesPartitionSlotNum()); + } + + // ====================================================== + // Consensus read/write interfaces + // ====================================================== + /** - * Get SchemaPartition + * Thread-safely get SchemaPartition * - * @param physicalPlan SchemaPartitionPlan with partitionSlotsMap + * @param req SchemaPartitionPlan with partitionSlotsMap * @return SchemaPartitionDataSet that contains only existing SchemaPartition */ - public DataSet getSchemaPartition(GetSchemaPartitionReq physicalPlan) { - SchemaPartitionResp schemaPartitionResp; - ConsensusReadResponse consensusReadResponse = getConsensusManager().read(physicalPlan); - schemaPartitionResp = (SchemaPartitionResp) consensusReadResponse.getDataset(); - return schemaPartitionResp; + public DataSet getSchemaPartition(GetSchemaPartitionReq req) { + return getConsensusManager().read(req).getDataset(); + } + + /** + * Thread-safely get DataPartition + * + * @param req DataPartitionPlan with Map>> + * @return DataPartitionDataSet that contains only existing DataPartition + */ + public DataSet getDataPartition(GetDataPartitionReq req) { + return getConsensusManager().read(req).getDataset(); } /** * Get SchemaPartition and create a new one if it does not exist * - * @param physicalPlan SchemaPartitionPlan with partitionSlotsMap + * @param req SchemaPartitionPlan with partitionSlotsMap * @return SchemaPartitionResp with DataPartition and TSStatus. SUCCESS_STATUS if all process - * finish. NOT_ENOUGH_DATA_NODE if the DataNodes is not enough to create new Regions. + * finish. NOT_ENOUGH_DATA_NODE if the DataNodes is not enough to create new Regions. TIME_OUT + * if waiting other threads to create Regions for too long. STORAGE_GROUP_NOT_EXIST if some + * StorageGroup doesn't exist. 
*/ - public DataSet getOrCreateSchemaPartition(GetOrCreateSchemaPartitionReq physicalPlan) { - Map> noAssignedSchemaPartitionSlots = - partitionInfo.filterNoAssignedSchemaPartitionSlots(physicalPlan.getPartitionSlotsMap()); + public DataSet getOrCreateSchemaPartition(GetOrCreateSchemaPartitionReq req) { + // After all the SchemaPartitions are allocated, + // all the read requests about SchemaPartitionTable are parallel. + SchemaPartitionResp resp = (SchemaPartitionResp) getSchemaPartition(req); + if (resp.isAllPartitionsExist()) { + return resp; + } - if (noAssignedSchemaPartitionSlots.size() > 0) { + // Otherwise, fist ensure that each StorageGroup has at least one SchemaRegion. + // This block of code is still parallel and concurrent safe. + // Thus, we can prepare the SchemaRegions with maximum efficiency. + TSStatus status = + initializeRegionsIfNecessary( + new ArrayList<>(req.getPartitionSlotsMap().keySet()), TConsensusGroupType.SchemaRegion); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + resp.setStatus(status); + return resp; + } - // Make sure each StorageGroup has at least one SchemaRegion - try { - checkAndAllocateRegionsIfNecessary( - new ArrayList<>(noAssignedSchemaPartitionSlots.keySet()), - TConsensusGroupType.SchemaRegion); - } catch (NotEnoughDataNodeException e) { - SchemaPartitionResp resp = new SchemaPartitionResp(); - resp.setStatus(new TSStatus(TSStatusCode.NOT_ENOUGH_DATA_NODE.getStatusCode())); - return resp; + // Next, we serialize the creation process of SchemaPartitions to + // ensure that each SchemaPartition is created by a unique CreateSchemaPartitionReq. + // Because the number of SchemaPartitions per storage group is limited by the number of + // SeriesPartitionSlots, + // the number of serialized CreateSchemaPartitionReqs is acceptable. + synchronized (this) { + // Filter unassigned SchemaPartitionSlots + Map> unassignedSchemaPartitionSlots = + partitionInfo.filterUnassignedSchemaPartitionSlots(req.getPartitionSlotsMap()); + if (unassignedSchemaPartitionSlots.size() > 0) { + // Allocate SchemaPartitions + Map assignedSchemaPartition = + getLoadManager().allocateSchemaPartition(unassignedSchemaPartitionSlots); + // Cache allocating result + CreateSchemaPartitionReq createPlan = new CreateSchemaPartitionReq(); + createPlan.setAssignedSchemaPartition(assignedSchemaPartition); + getConsensusManager().write(createPlan); } + } + + // Finally, if some StorageGroups own too many slots, extend SchemaRegion for them. + extendRegionsIfNecessary( + new ArrayList<>(req.getPartitionSlotsMap().keySet()), TConsensusGroupType.SchemaRegion); - // Allocate SchemaPartition - Map> assignedSchemaPartition = - allocateSchemaPartition(noAssignedSchemaPartitionSlots); + return getSchemaPartition(req); + } + + /** + * Get DataPartition and create a new one if it does not exist + * + * @param req DataPartitionPlan with Map>> + * @return DataPartitionResp with DataPartition and TSStatus. SUCCESS_STATUS if all process + * finish. NOT_ENOUGH_DATA_NODE if the DataNodes is not enough to create new Regions. TIME_OUT + * if waiting other threads to create Regions for too long. STORAGE_GROUP_NOT_EXIST if some + * StorageGroup doesn't exist. + */ + public DataSet getOrCreateDataPartition(GetOrCreateDataPartitionReq req) { + // After all the SchemaPartitions are allocated, + // all the read requests about SchemaPartitionTable are parallel. 
+ DataPartitionResp resp = (DataPartitionResp) getDataPartition(req); + if (resp.isAllPartitionsExist()) { + return resp; + } - // Persist SchemaPartition - CreateSchemaPartitionReq createPlan = new CreateSchemaPartitionReq(); - createPlan.setAssignedSchemaPartition(assignedSchemaPartition); - getConsensusManager().write(createPlan); + // Otherwise, fist ensure that each StorageGroup has at least one DataRegion. + // This block of code is still parallel and concurrent safe. + // Thus, we can prepare the DataRegions with maximum efficiency. + TSStatus status = + initializeRegionsIfNecessary( + new ArrayList<>(req.getPartitionSlotsMap().keySet()), TConsensusGroupType.DataRegion); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + resp.setStatus(status); + return resp; + } - // TODO: Allocate more Regions if necessary + // Next, we serialize the creation process of DataPartitions to + // ensure that each DataPartition is created by a unique CreateDataPartitionReq. + // Because the number of DataPartitions per storage group per day is limited by the number of + // SeriesPartitionSlots, + // the number of serialized CreateDataPartitionReqs is acceptable. + synchronized (this) { + // Filter unassigned DataPartitionSlots + Map>> + unassignedDataPartitionSlots = + partitionInfo.filterUnassignedDataPartitionSlots(req.getPartitionSlotsMap()); + if (unassignedDataPartitionSlots.size() > 0) { + // Allocate DataPartitions + Map assignedDataPartition = + getLoadManager().allocateDataPartition(unassignedDataPartitionSlots); + // Cache allocating result + CreateDataPartitionReq createPlan = new CreateDataPartitionReq(); + createPlan.setAssignedDataPartition(assignedDataPartition); + getConsensusManager().write(createPlan); + } } - return getSchemaPartition(physicalPlan); + // Finally, if some StorageGroups own too many slots, extend DataRegion for them. + extendRegionsIfNecessary( + new ArrayList<>(req.getPartitionSlotsMap().keySet()), TConsensusGroupType.DataRegion); + + return getDataPartition(req); + } + + // ====================================================== + // Leader scheduling interfaces + // ====================================================== + + /** Handle the exceptions from initializeRegions */ + private TSStatus initializeRegionsIfNecessary( + List storageGroups, TConsensusGroupType consensusGroupType) { + try { + initializeRegions(storageGroups, consensusGroupType); + } catch (NotEnoughDataNodeException e) { + return new TSStatus(TSStatusCode.NOT_ENOUGH_DATA_NODE.getStatusCode()) + .setMessage( + "ConfigNode failed to allocate Partition because there are not enough DataNodes"); + } catch (TimeoutException e) { + return new TSStatus(TSStatusCode.TIME_OUT.getStatusCode()) + .setMessage( + "ConfigNode failed to allocate Partition because waiting for another thread's Region allocation timeout."); + } catch (StorageGroupNotExistsException e) { + return new TSStatus(TSStatusCode.STORAGE_GROUP_NOT_EXIST.getStatusCode()) + .setMessage( + "ConfigNode failed to allocate DataPartition because some StorageGroup doesn't exist."); + } + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } /** - * TODO: allocate schema partition by LoadManager + * Initialize one Region for each StorageGroup who doesn't have any. 
* - * @param noAssignedSchemaPartitionSlotsMap Map> - * @return assign result, Map> + * @param storageGroups List + * @param consensusGroupType SchemaRegion or DataRegion + * @throws NotEnoughDataNodeException When the number of online DataNodes are too small to + * allocate Regions + * @throws TimeoutException When waiting other threads to allocate Regions for too long + * @throws StorageGroupNotExistsException When some StorageGroups don't exist */ - private Map> allocateSchemaPartition( - Map> noAssignedSchemaPartitionSlotsMap) { - Map> result = new HashMap<>(); - - for (String storageGroup : noAssignedSchemaPartitionSlotsMap.keySet()) { - List noAssignedPartitionSlots = - noAssignedSchemaPartitionSlotsMap.get(storageGroup); - List schemaRegionReplicaSets = - partitionInfo.getRegionReplicaSets( - getClusterSchemaManager() - .getRegionGroupIds(storageGroup, TConsensusGroupType.SchemaRegion)); - Random random = new Random(); - - Map allocateResult = new HashMap<>(); - noAssignedPartitionSlots.forEach( - seriesPartitionSlot -> - allocateResult.put( - seriesPartitionSlot, - schemaRegionReplicaSets.get(random.nextInt(schemaRegionReplicaSets.size())))); - - result.put(storageGroup, allocateResult); + private void initializeRegions(List storageGroups, TConsensusGroupType consensusGroupType) + throws NotEnoughDataNodeException, TimeoutException, StorageGroupNotExistsException { + + int leastDataNode = 0; + Map unreadyStorageGroupMap = new HashMap<>(); + for (String storageGroup : storageGroups) { + if (getRegionCount(storageGroup, consensusGroupType) == 0) { + // Update leastDataNode + TStorageGroupSchema storageGroupSchema = + getClusterSchemaManager().getStorageGroupSchemaByName(storageGroup); + switch (consensusGroupType) { + case SchemaRegion: + leastDataNode = + Math.max(leastDataNode, storageGroupSchema.getSchemaReplicationFactor()); + break; + case DataRegion: + leastDataNode = Math.max(leastDataNode, storageGroupSchema.getDataReplicationFactor()); + } + + // Recording StorageGroups without Region + unreadyStorageGroupMap.put(storageGroup, 1); + } + } + if (getNodeManager().getOnlineDataNodeCount() < leastDataNode) { + // Make sure DataNodes enough + throw new NotEnoughDataNodeException(); } - return result; + doOrWaitRegionCreation(unreadyStorageGroupMap, consensusGroupType); + } + + /** Handle the exceptions from extendRegions */ + private void extendRegionsIfNecessary( + List storageGroups, TConsensusGroupType consensusGroupType) { + try { + extendRegions(storageGroups, consensusGroupType); + } catch (NotEnoughDataNodeException e) { + LOGGER.error("ConfigNode failed to extend Region because there are not enough DataNodes"); + } catch (TimeoutException e) { + LOGGER.error( + "ConfigNode failed to extend Region because waiting for another thread's Region allocation timeout."); + } catch (StorageGroupNotExistsException e) { + LOGGER.error("ConfigNode failed to extend Region because some StorageGroup doesn't exist."); + } } /** - * Get DataPartition + * Allocate more Regions to StorageGroups who have too many slots. 
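 * <p>A worked example of the extension rule implemented below, with illustrative numbers only:
 * assume maxSlotCount = 10000 and maxRegionCount = 5 for a StorageGroup that currently holds
 * slotCount = 7000 slots on regionCount = 2 Regions. Since 7000 / 2 > 10000 / 5, the group needs
 * extension, and delta = min(5 - 2, max(1, ceil(7000 * 5 / 10000 - 2))) = 2 extra Regions are
 * requested; afterwards 7000 / 4 = 1750 < 2000, so the per-Region slot load drops back under the
 * threshold.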
* - * @param physicalPlan DataPartitionPlan with Map>> - * @return DataPartitionDataSet that contains only existing DataPartition + * @param storageGroups List + * @param consensusGroupType SchemaRegion or DataRegion + * @throws StorageGroupNotExistsException When some StorageGroups don't exist + * @throws NotEnoughDataNodeException When the number of online DataNodes are too small to + * allocate Regions + * @throws TimeoutException When waiting other threads to allocate Regions for too long */ - public DataSet getDataPartition(GetDataPartitionReq physicalPlan) { - DataPartitionResp dataPartitionResp; - ConsensusReadResponse consensusReadResponse = getConsensusManager().read(physicalPlan); - dataPartitionResp = (DataPartitionResp) consensusReadResponse.getDataset(); - return dataPartitionResp; + private void extendRegions(List storageGroups, TConsensusGroupType consensusGroupType) + throws StorageGroupNotExistsException, NotEnoughDataNodeException, TimeoutException { + // Map + Map filledStorageGroupMap = new HashMap<>(); + for (String storageGroup : storageGroups) { + float regionCount = partitionInfo.getRegionCount(storageGroup, consensusGroupType); + float slotCount = partitionInfo.getSlotCount(storageGroup); + float maxRegionCount = + getClusterSchemaManager().getMaxRegionGroupCount(storageGroup, consensusGroupType); + float maxSlotCount = ConfigNodeDescriptor.getInstance().getConf().getSeriesPartitionSlotNum(); + + // Need extension + if (regionCount < maxRegionCount && slotCount / regionCount > maxSlotCount / maxRegionCount) { + // The delta is equal to the smallest integer solution that satisfies the inequality: + // slotCount / (regionCount + delta) < maxSlotCount / maxRegionCount + int delta = + Math.min( + (int) (maxRegionCount - regionCount), + Math.max( + 1, (int) Math.ceil(slotCount * maxRegionCount / maxSlotCount - regionCount))); + filledStorageGroupMap.put(storageGroup, delta); + } + } + + doOrWaitRegionCreation(filledStorageGroupMap, consensusGroupType); } /** - * Get DataPartition and create a new one if it does not exist + * Do Region creation for those StorageGroups who get the allocation particle, for those who + * doesn't, waiting until other threads finished the creation process. * - * @param physicalPlan DataPartitionPlan with Map>> - * @return DataPartitionResp with DataPartition and TSStatus. SUCCESS_STATUS if all process - * finish. NOT_ENOUGH_DATA_NODE if the DataNodes is not enough to create new Regions. 
+ * @param allotmentMap Map + * @param consensusGroupType SchemaRegion or DataRegion + * @throws NotEnoughDataNodeException When the number of online DataNodes are too small to * + * allocate Regions + * @throws StorageGroupNotExistsException When some StorageGroups don't exist + * @throws TimeoutException When waiting other threads to allocate Regions for too long */ - public DataSet getOrCreateDataPartition(GetOrCreateDataPartitionReq physicalPlan) { - Map>> noAssignedDataPartitionSlots = - partitionInfo.filterNoAssignedDataPartitionSlots(physicalPlan.getPartitionSlotsMap()); + private void doOrWaitRegionCreation( + Map allotmentMap, TConsensusGroupType consensusGroupType) + throws NotEnoughDataNodeException, StorageGroupNotExistsException, TimeoutException { + // StorageGroups who get the allocation particle + Map allocateMap = new HashMap<>(); + // StorageGroups who doesn't get the allocation particle + List waitingList = new ArrayList<>(); + for (String storageGroup : allotmentMap.keySet()) { + // Try to get the allocation particle + if (partitionInfo.contendRegionAllocationParticle(storageGroup, consensusGroupType)) { + // Initialize one Region + allocateMap.put(storageGroup, allotmentMap.get(storageGroup)); + } else { + waitingList.add(storageGroup); + } + } - if (noAssignedDataPartitionSlots.size() > 0) { + // TODO: Use procedure to protect the following process + // Do Region allocation and creation for those StorageGroups who get the particle + getLoadManager().doRegionCreation(allocateMap, consensusGroupType); + // Put back particles after that + for (String storageGroup : allocateMap.keySet()) { + partitionInfo.putBackRegionAllocationParticle(storageGroup, consensusGroupType); + } + + // Waiting Region creation for those StorageGroups who don't get the particle + waitRegionCreation(waitingList, consensusGroupType); + } + + /** Waiting Region creation for those StorageGroups who don't get the particle */ + private void waitRegionCreation(List waitingList, TConsensusGroupType consensusGroupType) + throws TimeoutException { + for (int retry = 0; retry < 100; retry++) { + boolean allocationFinished = true; + for (String storageGroup : waitingList) { + if (!partitionInfo.getRegionAllocationParticle(storageGroup, consensusGroupType)) { + // If a StorageGroup's Region allocation particle doesn't return, + // the Region creation process is not complete + allocationFinished = false; + break; + } + } + if (allocationFinished) { + return; + } - // Make sure each StorageGroup has at least one DataRegion try { - checkAndAllocateRegionsIfNecessary( - new ArrayList<>(noAssignedDataPartitionSlots.keySet()), TConsensusGroupType.DataRegion); - } catch (NotEnoughDataNodeException e) { - DataPartitionResp resp = new DataPartitionResp(); - resp.setStatus(new TSStatus(TSStatusCode.NOT_ENOUGH_DATA_NODE.getStatusCode())); - return resp; + // Sleep 200ms to wait Region allocation + TimeUnit.MILLISECONDS.sleep(200); + } catch (InterruptedException e) { + LOGGER.warn("The PartitionManager is interrupted.", e); } + } + throw new TimeoutException(""); + } - // Allocate DataPartition - Map>>> - assignedDataPartition = allocateDataPartition(noAssignedDataPartitionSlots); + /** + * Only leader use this interface + * + * @return All Regions' RegionReplicaSet + */ + public List getAllReplicaSets() { + return partitionInfo.getAllReplicaSets(); + } - // Persist DataPartition - CreateDataPartitionReq createPlan = new CreateDataPartitionReq(); - createPlan.setAssignedDataPartition(assignedDataPartition); - 
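The contend-or-wait protocol in doOrWaitRegionCreation boils down to the polling loop below. This is a sketch under the assumption that particleReturned reports whether a StorageGroup's allocation particle is back in place; the 100-retry, 200 ms budget matches the constants in waitRegionCreation, though the real method logs InterruptedException instead of propagating it.

import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Predicate;

final class RegionCreationWaiter {
  /** Poll until every waiting StorageGroup's particle is returned, or give up after ~20 seconds. */
  static void await(List<String> waitingStorageGroups, Predicate<String> particleReturned)
      throws TimeoutException, InterruptedException {
    for (int retry = 0; retry < 100; retry++) {
      if (waitingStorageGroups.stream().allMatch(particleReturned)) {
        return; // every owning thread has finished its Region creation
      }
      TimeUnit.MILLISECONDS.sleep(200);
    }
    throw new TimeoutException("Region allocation did not finish within the retry budget");
  }
}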
getConsensusManager().write(createPlan); + /** + * Only leader use this interface. Get the number of Regions currently owned by the specific + * StorageGroup + * + * @param storageGroup StorageGroupName + * @param type SchemaRegion or DataRegion + * @return Number of Regions currently owned by the specific StorageGroup + * @throws StorageGroupNotExistsException When the specific StorageGroup doesn't exist + */ + public int getRegionCount(String storageGroup, TConsensusGroupType type) + throws StorageGroupNotExistsException { + return partitionInfo.getRegionCount(storageGroup, type); + } - // TODO: Allocate more Regions if necessary - } + /** + * Only leader use this interface. + * + * @param storageGroup StorageGroupName + * @param type SchemaRegion or DataRegion + * @return The specific StorageGroup's Regions that sorted by the number of allocated slots + */ + public List> getSortedRegionSlotsCounter( + String storageGroup, TConsensusGroupType type) { + return partitionInfo.getSortedRegionSlotsCounter(storageGroup, type); + } - return getDataPartition(physicalPlan); + /** + * Only leader use this interface + * + * @return the next RegionGroupId + */ + public int generateNextRegionGroupId() { + return partitionInfo.generateNextRegionGroupId(); } /** - * TODO: allocate by LoadManager + * GetNodePathsPartition * - * @param noAssignedDataPartitionSlotsMap Map>> - * @return assign result, Map>>> + * @param physicalPlan GetNodesPathsPartitionReq + * @return SchemaNodeManagementPartitionDataSet that contains only existing matched + * SchemaPartition and matched child paths aboveMtree */ - private Map>>> - allocateDataPartition( - Map>> - noAssignedDataPartitionSlotsMap) { - - Map>>> - result = new HashMap<>(); - - for (String storageGroup : noAssignedDataPartitionSlotsMap.keySet()) { - Map> noAssignedPartitionSlotsMap = - noAssignedDataPartitionSlotsMap.get(storageGroup); - List dataRegionEndPoints = - partitionInfo.getRegionReplicaSets( - getClusterSchemaManager() - .getRegionGroupIds(storageGroup, TConsensusGroupType.DataRegion)); - Random random = new Random(); - - Map>> allocateResult = - new HashMap<>(); - for (Map.Entry> seriesPartitionEntry : - noAssignedPartitionSlotsMap.entrySet()) { - allocateResult.put(seriesPartitionEntry.getKey(), new HashMap<>()); - for (TTimePartitionSlot timePartitionSlot : seriesPartitionEntry.getValue()) { - allocateResult - .get(seriesPartitionEntry.getKey()) - .computeIfAbsent(timePartitionSlot, key -> new ArrayList<>()) - .add(dataRegionEndPoints.get(random.nextInt(dataRegionEndPoints.size()))); - } - } + public DataSet getNodePathsPartition(GetNodePathsPartitionReq physicalPlan) { + SchemaNodeManagementResp schemaNodeManagementResp; + ConsensusReadResponse consensusReadResponse = getConsensusManager().read(physicalPlan); + schemaNodeManagementResp = (SchemaNodeManagementResp) consensusReadResponse.getDataset(); + return schemaNodeManagementResp; + } - result.put(storageGroup, allocateResult); - } - return result; + public void preDeleteStorageGroup( + String storageGroup, PreDeleteStorageGroupReq.PreDeleteType preDeleteType) { + final PreDeleteStorageGroupReq preDeleteStorageGroupReq = + new PreDeleteStorageGroupReq(storageGroup, preDeleteType); + getConsensusManager().write(preDeleteStorageGroupReq); } - private void checkAndAllocateRegionsIfNecessary( - List storageGroups, TConsensusGroupType consensusGroupType) - throws NotEnoughDataNodeException { - List storageGroupWithoutRegion = new ArrayList<>(); - for (String storageGroup : storageGroups) { - List 
groupIds = - getClusterSchemaManager().getRegionGroupIds(storageGroup, consensusGroupType); - if (groupIds.size() == 0) { - storageGroupWithoutRegion.add(storageGroup); + /** + * Called by {@link PartitionManager#regionCleaner} Delete regions of logical deleted storage + * groups periodically. + */ + public void clearDeletedRegions() { + if (getConsensusManager().isLeader()) { + final Set deletedRegionSet = partitionInfo.getDeletedRegionSet(); + if (!deletedRegionSet.isEmpty()) { + LOGGER.info( + "DELETE REGIONS {} START", + deletedRegionSet.stream() + .map(TRegionReplicaSet::getRegionId) + .collect(Collectors.toList())); + SyncDataNodeClientPool.getInstance().deleteRegions(deletedRegionSet); } } - getLoadManager().allocateAndCreateRegions(storageGroupWithoutRegion, consensusGroupType); } - /** Get all allocated RegionReplicaSets */ - public List getAllocatedRegions() { - return partitionInfo.getAllocatedRegions(); - } - - /** Construct SeriesPartitionExecutor by iotdb-confignode.propertis */ - private void setSeriesPartitionExecutor() { - ConfigNodeConf conf = ConfigNodeDescriptor.getInstance().getConf(); - this.executor = - SeriesPartitionExecutor.getSeriesPartitionExecutor( - conf.getSeriesPartitionExecutorClass(), conf.getSeriesPartitionSlotNum()); + public void addMetrics() { + partitionInfo.addMetrics(); } /** @@ -284,19 +521,22 @@ public TSeriesPartitionSlot getSeriesPartitionSlot(String devicePath) { return executor.getSeriesPartitionSlot(devicePath); } - /** - * Only leader use this interface. - * - * @return the next RegionGroupId - */ - public int generateNextRegionGroupId() { - return partitionInfo.generateNextRegionGroupId(); + public DataSet getRegionInfoList(GetRegionInfoListReq req) { + return getConsensusManager().read(req).getDataset(); + } + + public ScheduledExecutorService getRegionCleaner() { + return regionCleaner; } private ConsensusManager getConsensusManager() { return configManager.getConsensusManager(); } + private NodeManager getNodeManager() { + return configManager.getNodeManager(); + } + private ClusterSchemaManager getClusterSchemaManager() { return configManager.getClusterSchemaManager(); } @@ -304,8 +544,4 @@ private ClusterSchemaManager getClusterSchemaManager() { private LoadManager getLoadManager() { return configManager.getLoadManager(); } - - public TSStatus deleteRegions(DeleteRegionsReq deleteRegionsReq) { - return getConsensusManager().write(deleteRegionsReq).getStatus(); - } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/PermissionManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/PermissionManager.java index 61ef6a614a5d..0d682a570f8b 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/PermissionManager.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/PermissionManager.java @@ -19,20 +19,34 @@ package org.apache.iotdb.confignode.manager; +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.confignode.client.SyncDataNodeClientPool; +import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; import org.apache.iotdb.confignode.consensus.request.auth.AuthorReq; import org.apache.iotdb.confignode.consensus.response.PermissionInfoResp; import org.apache.iotdb.confignode.persistence.AuthorInfo; +import org.apache.iotdb.confignode.rpc.thrift.TPermissionInfoResp; +import org.apache.iotdb.mpp.rpc.thrift.TInvalidatePermissionCacheReq; +import 
org.apache.iotdb.rpc.RpcUtils; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.util.List; /** manager permission query and operation */ public class PermissionManager { - private final Manager configManager; + private static final Logger logger = LoggerFactory.getLogger(PermissionManager.class); + + private final ConfigManager configManager; + private final AuthorInfo authorInfo; - public PermissionManager(Manager configManager) { + public PermissionManager(ConfigManager configManager, AuthorInfo authorInfo) { this.configManager = configManager; + this.authorInfo = authorInfo; } /** @@ -42,7 +56,18 @@ public PermissionManager(Manager configManager) { * @return TSStatus */ public TSStatus operatePermission(AuthorReq authorReq) { - return getConsensusManager().write(authorReq).getStatus(); + TSStatus tsStatus; + // If the permissions change, clear the cache content affected by the operation + if (authorReq.getAuthorType() == ConfigRequestType.CreateUser + || authorReq.getAuthorType() == ConfigRequestType.CreateRole) { + tsStatus = getConsensusManager().write(authorReq).getStatus(); + } else { + tsStatus = invalidateCache(authorReq.getUserName(), authorReq.getRoleName()); + if (tsStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + tsStatus = getConsensusManager().write(authorReq).getStatus(); + } + } + return tsStatus; } /** @@ -59,11 +84,33 @@ private ConsensusManager getConsensusManager() { return configManager.getConsensusManager(); } - public TSStatus login(String username, String password) { - return AuthorInfo.getInstance().login(username, password); + public TPermissionInfoResp login(String username, String password) { + return authorInfo.login(username, password); + } + + public TPermissionInfoResp checkUserPrivileges( + String username, List paths, int permission) { + return authorInfo.checkUserPrivileges(username, paths, permission); } - public TSStatus checkUserPrivileges(String username, List paths, int permission) { - return AuthorInfo.getInstance().checkUserPrivileges(username, paths, permission); + /** + * When the permission information of a user or role is changed will clear all datanode + * permissions related to the user or role + */ + public TSStatus invalidateCache(String username, String roleName) { + List allDataNodes = configManager.getNodeManager().getOnlineDataNodes(-1); + TInvalidatePermissionCacheReq req = new TInvalidatePermissionCacheReq(); + TSStatus status; + req.setUsername(username); + req.setRoleName(roleName); + for (TDataNodeInfo dataNodeInfo : allDataNodes) { + status = + SyncDataNodeClientPool.getInstance() + .invalidatePermissionCache(dataNodeInfo.getLocation().getInternalEndPoint(), req); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; + } + } + return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java index d120a9ca04b4..3addeddb5d66 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/ProcedureManager.java @@ -21,18 +21,21 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.utils.StatusUtils; -import org.apache.iotdb.confignode.conf.ConfigNodeConf; +import 
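The cache-invalidation step above is a fail-fast broadcast: clear every online DataNode's permission cache first, and only write the permission change to the consensus group if all of them succeed. A sketch with illustrative names (the real code sends TInvalidatePermissionCacheReq through SyncDataNodeClientPool):

import java.util.List;
import java.util.function.Predicate;

final class PermissionCacheInvalidation {
  /** Returns false at the first DataNode that fails, so the caller can surface its status. */
  static boolean invalidateOnAllDataNodes(
      List<String> dataNodeEndpoints, Predicate<String> invalidateCall) {
    for (String endpoint : dataNodeEndpoints) {
      if (!invalidateCall.test(endpoint)) {
        return false; // do not write the permission change while a stale cache may survive
      }
    }
    return true;
  }
}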
org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; -import org.apache.iotdb.confignode.procedure.ConfigProcedureStore; -import org.apache.iotdb.confignode.procedure.DeleteStorageGroupProcedure; +import org.apache.iotdb.confignode.persistence.ProcedureInfo; +import org.apache.iotdb.confignode.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.ProcedureExecutor; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; +import org.apache.iotdb.confignode.procedure.impl.AddConfigNodeProcedure; +import org.apache.iotdb.confignode.procedure.impl.DeleteStorageGroupProcedure; +import org.apache.iotdb.confignode.procedure.scheduler.ProcedureScheduler; +import org.apache.iotdb.confignode.procedure.scheduler.SimpleProcedureScheduler; +import org.apache.iotdb.confignode.procedure.store.ConfigProcedureStore; +import org.apache.iotdb.confignode.procedure.store.IProcedureStore; +import org.apache.iotdb.confignode.procedure.store.ProcedureStore; +import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterReq; import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; -import org.apache.iotdb.procedure.Procedure; -import org.apache.iotdb.procedure.ProcedureExecutor; -import org.apache.iotdb.procedure.scheduler.ProcedureScheduler; -import org.apache.iotdb.procedure.scheduler.SimpleProcedureScheduler; -import org.apache.iotdb.procedure.store.IProcedureStore; -import org.apache.iotdb.procedure.store.ProcedureStore; import org.apache.iotdb.rpc.RpcUtils; import org.slf4j.Logger; @@ -44,31 +47,35 @@ public class ProcedureManager { private static final Logger LOGGER = LoggerFactory.getLogger(ProcedureManager.class); + + private static final ConfigNodeConfig CONFIG_NODE_CONFIG = + ConfigNodeDescriptor.getInstance().getConf(); + private static final int procedureWaitTimeOut = 30; private static final int procedureWaitRetryTimeout = 250; - private final ConfigManager configNodeManager; + + private final ConfigManager configManager; private ProcedureExecutor executor; private ProcedureScheduler scheduler; private IProcedureStore store; private ConfigNodeProcedureEnv env; - private ConfigNodeConf configNodeConf = ConfigNodeDescriptor.getInstance().getConf(); - public ProcedureManager(ConfigManager configManager) { - this.configNodeManager = configManager; + public ProcedureManager(ConfigManager configManager, ProcedureInfo procedureInfo) { + this.configManager = configManager; this.scheduler = new SimpleProcedureScheduler(); - this.store = new ConfigProcedureStore(configManager); - this.env = new ConfigNodeProcedureEnv(configManager); + this.store = new ConfigProcedureStore(configManager, procedureInfo); + this.env = new ConfigNodeProcedureEnv(configManager, scheduler); this.executor = new ProcedureExecutor<>(env, store, scheduler); } public void shiftExecutor(boolean running) { if (running) { if (!executor.isRunning()) { - executor.init(configNodeConf.getSchemaReplicationFactor()); + executor.init(CONFIG_NODE_CONFIG.getProcedureCoreWorkerThreadsSize()); executor.startWorkers(); executor.startCompletedCleaner( - configNodeConf.getProcedureCompletedCleanInterval(), - configNodeConf.getProcedureCompletedEvictTTL()); + CONFIG_NODE_CONFIG.getProcedureCompletedCleanInterval(), + CONFIG_NODE_CONFIG.getProcedureCompletedEvictTTL()); store.start(); } } else { @@ -92,6 +99,9 @@ public TSStatus deleteStorageGroups(ArrayList deleteSgSchem } List procedureStatus = new ArrayList<>(); boolean isSucceed = 
getProcedureStatus(this.executor, procIdList, procedureStatus); + // clear the previously deleted regions + final PartitionManager partitionManager = getConfigManager().getPartitionManager(); + partitionManager.getRegionCleaner().submit(partitionManager::clearDeletedRegions); if (isSucceed) { return StatusUtils.OK; } else { @@ -99,6 +109,17 @@ public TSStatus deleteStorageGroups(ArrayList deleteSgSchem } } + /** + * generate a procedure, and execute by one by one + * + * @param req new config node + */ + public void addConfigNode(TConfigNodeRegisterReq req) { + AddConfigNodeProcedure addConfigNodeProcedure = + new AddConfigNodeProcedure(req.getConfigNodeLocation()); + this.executor.submitProcedure(addConfigNodeProcedure); + } + private static boolean getProcedureStatus( ProcedureExecutor executor, List procIds, List statusList) { boolean isSucceed = true; @@ -145,8 +166,8 @@ public static void sleepWithoutInterrupt(final long timeToSleep) { GET-SET Region */ // ====================================================== - public Manager getConfigNodeManager() { - return configNodeManager; + public IManager getConfigManager() { + return configManager; } public ProcedureExecutor getExecutor() { diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java new file mode 100644 index 000000000000..8e55db7208c2 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/UDFManager.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.manager; + +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.confignode.client.AsyncDataNodeClientPool; +import org.apache.iotdb.confignode.client.handlers.FunctionManagementHandler; +import org.apache.iotdb.confignode.consensus.request.write.CreateFunctionReq; +import org.apache.iotdb.confignode.consensus.request.write.DropFunctionReq; +import org.apache.iotdb.confignode.persistence.UDFInfo; +import org.apache.iotdb.mpp.rpc.thrift.TCreateFunctionRequest; +import org.apache.iotdb.mpp.rpc.thrift.TDropFunctionRequest; +import org.apache.iotdb.rpc.RpcUtils; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; + +public class UDFManager { + + private static final Logger LOGGER = LoggerFactory.getLogger(UDFManager.class); + + private final ConfigManager configManager; + private final UDFInfo udfInfo; + + public UDFManager(ConfigManager configManager, UDFInfo udfInfo) { + this.configManager = configManager; + this.udfInfo = udfInfo; + } + + public TSStatus createFunction(String functionName, String className, List uris) { + try { + udfInfo.validateBeforeRegistration(functionName, className, uris); + + final TSStatus configNodeStatus = + configManager + .getConsensusManager() + .write(new CreateFunctionReq(functionName, className, uris)) + .getStatus(); + if (configNodeStatus.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return configNodeStatus; + } + + return RpcUtils.squashResponseStatusList( + createFunctionOnDataNodes(functionName, className, uris)); + } catch (Exception e) { + final String errorMessage = + String.format( + "Failed to register UDF %s(class name: %s, uris: %s), because of exception: %s", + functionName, className, uris, e); + LOGGER.warn(errorMessage, e); + return new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()) + .setMessage(errorMessage); + } + } + + private List createFunctionOnDataNodes( + String functionName, String className, List uris) { + final List onlineDataNodes = + configManager.getNodeManager().getOnlineDataNodes(-1); + final List dataNodeResponseStatus = + Collections.synchronizedList(new ArrayList<>(onlineDataNodes.size())); + final CountDownLatch countDownLatch = new CountDownLatch(onlineDataNodes.size()); + final TCreateFunctionRequest request = + new TCreateFunctionRequest(functionName, className, uris); + + for (TDataNodeInfo dataNodeInfo : onlineDataNodes) { + final TEndPoint endPoint = dataNodeInfo.getLocation().getInternalEndPoint(); + AsyncDataNodeClientPool.getInstance() + .createFunction( + endPoint, + request, + new FunctionManagementHandler( + countDownLatch, dataNodeResponseStatus, endPoint.getIp(), endPoint.getPort())); + } + + try { + countDownLatch.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + LOGGER.error("UDFManager was interrupted during creating functions on data nodes", e); + } + + return dataNodeResponseStatus; + } + + public TSStatus dropFunction(String functionName) { + try { + final List nodeResponseList = dropFunctionOnDataNodes(functionName); + final TSStatus configNodeStatus = + configManager.getConsensusManager().write(new DropFunctionReq(functionName)).getStatus(); + 
nodeResponseList.add(configNodeStatus); + return RpcUtils.squashResponseStatusList(nodeResponseList); + } catch (Exception e) { + final String errorMessage = + String.format("Failed to deregister UDF %s, because of exception: %s", functionName, e); + LOGGER.warn(errorMessage, e); + return new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()) + .setMessage(errorMessage); + } + } + + private List dropFunctionOnDataNodes(String functionName) { + final List onlineDataNodes = + configManager.getNodeManager().getOnlineDataNodes(-1); + final List dataNodeResponseStatus = + Collections.synchronizedList(new ArrayList<>(onlineDataNodes.size())); + final CountDownLatch countDownLatch = new CountDownLatch(onlineDataNodes.size()); + final TDropFunctionRequest request = new TDropFunctionRequest(functionName); + + for (TDataNodeInfo dataNodeInfo : onlineDataNodes) { + final TEndPoint endPoint = dataNodeInfo.getLocation().getInternalEndPoint(); + AsyncDataNodeClientPool.getInstance() + .dropFunction( + endPoint, + request, + new FunctionManagementHandler( + countDownLatch, dataNodeResponseStatus, endPoint.getIp(), endPoint.getPort())); + } + + try { + countDownLatch.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + LOGGER.error("UDFManager was interrupted during dropping functions on data nodes", e); + } + + return dataNodeResponseStatus; + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/balancer/RegionBalancer.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/balancer/RegionBalancer.java deleted file mode 100644 index bf7e24527bb5..000000000000 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/balancer/RegionBalancer.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.confignode.manager.balancer; - -public class RegionBalancer {} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/balancer/SeriesPartitionSlotBalancer.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/balancer/SeriesPartitionSlotBalancer.java deleted file mode 100644 index 441f732264e7..000000000000 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/balancer/SeriesPartitionSlotBalancer.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
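createFunctionOnDataNodes and dropFunctionOnDataNodes above share one fan-out shape: send a request to every online DataNode, collect responses into a synchronized list, and block on a CountDownLatch until all callbacks return. The sketch below shows that pattern with generic types; the async client pool and FunctionManagementHandler are replaced by a plain executor and callback purely for illustration.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Function;

final class DataNodeFanOut {
  /** Send one request per DataNode and wait for every response before returning. */
  static <R> List<R> broadcast(List<String> dataNodeEndpoints, Function<String, R> call)
      throws InterruptedException {
    List<R> responses = Collections.synchronizedList(new ArrayList<>(dataNodeEndpoints.size()));
    CountDownLatch latch = new CountDownLatch(dataNodeEndpoints.size());
    ExecutorService pool = Executors.newFixedThreadPool(Math.max(1, dataNodeEndpoints.size()));
    for (String endpoint : dataNodeEndpoints) {
      pool.execute(
          () -> {
            try {
              responses.add(call.apply(endpoint));
            } finally {
              latch.countDown(); // count down even on failure so the caller never blocks forever
            }
          });
    }
    latch.await();
    pool.shutdown();
    return responses;
  }
}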
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.confignode.manager.balancer; - -public class SeriesPartitionSlotBalancer {} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/LoadManager.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/LoadManager.java new file mode 100644 index 000000000000..3d76fea1030a --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/LoadManager.java @@ -0,0 +1,261 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.manager.load; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; +import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; +import org.apache.iotdb.commons.concurrent.threadpool.ScheduledExecutorUtil; +import org.apache.iotdb.commons.partition.DataPartitionTable; +import org.apache.iotdb.commons.partition.SchemaPartitionTable; +import org.apache.iotdb.confignode.client.AsyncDataNodeClientPool; +import org.apache.iotdb.confignode.client.handlers.HeartbeatHandler; +import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; +import org.apache.iotdb.confignode.consensus.request.write.CreateRegionsReq; +import org.apache.iotdb.confignode.exception.NotEnoughDataNodeException; +import org.apache.iotdb.confignode.exception.StorageGroupNotExistsException; +import org.apache.iotdb.confignode.manager.ClusterSchemaManager; +import org.apache.iotdb.confignode.manager.ConsensusManager; +import org.apache.iotdb.confignode.manager.IManager; +import org.apache.iotdb.confignode.manager.NodeManager; +import org.apache.iotdb.confignode.manager.PartitionManager; +import org.apache.iotdb.confignode.manager.load.balancer.PartitionBalancer; +import org.apache.iotdb.confignode.manager.load.balancer.RegionBalancer; +import org.apache.iotdb.confignode.manager.load.balancer.RouteBalancer; +import org.apache.iotdb.confignode.manager.load.heartbeat.HeartbeatCache; +import org.apache.iotdb.confignode.manager.load.heartbeat.IHeartbeatStatistic; +import 
org.apache.iotdb.mpp.rpc.thrift.THeartbeatReq; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +/** + * The LoadManager at ConfigNodeGroup-Leader is active. It proactively implements the cluster + * dynamic load balancing policy and passively accepts the PartitionTable expansion request. + */ +public class LoadManager { + + private static final Logger LOGGER = LoggerFactory.getLogger(LoadManager.class); + + private final IManager configManager; + + private final long heartbeatInterval = + ConfigNodeDescriptor.getInstance().getConf().getHeartbeatInterval(); + // Map + private final Map heartbeatCacheMap; + + // Balancers + private final RegionBalancer regionBalancer; + private final PartitionBalancer partitionBalancer; + private final RouteBalancer routeBalancer; + + /** heartbeat executor service */ + private final ScheduledExecutorService heartBeatExecutor = + IoTDBThreadPoolFactory.newSingleThreadScheduledExecutor(LoadManager.class.getSimpleName()); + + /** monitor for heartbeat state change */ + private final Object heartbeatMonitor = new Object(); + + private Future currentHeartbeatFuture; + private int balanceCount = 0; + + public LoadManager(IManager configManager) { + this.configManager = configManager; + this.heartbeatCacheMap = new ConcurrentHashMap<>(); + + this.regionBalancer = new RegionBalancer(configManager); + this.partitionBalancer = new PartitionBalancer(configManager); + this.routeBalancer = new RouteBalancer(configManager); + } + + /** + * Allocate and create Regions for each StorageGroup. + * + * @param allotmentMap Map + * @param consensusGroupType TConsensusGroupType of Region to be allocated + */ + public void doRegionCreation( + Map allotmentMap, TConsensusGroupType consensusGroupType) + throws NotEnoughDataNodeException, StorageGroupNotExistsException { + CreateRegionsReq createRegionGroupsReq = + regionBalancer.genRegionsAllocationPlan(allotmentMap, consensusGroupType); + + // TODO: Use procedure to protect the following process + // Create Regions on DataNodes + Map ttlMap = new HashMap<>(); + for (String storageGroup : createRegionGroupsReq.getRegionGroupMap().keySet()) { + ttlMap.put( + storageGroup, + getClusterSchemaManager().getStorageGroupSchemaByName(storageGroup).getTTL()); + } + AsyncDataNodeClientPool.getInstance().createRegions(createRegionGroupsReq, ttlMap); + // Persist the allocation result + getConsensusManager().write(createRegionGroupsReq); + } + + /** + * Allocate SchemaPartitions + * + * @param unassignedSchemaPartitionSlotsMap SchemaPartitionSlots that should be assigned + * @return Map, the allocating result + */ + public Map allocateSchemaPartition( + Map> unassignedSchemaPartitionSlotsMap) { + return partitionBalancer.allocateSchemaPartition(unassignedSchemaPartitionSlotsMap); + } + + /** + * Allocate DataPartitions + * + * @param unassignedDataPartitionSlotsMap DataPartitionSlots that should be assigned + * @return Map, the allocating result + */ + public Map allocateDataPartition( + Map>> + unassignedDataPartitionSlotsMap) { + return partitionBalancer.allocateDataPartition(unassignedDataPartitionSlotsMap); + } + + /** + * Generate an optimal real-time read/write requests routing policy. 
+ * + * @return Map, The routing policy of read/write requests + * for each Region is based on the order in the TRegionReplicaSet. The replica with higher + * sorting result have higher priority. + */ + public Map genRealTimeRoutingPolicy() { + return routeBalancer.genRealTimeRoutingPolicy(getPartitionManager().getAllReplicaSets()); + } + + /** + * Get the loadScore of each DataNode + * + * @return Map + */ + public Map getAllLoadScores() { + Map result = new ConcurrentHashMap<>(); + + heartbeatCacheMap.forEach( + (dataNodeId, heartbeatCache) -> result.put(dataNodeId, heartbeatCache.getLoadScore())); + + return result; + } + + /** Start the heartbeat service */ + public void start() { + LOGGER.debug("Start Heartbeat Service of LoadManager"); + synchronized (heartbeatMonitor) { + if (currentHeartbeatFuture == null) { + currentHeartbeatFuture = + ScheduledExecutorUtil.safelyScheduleWithFixedDelay( + heartBeatExecutor, + this::heartbeatLoopBody, + 0, + heartbeatInterval, + TimeUnit.MILLISECONDS); + } + } + } + + /** Stop the heartbeat service */ + public void stop() { + LOGGER.debug("Stop Heartbeat Service of LoadManager"); + synchronized (heartbeatMonitor) { + if (currentHeartbeatFuture != null) { + currentHeartbeatFuture.cancel(false); + currentHeartbeatFuture = null; + } + } + } + + /** loop body of the heartbeat thread */ + private void heartbeatLoopBody() { + if (getConsensusManager().isLeader()) { + // Send heartbeat requests to all the online DataNodes + pingOnlineDataNodes(getNodeManager().getOnlineDataNodes(-1)); + // TODO: Send heartbeat requests to all the online ConfigNodes + + // Do load balancing + doLoadBalancing(balanceCount); + balanceCount += 1; + } + } + + private THeartbeatReq genHeartbeatReq() { + return new THeartbeatReq(System.currentTimeMillis()); + } + + private void doLoadBalancing(int balanceCount) { + if (balanceCount % 5 == 0) { + // We update nodes' load statistic in every 5s + updateNodeLoadStatistic(); + } + } + + private void updateNodeLoadStatistic() { + heartbeatCacheMap.values().forEach(IHeartbeatStatistic::updateLoadStatistic); + } + + /** + * Send heartbeat requests to all the online DataNodes + * + * @param onlineDataNodes DataNodes that currently online + */ + private void pingOnlineDataNodes(List onlineDataNodes) { + // Send heartbeat requests + for (TDataNodeInfo dataNodeInfo : onlineDataNodes) { + HeartbeatHandler handler = + new HeartbeatHandler( + dataNodeInfo.getLocation(), + heartbeatCacheMap.computeIfAbsent( + dataNodeInfo.getLocation().getDataNodeId(), empty -> new HeartbeatCache())); + AsyncDataNodeClientPool.getInstance() + .getHeartBeat( + dataNodeInfo.getLocation().getInternalEndPoint(), genHeartbeatReq(), handler); + } + } + + private ConsensusManager getConsensusManager() { + return configManager.getConsensusManager(); + } + + private NodeManager getNodeManager() { + return configManager.getNodeManager(); + } + + private ClusterSchemaManager getClusterSchemaManager() { + return configManager.getClusterSchemaManager(); + } + + private PartitionManager getPartitionManager() { + return configManager.getPartitionManager(); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/PartitionBalancer.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/PartitionBalancer.java new file mode 100644 index 000000000000..2c71f84a2980 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/PartitionBalancer.java @@ -0,0 +1,71 @@ +/* + * Licensed to 
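The heartbeat service above is a fixed-delay loop that acts only while this ConfigNode is leader and refreshes load statistics every fifth tick. A stripped-down sketch with a plain ScheduledExecutorService (the real code builds its executor through IoTDBThreadPoolFactory and ScheduledExecutorUtil):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

final class HeartbeatLoop {
  private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
  private ScheduledFuture<?> future;
  private int balanceCount = 0;

  /** Start pinging DataNodes every heartbeatIntervalMs; the body is skipped while not leader. */
  synchronized void start(
      long heartbeatIntervalMs,
      BooleanSupplier isLeader,
      Runnable pingDataNodes,
      Runnable updateLoadStatistics) {
    if (future != null) {
      return; // already running
    }
    future =
        executor.scheduleWithFixedDelay(
            () -> {
              if (!isLeader.getAsBoolean()) {
                return;
              }
              pingDataNodes.run();
              if (balanceCount % 5 == 0) {
                updateLoadStatistics.run(); // refresh load scores roughly every 5 heartbeats
              }
              balanceCount++;
            },
            0,
            heartbeatIntervalMs,
            TimeUnit.MILLISECONDS);
  }

  synchronized void stop() {
    if (future != null) {
      future.cancel(false);
      future = null;
    }
  }
}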
the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.manager.load.balancer; + +import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; +import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.partition.DataPartitionTable; +import org.apache.iotdb.commons.partition.SchemaPartitionTable; +import org.apache.iotdb.confignode.manager.IManager; +import org.apache.iotdb.confignode.manager.load.balancer.partition.GreedyPartitionAllocator; +import org.apache.iotdb.confignode.manager.load.balancer.partition.IPartitionAllocator; + +import java.util.List; +import java.util.Map; + +/** + * The SeriesPartitionSlotBalancer provides interfaces to generate optimal Partition allocation and + * migration plans + */ +public class PartitionBalancer { + + private final IManager configManager; + + public PartitionBalancer(IManager configManager) { + this.configManager = configManager; + } + + /** + * Allocate SchemaPartitions + * + * @param unassignedSchemaPartitionSlotsMap SchemaPartitionSlots that should be assigned + * @return Map, the allocating result + */ + public Map allocateSchemaPartition( + Map> unassignedSchemaPartitionSlotsMap) { + return genPartitionAllocator().allocateSchemaPartition(unassignedSchemaPartitionSlotsMap); + } + + /** + * Allocate DataPartitions + * + * @param unassignedDataPartitionSlotsMap DataPartitionSlots that should be assigned + * @return Map, the allocating result + */ + public Map allocateDataPartition( + Map>> + unassignedDataPartitionSlotsMap) { + return genPartitionAllocator().allocateDataPartition(unassignedDataPartitionSlotsMap); + } + + private IPartitionAllocator genPartitionAllocator() { + // TODO: The type of PartitionAllocator should be configurable + return new GreedyPartitionAllocator(configManager); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RegionBalancer.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RegionBalancer.java new file mode 100644 index 000000000000..70c264bfa528 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RegionBalancer.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.manager.load.balancer; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.confignode.consensus.request.write.CreateRegionsReq; +import org.apache.iotdb.confignode.exception.NotEnoughDataNodeException; +import org.apache.iotdb.confignode.exception.StorageGroupNotExistsException; +import org.apache.iotdb.confignode.manager.ClusterSchemaManager; +import org.apache.iotdb.confignode.manager.IManager; +import org.apache.iotdb.confignode.manager.NodeManager; +import org.apache.iotdb.confignode.manager.PartitionManager; +import org.apache.iotdb.confignode.manager.load.balancer.region.CopySetRegionAllocator; +import org.apache.iotdb.confignode.manager.load.balancer.region.IRegionAllocator; +import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; + +import java.util.List; +import java.util.Map; + +/** + * The RegionBalancer provides interfaces to generate optimal Region allocation and migration plans + */ +public class RegionBalancer { + + private final IManager configManager; + + public RegionBalancer(IManager configManager) { + this.configManager = configManager; + } + + /** + * Generate a Regions allocation plan(CreateRegionsReq) + * + * @param allotmentMap Map + * @param consensusGroupType TConsensusGroupType of the new Regions + * @return CreateRegionsReq + * @throws NotEnoughDataNodeException When the number of DataNodes is not enough for allocation + * @throws StorageGroupNotExistsException When some StorageGroups don't exist + */ + public CreateRegionsReq genRegionsAllocationPlan( + Map allotmentMap, TConsensusGroupType consensusGroupType) + throws NotEnoughDataNodeException, StorageGroupNotExistsException { + CreateRegionsReq createRegionGroupsReq = new CreateRegionsReq(); + IRegionAllocator regionAllocator = genRegionAllocator(); + + List onlineDataNodes = getNodeManager().getOnlineDataNodes(-1); + List allocatedRegions = getPartitionManager().getAllReplicaSets(); + + for (Map.Entry entry : allotmentMap.entrySet()) { + String storageGroup = entry.getKey(); + int allotment = entry.getValue(); + + // Get schema + TStorageGroupSchema storageGroupSchema = + getClusterSchemaManager().getStorageGroupSchemaByName(storageGroup); + int replicationFactor = + consensusGroupType == TConsensusGroupType.SchemaRegion + ? 
storageGroupSchema.getSchemaReplicationFactor() + : storageGroupSchema.getDataReplicationFactor(); + + // Check validity + if (onlineDataNodes.size() < replicationFactor) { + throw new NotEnoughDataNodeException(); + } + + for (int i = 0; i < allotment; i++) { + // Generate allocation plan + TRegionReplicaSet newRegion = + regionAllocator.allocateRegion( + onlineDataNodes, + allocatedRegions, + replicationFactor, + new TConsensusGroupId( + consensusGroupType, getPartitionManager().generateNextRegionGroupId())); + createRegionGroupsReq.addRegionGroup(storageGroup, newRegion); + + allocatedRegions.add(newRegion); + } + } + + return createRegionGroupsReq; + } + + private IRegionAllocator genRegionAllocator() { + // TODO: The RegionAllocator should be configurable + return new CopySetRegionAllocator(); + } + + private NodeManager getNodeManager() { + return configManager.getNodeManager(); + } + + private ClusterSchemaManager getClusterSchemaManager() { + return configManager.getClusterSchemaManager(); + } + + private PartitionManager getPartitionManager() { + return configManager.getPartitionManager(); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java new file mode 100644 index 000000000000..aebb25a43d8a --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.manager.load.balancer; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.confignode.manager.IManager; +import org.apache.iotdb.confignode.manager.load.LoadManager; +import org.apache.iotdb.confignode.manager.load.balancer.router.IRouter; +import org.apache.iotdb.confignode.manager.load.balancer.router.LoadScoreGreedyRouter; + +import java.util.List; +import java.util.Map; + +/** + * The RouteBalancer plays the role of load information collector since different routing policy + * need different load information. 
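One detail of the planning loop above is worth calling out: each newly planned Region is appended to allocatedRegions before the next draw, so the allocator also sees the Regions planned earlier in the same batch. A hedged sketch with simplified types (allocateOne stands in for IRegionAllocator.allocateRegion):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

final class IncrementalRegionPlanning {
  /** Plan allotment Regions, feeding each new Region back into the view used for the next draw. */
  static <R> List<R> plan(int allotment, List<R> existingRegions, Function<List<R>, R> allocateOne) {
    List<R> view = new ArrayList<>(existingRegions);
    List<R> planned = new ArrayList<>();
    for (int i = 0; i < allotment; i++) {
      R next = allocateOne.apply(view);
      planned.add(next);
      view.add(next); // later draws take this batch's Regions into account
    }
    return planned;
  }
}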
+ */ +public class RouteBalancer { + + private final IManager configManager; + + public RouteBalancer(IManager configManager) { + this.configManager = configManager; + } + + public Map genRealTimeRoutingPolicy( + List regionReplicaSets) { + return genRouter().genRealTimeRoutingPolicy(regionReplicaSets); + } + + private IRouter genRouter() { + // TODO: The Router should be configurable + return new LoadScoreGreedyRouter(getLoadManager().getAllLoadScores()); + } + + private LoadManager getLoadManager() { + return configManager.getLoadManager(); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/partition/GreedyPartitionAllocator.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/partition/GreedyPartitionAllocator.java new file mode 100644 index 000000000000..f92c02694aef --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/partition/GreedyPartitionAllocator.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.confignode.manager.load.balancer.partition; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; +import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; +import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.partition.DataPartitionTable; +import org.apache.iotdb.commons.partition.SchemaPartitionTable; +import org.apache.iotdb.commons.partition.SeriesPartitionTable; +import org.apache.iotdb.confignode.manager.IManager; +import org.apache.iotdb.confignode.manager.PartitionManager; +import org.apache.iotdb.tsfile.utils.Pair; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** Allocating new Partitions by greedy algorithm */ +public class GreedyPartitionAllocator implements IPartitionAllocator { + + private final IManager configManager; + + public GreedyPartitionAllocator(IManager configManager) { + this.configManager = configManager; + } + + @Override + public Map allocateSchemaPartition( + Map> unassignedSchemaPartitionSlotsMap) { + Map result = new ConcurrentHashMap<>(); + + unassignedSchemaPartitionSlotsMap.forEach( + (storageGroup, unassignedPartitionSlots) -> { + // List> + List> regionSlotsCounter = + getPartitionManager() + .getSortedRegionSlotsCounter(storageGroup, TConsensusGroupType.SchemaRegion); + + // Enumerate SeriesPartitionSlot + Map schemaPartitionMap = + new ConcurrentHashMap<>(); + for (TSeriesPartitionSlot seriesPartitionSlot : unassignedPartitionSlots) { + // Greedy allocation + schemaPartitionMap.put(seriesPartitionSlot, regionSlotsCounter.get(0).getRight()); + // Bubble sort + bubbleSort(regionSlotsCounter); + } + result.put(storageGroup, new SchemaPartitionTable(schemaPartitionMap)); + }); + + return result; + } + + @Override + public Map allocateDataPartition( + Map>> + unassignedDataPartitionSlotsMap) { + Map result = new ConcurrentHashMap<>(); + + unassignedDataPartitionSlotsMap.forEach( + (storageGroup, unassignedPartitionSlotsMap) -> { + // List> + List> regionSlotsCounter = + getPartitionManager() + .getSortedRegionSlotsCounter(storageGroup, TConsensusGroupType.DataRegion); + + // Enumerate SeriesPartitionSlot + Map dataPartitionMap = + new ConcurrentHashMap<>(); + for (Map.Entry> seriesPartitionEntry : + unassignedPartitionSlotsMap.entrySet()) { + // Enumerate TimePartitionSlot + Map> seriesPartitionMap = + new ConcurrentHashMap<>(); + for (TTimePartitionSlot timePartitionSlot : seriesPartitionEntry.getValue()) { + // Greedy allocation + seriesPartitionMap.put( + timePartitionSlot, + Collections.singletonList(regionSlotsCounter.get(0).getRight())); + // Bubble sort + bubbleSort(regionSlotsCounter); + } + dataPartitionMap.put( + seriesPartitionEntry.getKey(), new SeriesPartitionTable(seriesPartitionMap)); + } + result.put(storageGroup, new DataPartitionTable(dataPartitionMap)); + }); + + return result; + } + + private void bubbleSort(List> regionSlotsCounter) { + int index = 0; + regionSlotsCounter.get(0).setLeft(regionSlotsCounter.get(0).getLeft() + 1); + while (index < regionSlotsCounter.size() - 1 + && regionSlotsCounter.get(index).getLeft() > regionSlotsCounter.get(index + 1).getLeft()) { + Collections.swap(regionSlotsCounter, index, index + 1); + index += 1; + } + } + + private PartitionManager getPartitionManager() { + return configManager.getPartitionManager(); + } +} diff --git 
a/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/partition/IPartitionAllocator.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/partition/IPartitionAllocator.java new file mode 100644 index 000000000000..93d0a40c74bd --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/partition/IPartitionAllocator.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.manager.load.balancer.partition; + +import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; +import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.partition.DataPartitionTable; +import org.apache.iotdb.commons.partition.SchemaPartitionTable; + +import java.util.List; +import java.util.Map; + +/** + * The IPartitionAllocator is a functional interface, which means a new functional class who + * implements the IPartitionAllocator must be created for each Partition allocation. + */ +public interface IPartitionAllocator { + + /** + * Allocate SchemaPartitions + * + * @param unassignedSchemaPartitionSlotsMap SchemaPartitionSlots that should be assigned + * @return Map, the allocating result + */ + Map allocateSchemaPartition( + Map> unassignedSchemaPartitionSlotsMap); + + /** + * Allocate DataPartitions + * + * @param unassignedDataPartitionSlotsMap DataPartitionSlots that should be assigned + * @return Map, the allocating result + */ + Map allocateDataPartition( + Map>> + unassignedDataPartitionSlotsMap); +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/allocator/CopySetRegionAllocator.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/CopySetRegionAllocator.java similarity index 82% rename from confignode/src/main/java/org/apache/iotdb/confignode/manager/allocator/CopySetRegionAllocator.java rename to confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/CopySetRegionAllocator.java index 00da9dc05f6d..384c919e83ce 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/allocator/CopySetRegionAllocator.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/CopySetRegionAllocator.java @@ -16,9 +16,10 @@ * specific language governing permissions and limitations * under the License. 
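The greedy allocation in GreedyPartitionAllocator above relies on keeping the per-Region slot counters sorted: the head entry always holds the fewest slots, and after it receives one more slot a single bubble pass restores the ordering. A self-contained sketch with a small mutable counter class (the real code uses org.apache.iotdb.tsfile.utils.Pair):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class GreedySlotAssignment {
  static final class RegionCounter {
    final int regionId;
    long slots;

    RegionCounter(int regionId, long slots) {
      this.regionId = regionId;
      this.slots = slots;
    }
  }

  /** Assign each new slot to the least-loaded Region, re-sorting with one bubble pass per step. */
  static List<Integer> assign(int newSlots, List<RegionCounter> sortedByLoadAscending) {
    List<Integer> assignedRegionIds = new ArrayList<>();
    for (int i = 0; i < newSlots; i++) {
      RegionCounter head = sortedByLoadAscending.get(0);
      assignedRegionIds.add(head.regionId);
      head.slots++;
      int index = 0;
      while (index < sortedByLoadAscending.size() - 1
          && sortedByLoadAscending.get(index).slots > sortedByLoadAscending.get(index + 1).slots) {
        Collections.swap(sortedByLoadAscending, index, index + 1);
        index++;
      }
    }
    return assignedRegionIds;
  }
}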
*/ -package org.apache.iotdb.confignode.manager.allocator; +package org.apache.iotdb.confignode.manager.load.balancer.region; import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; @@ -41,15 +42,15 @@ public class CopySetRegionAllocator implements IRegionAllocator { private int maxId = 0; private int intersectionSize = 0; - private List weightList; + private final List weightList; public CopySetRegionAllocator() { - // Empty constructor + this.weightList = new ArrayList<>(); } @Override public TRegionReplicaSet allocateRegion( - List onlineDataNodes, + List onlineDataNodes, List allocatedRegions, int replicationFactor, TConsensusGroupId consensusGroupId) { @@ -73,18 +74,20 @@ public TRegionReplicaSet allocateRegion( intersectionSize += 1; } - clear(); result.setRegionId(consensusGroupId); return result; } private void buildWeightList( - List onlineDataNodes, List allocatedRegions) { + List onlineDataNodes, List allocatedRegions) { + + // TODO: The remaining disk capacity of DataNode can also be calculated into the weightList + int maximumRegionNum = 0; Map countMap = new HashMap<>(); - for (TDataNodeLocation dataNodeLocation : onlineDataNodes) { - maxId = Math.max(maxId, dataNodeLocation.getDataNodeId()); - countMap.put(dataNodeLocation, 0); + for (TDataNodeInfo dataNodeInfo : onlineDataNodes) { + maxId = Math.max(maxId, dataNodeInfo.getLocation().getDataNodeId()); + countMap.put(dataNodeInfo.getLocation(), 0); } for (TRegionReplicaSet regionReplicaSet : allocatedRegions) { for (TDataNodeLocation dataNodeLocation : regionReplicaSet.getDataNodeLocations()) { @@ -93,7 +96,6 @@ private void buildWeightList( } } - weightList = new ArrayList<>(); for (Map.Entry countEntry : countMap.entrySet()) { int weight = maximumRegionNum - countEntry.getValue() + 1; // Repeatedly add DataNode copies equal to the number of their weights @@ -103,6 +105,7 @@ private void buildWeightList( } } + /** @return A new CopySet based on weighted random */ private TRegionReplicaSet genWeightedRandomRegion(int replicationFactor) { Set checkSet = new HashSet<>(); TRegionReplicaSet randomRegion = new TRegionReplicaSet(); @@ -124,6 +127,14 @@ private TRegionReplicaSet genWeightedRandomRegion(int replicationFactor) { return randomRegion; } + /** + * Do intersection check. + * + * @param allocatedRegions Allocated CopySets. + * @param newRegion A new CopySet. + * @return True if the intersection size between every allocatedRegions and the newRegion are not + * exceed intersectionSize. 
+ */ private boolean intersectionCheck( List allocatedRegions, TRegionReplicaSet newRegion) { BitSet newBit = new BitSet(maxId + 1); @@ -145,11 +156,4 @@ private boolean intersectionCheck( } return true; } - - private void clear() { - maxId = 0; - intersectionSize = 0; - weightList.clear(); - weightList = null; - } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/allocator/IRegionAllocator.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/IRegionAllocator.java similarity index 82% rename from confignode/src/main/java/org/apache/iotdb/confignode/manager/allocator/IRegionAllocator.java rename to confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/IRegionAllocator.java index 4874f5a18c8a..ead09395cb82 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/allocator/IRegionAllocator.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/region/IRegionAllocator.java @@ -16,14 +16,18 @@ * specific language governing permissions and limitations * under the License. */ -package org.apache.iotdb.confignode.manager.allocator; +package org.apache.iotdb.confignode.manager.load.balancer.region; import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; -import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import java.util.List; +/** + * The IRegionAllocator is a functional interface, which means a new functional class who implements + * the IRegionAllocator must be created for each Region allocation. + */ public interface IRegionAllocator { /** @@ -37,7 +41,7 @@ public interface IRegionAllocator { * @return The optimal TRegionReplicaSet derived by the specific algorithm */ TRegionReplicaSet allocateRegion( - List onlineDataNodes, + List onlineDataNodes, List allocatedRegions, int replicationFactor, TConsensusGroupId consensusGroupId); diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/IRouter.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/IRouter.java new file mode 100644 index 000000000000..938f132a3e26 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/IRouter.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.confignode.manager.load.balancer.router; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; + +import java.util.List; +import java.util.Map; + +/** + * The IRouter is a functional interface, which means a new functional class who implements the + * IRouter must be created for generating the latest real-time routing policy. + */ +public interface IRouter { + + /** + * Generate an optimal real-time read/write requests routing policy. + * + * @param replicaSets All RegionReplicasEts currently owned by the cluster + * @return Map, The routing policy of read/write requests + * for each Region is based on the order in the TRegionReplicaSet. The replica with higher + * sorting result have higher priority. + */ + Map genRealTimeRoutingPolicy( + List replicaSets); +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/LoadScoreGreedyRouter.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/LoadScoreGreedyRouter.java new file mode 100644 index 000000000000..d3b64e3999fe --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/LoadScoreGreedyRouter.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.manager.load.balancer.router; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.tsfile.utils.Pair; + +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Vector; +import java.util.concurrent.ConcurrentHashMap; + +/** The LoadScoreGreedyRouter always pick the Replica with the lowest loadScore */ +public class LoadScoreGreedyRouter implements IRouter { + + // Map + private final Map loadScoreMap; + + /** The constructor is used to pass in the load information needed by the algorithm */ + public LoadScoreGreedyRouter(Map loadScoreMap) { + this.loadScoreMap = loadScoreMap; + } + + @Override + public Map genRealTimeRoutingPolicy( + List replicaSets) { + Map result = new ConcurrentHashMap<>(); + + replicaSets.forEach( + replicaSet -> { + TRegionReplicaSet sortedReplicaSet = new TRegionReplicaSet(); + sortedReplicaSet.setRegionId(replicaSet.getRegionId()); + + // List> for sorting + List> sortList = new Vector<>(); + replicaSet + .getDataNodeLocations() + .forEach( + dataNodeLocation -> { + // The absenteeism of loadScoreMap means ConfigNode-leader doesn't receive any + // heartbeat from that DataNode. + // In this case we put a maximum loadScore into the sortList. 
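+                        // A Float.MAX_VALUE score sorts that DataNode to the end of the replica priority order.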
+ sortList.add( + new Pair<>( + (double) + loadScoreMap.computeIfAbsent( + dataNodeLocation.getDataNodeId(), empty -> Float.MAX_VALUE), + dataNodeLocation)); + }); + + sortList.sort(Comparator.comparingDouble(Pair::getLeft)); + for (Pair entry : sortList) { + sortedReplicaSet.addToDataNodeLocations(entry.getRight()); + } + + result.put(sortedReplicaSet.getRegionId(), sortedReplicaSet); + }); + + return result; + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/heartbeat/HeartbeatCache.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/heartbeat/HeartbeatCache.java new file mode 100644 index 000000000000..734d1868337b --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/heartbeat/HeartbeatCache.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.manager.load.heartbeat; + +import org.apache.iotdb.commons.cluster.NodeStatus; + +import java.util.LinkedList; + +/** HeartbeatCache caches and maintains all the heartbeat data */ +public class HeartbeatCache implements IHeartbeatStatistic { + + // Cache heartbeat samples + private static final int maximumWindowSize = 100; + private final LinkedList slidingWindow; + + // For guiding queries, the higher the score the higher the load + private volatile float loadScore; + // For showing cluster + private volatile NodeStatus status; + + public HeartbeatCache() { + this.slidingWindow = new LinkedList<>(); + + this.loadScore = 0; + this.status = NodeStatus.Running; + } + + @Override + public void cacheHeartBeat(HeartbeatPackage newHeartbeat) { + synchronized (slidingWindow) { + // Only sequential heartbeats are accepted. + // And un-sequential heartbeats will be discarded. 
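+      // That is, a sample is cached only when the window is empty or its sendTimestamp is strictly newer than the last cached one.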
+ if (slidingWindow.size() == 0 + || slidingWindow.getLast().getSendTimestamp() < newHeartbeat.getSendTimestamp()) { + slidingWindow.add(newHeartbeat); + } + + while (slidingWindow.size() > maximumWindowSize) { + slidingWindow.removeFirst(); + } + } + } + + @Override + public void updateLoadStatistic() { + long lastSendTime = 0; + synchronized (slidingWindow) { + if (slidingWindow.size() > 0) { + lastSendTime = slidingWindow.getLast().getSendTimestamp(); + } + } + + // TODO: Optimize + loadScore = -lastSendTime; + if (System.currentTimeMillis() - lastSendTime > 20_000) { + status = NodeStatus.Unknown; + } else { + status = NodeStatus.Running; + } + } + + @Override + public float getLoadScore() { + // Return a copy of loadScore + switch (status) { + case Running: + return loadScore; + case Unknown: + default: + // The Unknown Node will get the highest loadScore + return Float.MAX_VALUE; + } + } + + @Override + public NodeStatus getNodeStatus() { + // Return a copy of status + switch (status) { + case Running: + return NodeStatus.Running; + case Unknown: + default: + return NodeStatus.Unknown; + } + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/heartbeat/HeartbeatPackage.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/heartbeat/HeartbeatPackage.java new file mode 100644 index 000000000000..66327611aacb --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/heartbeat/HeartbeatPackage.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.manager.load.heartbeat; + +public class HeartbeatPackage { + + // Unit: ms + private final long sendTimestamp; + private final long receiveTimestamp; + + public HeartbeatPackage(long sendTimestamp, long receiveTimestamp) { + this.sendTimestamp = sendTimestamp; + this.receiveTimestamp = receiveTimestamp; + } + + public long getSendTimestamp() { + return sendTimestamp; + } + + public long getReceiveTimestamp() { + return receiveTimestamp; + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/heartbeat/IHeartbeatStatistic.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/heartbeat/IHeartbeatStatistic.java new file mode 100644 index 000000000000..413f54792a01 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/heartbeat/IHeartbeatStatistic.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.manager.load.heartbeat; + +import org.apache.iotdb.commons.cluster.NodeStatus; + +/** All the statistic interfaces that provided by HeartbeatCache */ +public interface IHeartbeatStatistic { + + /** + * Cache the newest HeartbeatPackage + * + * @param newHeartbeat The newest HeartbeatData + */ + void cacheHeartBeat(HeartbeatPackage newHeartbeat); + + /** Invoking periodically to update node load statistics */ + void updateLoadStatistic(); + + /** @return The latest load score of a node, the higher the score the higher the load */ + float getLoadScore(); + + /** @return The latest status of a node for showing cluster */ + NodeStatus getNodeStatus(); +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/partition/DataPartitionRule.java b/confignode/src/main/java/org/apache/iotdb/confignode/partition/DataPartitionRule.java deleted file mode 100644 index 1593c6e919c1..000000000000 --- a/confignode/src/main/java/org/apache/iotdb/confignode/partition/DataPartitionRule.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.iotdb.confignode.partition; - -import org.apache.iotdb.tsfile.utils.Pair; - -import java.util.ArrayList; -import java.util.List; - -/** - * DataPartitionRule is used to hold real-time write-load allocation rules i.e. rules = [(0, 0.3), - * (1, 0.2), (2. 
0.5)] means allocate 30% of the write-load to DataRegion-0 and 20% to DataRegion-1 - * and 50% to DataRegion-2 - */ -public class DataPartitionRule { - // List> - private final List> rules; - - public DataPartitionRule() { - this.rules = new ArrayList<>(); - } - - public DataPartitionRule(List> rules) { - this.rules = rules; - } - - public void addDataPartitionRule(int dataRegionID, double ratio) { - this.rules.add(new Pair<>(dataRegionID, ratio)); - } - - public List> getDataPartitionRule() { - return this.rules; - } -} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/AuthorInfo.java b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/AuthorInfo.java index a27c81fd4377..6fa365235d9d 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/AuthorInfo.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/AuthorInfo.java @@ -26,74 +26,104 @@ import org.apache.iotdb.commons.auth.entity.PrivilegeType; import org.apache.iotdb.commons.auth.entity.Role; import org.apache.iotdb.commons.auth.entity.User; +import org.apache.iotdb.commons.conf.CommonConfig; +import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.conf.IoTDBConstant; +import org.apache.iotdb.commons.snapshot.SnapshotProcessor; import org.apache.iotdb.commons.utils.AuthUtils; +import org.apache.iotdb.commons.utils.FileUtils; +import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; import org.apache.iotdb.confignode.consensus.request.auth.AuthorReq; import org.apache.iotdb.confignode.consensus.response.PermissionInfoResp; +import org.apache.iotdb.confignode.rpc.thrift.TPermissionInfoResp; +import org.apache.iotdb.confignode.rpc.thrift.TRoleResp; +import org.apache.iotdb.confignode.rpc.thrift.TUserResp; import org.apache.iotdb.rpc.RpcUtils; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.File; +import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; -public class AuthorInfo { +public class AuthorInfo implements SnapshotProcessor { private static final Logger logger = LoggerFactory.getLogger(AuthorInfo.class); + private static final CommonConfig commonConfig = CommonDescriptor.getInstance().getConfig(); private IAuthorizer authorizer; - { + public AuthorInfo() { try { authorizer = BasicAuthorizer.getInstance(); } catch (AuthException e) { - logger.error("get user or role permissionInfo failed", e); + logger.error("get user or role permissionInfo failed because ", e); } } - public TSStatus login(String username, String password) { + public TPermissionInfoResp login(String username, String password) { boolean status; String loginMessage = null; TSStatus tsStatus = new TSStatus(); + TPermissionInfoResp result = new TPermissionInfoResp(); try { status = authorizer.login(username, password); + if (status) { + // Bring this user's permission information back to the datanode for caching + result = getUserPermissionInfo(username); + result.setStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS, "Login successfully")); + } else { + result = AuthUtils.generateEmptyPermissionInfoResp(); + } } catch (AuthException e) { - logger.info("meet error while logging in.", e); + logger.error("meet error while logging in.", e); status = false; loginMessage = e.getMessage(); } - if 
(status) { - tsStatus.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - tsStatus.setMessage("Login successfully"); - } else { + if (!status) { tsStatus.setMessage(loginMessage != null ? loginMessage : "Authentication failed."); tsStatus.setCode(TSStatusCode.WRONG_LOGIN_PASSWORD_ERROR.getStatusCode()); + result.setStatus(tsStatus); } - return tsStatus; + return result; } - public TSStatus checkUserPrivileges(String username, List paths, int permission) { + public TPermissionInfoResp checkUserPrivileges( + String username, List paths, int permission) { boolean status = true; + TPermissionInfoResp result = new TPermissionInfoResp(); try { for (String path : paths) { if (!checkOnePath(username, path, permission)) { status = false; + break; } } } catch (AuthException e) { status = false; } if (status) { - return RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS); + try { + // Bring this user's permission information back to the datanode for caching + result = getUserPermissionInfo(username); + result.setStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS)); + } catch (AuthException e) { + result.setStatus( + RpcUtils.getStatus(TSStatusCode.EXECUTE_PERMISSION_EXCEPTION_ERROR, e.getMessage())); + } } else { - return RpcUtils.getStatus(TSStatusCode.NO_PERMISSION_ERROR); + result = AuthUtils.generateEmptyPermissionInfoResp(); + result.setStatus(RpcUtils.getStatus(TSStatusCode.NO_PERMISSION_ERROR)); } + return result; } private boolean checkOnePath(String username, String path, int permission) throws AuthException { @@ -297,10 +327,6 @@ public PermissionInfoResp executeListUserPrivileges(AuthorReq plan) throws AuthE for (PrivilegeType privilegeType : PrivilegeType.values()) { userPrivilegesList.add(privilegeType.toString()); } - permissionInfo.put(IoTDBConstant.COLUMN_PRIVILEGE, userPrivilegesList); - result.setStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS)); - result.setPermissionInfo(permissionInfo); - return result; } else { List rolePrivileges = new ArrayList<>(); for (PathPrivilege pathPrivilege : user.getPrivilegeList()) { @@ -324,23 +350,79 @@ public PermissionInfoResp executeListUserPrivileges(AuthorReq plan) throws AuthE } } permissionInfo.put(IoTDBConstant.COLUMN_ROLE, rolePrivileges); - permissionInfo.put(IoTDBConstant.COLUMN_PRIVILEGE, userPrivilegesList); - result.setStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS)); - result.setPermissionInfo(permissionInfo); - return result; } + permissionInfo.put(IoTDBConstant.COLUMN_PRIVILEGE, userPrivilegesList); + result.setStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS)); + result.setPermissionInfo(permissionInfo); + return result; } - private static class AuthorInfoHolder { + @Override + public boolean processTakeSnapshot(File snapshotDir) throws TException, IOException { + return authorizer.processTakeSnapshot(snapshotDir); + } - private static final AuthorInfo INSTANCE = new AuthorInfo(); + @Override + public void processLoadSnapshot(File snapshotDir) throws TException, IOException { + authorizer.processLoadSnapshot(snapshotDir); + } - private AuthorInfoHolder() { - // empty constructor + @TestOnly + public void clear() throws AuthException { + File userFolder = new File(commonConfig.getUserFolder()); + if (userFolder.exists()) { + FileUtils.deleteDirectory(userFolder); + } + File roleFolder = new File(commonConfig.getRoleFolder()); + if (roleFolder.exists()) { + FileUtils.deleteDirectory(roleFolder); } + authorizer.reset(); } - public static AuthorInfo getInstance() { - return AuthorInfo.AuthorInfoHolder.INSTANCE; + 
/** + * Save the user's permission information,Bring back the DataNode for caching + * + * @param username The username of the user that needs to be cached + */ + public TPermissionInfoResp getUserPermissionInfo(String username) throws AuthException { + TPermissionInfoResp result = new TPermissionInfoResp(); + TUserResp tUserResp = new TUserResp(); + TRoleResp tRoleResp = new TRoleResp(); + Map tRoleRespMap = new HashMap(); + List userPrivilegeList = new ArrayList<>(); + List rolePrivilegeList = new ArrayList<>(); + + // User permission information + User user = authorizer.getUser(username); + if (user.getPrivilegeList() != null) { + for (PathPrivilege pathPrivilege : user.getPrivilegeList()) { + userPrivilegeList.add(pathPrivilege.getPath()); + String privilegeIdList = pathPrivilege.getPrivileges().toString(); + userPrivilegeList.add(privilegeIdList.substring(1, privilegeIdList.length() - 1)); + } + tUserResp.setUsername(user.getName()); + tUserResp.setPassword(user.getPassword()); + tUserResp.setPrivilegeList(userPrivilegeList); + tUserResp.setRoleList(user.getRoleList()); + } + + // Permission information for roles owned by users + if (user.getRoleList() != null) { + for (String roleName : user.getRoleList()) { + Role role = authorizer.getRole(roleName); + tRoleResp.setRoleName(roleName); + for (PathPrivilege pathPrivilege : role.getPrivilegeList()) { + rolePrivilegeList.add(pathPrivilege.getPath()); + String privilegeIdList = pathPrivilege.getPrivileges().toString(); + rolePrivilegeList.add(privilegeIdList.substring(1, privilegeIdList.length() - 1)); + } + tRoleResp.setPrivilegeList(rolePrivilegeList); + tRoleRespMap.put(roleName, tRoleResp); + } + } + result.setUserInfo(tUserResp); + result.setRoleInfo(tRoleRespMap); + return result; } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ClusterSchemaInfo.java b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ClusterSchemaInfo.java index 42e8976494b5..bd292f106105 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ClusterSchemaInfo.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ClusterSchemaInfo.java @@ -18,17 +18,15 @@ */ package org.apache.iotdb.confignode.persistence; -import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; -import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.exception.MetadataException; import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.commons.snapshot.SnapshotProcessor; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.confignode.consensus.request.read.CountStorageGroupReq; import org.apache.iotdb.confignode.consensus.request.read.GetStorageGroupReq; -import org.apache.iotdb.confignode.consensus.request.write.CreateRegionsReq; +import org.apache.iotdb.confignode.consensus.request.write.AdjustMaxRegionGroupCountReq; import org.apache.iotdb.confignode.consensus.request.write.DeleteStorageGroupReq; import org.apache.iotdb.confignode.consensus.request.write.SetDataReplicationFactorReq; import org.apache.iotdb.confignode.consensus.request.write.SetSchemaReplicationFactorReq; @@ -37,24 +35,27 @@ import org.apache.iotdb.confignode.consensus.request.write.SetTimePartitionIntervalReq; import 
org.apache.iotdb.confignode.consensus.response.CountStorageGroupResp; import org.apache.iotdb.confignode.consensus.response.StorageGroupSchemaResp; +import org.apache.iotdb.confignode.exception.StorageGroupNotExistsException; import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; -import org.apache.iotdb.db.metadata.mnode.StorageGroupMNode; import org.apache.iotdb.db.metadata.mtree.MTreeAboveSG; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.iotdb.tsfile.utils.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.UUID; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -68,30 +69,31 @@ public class ClusterSchemaInfo implements SnapshotProcessor { // StorageGroup read write lock private final ReentrantReadWriteLock storageGroupReadWriteLock; - - private MTreeAboveSG mTree; - - // The size of the buffer used for snapshot(temporary value) - private final int bufferSize = 10 * 1024 * 1024; + private final MTreeAboveSG mTree; private final String snapshotFileName = "cluster_schema.bin"; - private ClusterSchemaInfo() { + public ClusterSchemaInfo() throws IOException { storageGroupReadWriteLock = new ReentrantReadWriteLock(); try { mTree = new MTreeAboveSG(); } catch (MetadataException e) { LOGGER.error("Can't construct StorageGroupInfo", e); + throw new IOException(e); } } + // ====================================================== + // Consensus read/write interfaces + // ====================================================== + /** - * Persistence new StorageGroupSchema + * Cache StorageGroupSchema * * @param req SetStorageGroupReq - * @return SUCCESS_STATUS if the StorageGroup is set successfully. PERSISTENCE_FAILURE if fail to - * set StorageGroup in MTreeAboveSG. + * @return SUCCESS_STATUS if the StorageGroup is set successfully. CACHE_FAILURE if fail to set + * StorageGroup in MTreeAboveSG. 
*/ public TSStatus setStorageGroup(SetStorageGroupReq req) { TSStatus result = new TSStatus(); @@ -108,12 +110,10 @@ public TSStatus setStorageGroup(SetStorageGroupReq req) { .setStorageGroupSchema(storageGroupSchema); result.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - - LOGGER.info("Successfully set StorageGroup: {}", storageGroupSchema); } catch (MetadataException e) { LOGGER.error("Error StorageGroup name", e); result - .setCode(TSStatusCode.PERSISTENCE_FAILURE.getStatusCode()) + .setCode(TSStatusCode.SET_STORAGE_GROUP_FAILED.getStatusCode()) .setMessage("Error StorageGroup name"); } finally { storageGroupReadWriteLock.writeLock().unlock(); @@ -132,10 +132,10 @@ public TSStatus deleteStorageGroup(DeleteStorageGroupReq req) { storageGroupReadWriteLock.writeLock().lock(); try { // Delete StorageGroup - TStorageGroupSchema storageGroupSchema = req.getStorageGroup(); - PartialPath partialPathName = new PartialPath(storageGroupSchema.getName()); + String storageGroup = req.getName(); + PartialPath partialPathName = new PartialPath(storageGroup); mTree.deleteStorageGroup(partialPathName); - PartitionInfo.getInstance().deleteStorageGroup(req); + result.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } catch (MetadataException e) { LOGGER.warn("Storage group not exist", e); @@ -148,39 +148,48 @@ public TSStatus deleteStorageGroup(DeleteStorageGroupReq req) { return result; } - /** - * Persistence new RegionGroupIds on specific StorageGroupSchema - * - * @param req CreateRegionsReq - * @return SUCCESS_STATUS if the new RegionGroupIds is persistence successfully. - * PERSISTENCE_FAILURE if fail to find StorageGroup in MTreeAboveSG. - */ - public TSStatus createRegions(CreateRegionsReq req) { - TSStatus result = new TSStatus(); - storageGroupReadWriteLock.writeLock().lock(); + /** @return The number of matched StorageGroups by the specific StorageGroup pattern */ + public CountStorageGroupResp countMatchedStorageGroups(CountStorageGroupReq req) { + CountStorageGroupResp result = new CountStorageGroupResp(); + storageGroupReadWriteLock.readLock().lock(); + try { + PartialPath patternPath = new PartialPath(req.getStorageGroupPattern()); + result.setCount(mTree.getBelongedStorageGroups(patternPath).size()); + result.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); + } catch (MetadataException e) { + LOGGER.error("Error StorageGroup name", e); + result.setStatus( + new TSStatus(TSStatusCode.STORAGE_GROUP_NOT_EXIST.getStatusCode()) + .setMessage("Error StorageGroup name")); + } finally { + storageGroupReadWriteLock.readLock().unlock(); + } + return result; + } + /** @return All StorageGroupSchemas that matches to the specific StorageGroup pattern */ + public StorageGroupSchemaResp getMatchedStorageGroupSchemas(GetStorageGroupReq req) { + StorageGroupSchemaResp result = new StorageGroupSchemaResp(); + storageGroupReadWriteLock.readLock().lock(); try { - for (Map.Entry reqEntry : req.getRegionMap().entrySet()) { - PartialPath partialPathName = new PartialPath(reqEntry.getKey()); - TStorageGroupSchema storageGroupSchema = - mTree.getStorageGroupNodeByStorageGroupPath(partialPathName).getStorageGroupSchema(); - switch (reqEntry.getValue().getRegionId().getType()) { - case SchemaRegion: - storageGroupSchema.getSchemaRegionGroupIds().add(reqEntry.getValue().getRegionId()); - break; - case DataRegion: - storageGroupSchema.getDataRegionGroupIds().add(reqEntry.getValue().getRegionId()); - break; - } + Map schemaMap = new HashMap<>(); + PartialPath patternPath = new 
PartialPath(req.getStorageGroupPattern()); + List matchedPaths = mTree.getBelongedStorageGroups(patternPath); + for (PartialPath path : matchedPaths) { + schemaMap.put( + path.getFullPath(), + mTree.getStorageGroupNodeByStorageGroupPath(path).getStorageGroupSchema()); } - result.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + result.setSchemaMap(schemaMap); + result.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); } catch (MetadataException e) { LOGGER.error("Error StorageGroup name", e); - result.setCode(TSStatusCode.PERSISTENCE_FAILURE.getStatusCode()); + result.setStatus( + new TSStatus(TSStatusCode.STORAGE_GROUP_NOT_EXIST.getStatusCode()) + .setMessage("Error StorageGroup name")); } finally { - storageGroupReadWriteLock.writeLock().unlock(); + storageGroupReadWriteLock.readLock().unlock(); } - return result; } @@ -197,6 +206,7 @@ public TSStatus setTTL(SetTTLReq req) { result.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } else { result.setCode(TSStatusCode.STORAGE_GROUP_NOT_EXIST.getStatusCode()); + result.setMessage("StorageGroup does not exist"); } } catch (MetadataException e) { LOGGER.error("Error StorageGroup name", e); @@ -284,7 +294,43 @@ public TSStatus setTimePartitionInterval(SetTimePartitionIntervalReq req) { return result; } - /** @return List, all storageGroups' name */ + /** + * Adjust the maximum RegionGroup count of each StorageGroup + * + * @param req AdjustMaxRegionGroupCountReq + * @return SUCCESS_STATUS + */ + public TSStatus adjustMaxRegionGroupCount(AdjustMaxRegionGroupCountReq req) { + TSStatus result = new TSStatus(); + storageGroupReadWriteLock.writeLock().lock(); + try { + for (Map.Entry> entry : + req.getMaxRegionGroupCountMap().entrySet()) { + PartialPath path = new PartialPath(entry.getKey()); + TStorageGroupSchema storageGroupSchema = + mTree.getStorageGroupNodeByStorageGroupPath(path).getStorageGroupSchema(); + storageGroupSchema.setMaxSchemaRegionGroupCount(entry.getValue().getLeft()); + storageGroupSchema.setMaxDataRegionGroupCount(entry.getValue().getRight()); + } + result.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } catch (MetadataException e) { + LOGGER.error("Error StorageGroup name", e); + result.setCode(TSStatusCode.STORAGE_GROUP_NOT_EXIST.getStatusCode()); + } finally { + storageGroupReadWriteLock.writeLock().unlock(); + } + return result; + } + + // ====================================================== + // Leader scheduling interfaces + // ====================================================== + + /** + * Only leader use this interface. + * + * @return List, all storageGroups' name + */ public List getStorageGroupNames() { List storageGroups = new ArrayList<>(); storageGroupReadWriteLock.readLock().lock(); @@ -299,53 +345,50 @@ public List getStorageGroupNames() { return storageGroups; } - /** @return The number of matched StorageGroups by the specific StorageGroup pattern */ - public CountStorageGroupResp countMatchedStorageGroups(CountStorageGroupReq req) { - CountStorageGroupResp result = new CountStorageGroupResp(); + /** + * Only leader use this interface. Check if the specific StorageGroup already exists. 
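+   * The check is delegated to MTree#checkStorageGroupAlreadySet under the StorageGroup read lock.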
+ * + * @param storageName The specific StorageGroup's name + * @throws MetadataException If the specific StorageGroup already exists + */ + public void checkContainsStorageGroup(String storageName) throws MetadataException { storageGroupReadWriteLock.readLock().lock(); try { - PartialPath patternPath = new PartialPath(req.getStorageGroupPattern()); - result.setCount(mTree.getBelongedStorageGroups(patternPath).size()); - result.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); - } catch (MetadataException e) { - LOGGER.error("Error StorageGroup name", e); - result.setStatus( - new TSStatus(TSStatusCode.STORAGE_GROUP_NOT_EXIST.getStatusCode()) - .setMessage("Error StorageGroup name")); + mTree.checkStorageGroupAlreadySet(new PartialPath(storageName)); } finally { storageGroupReadWriteLock.readLock().unlock(); } - return result; } - /** @return All StorageGroupSchemas that matches to the specific StorageGroup pattern */ - public StorageGroupSchemaResp getMatchedStorageGroupSchemas(GetStorageGroupReq req) { - StorageGroupSchemaResp result = new StorageGroupSchemaResp(); + /** + * Only leader use this interface. Get the specific StorageGroupSchema + * + * @param storageGroup StorageGroupName + * @return The specific StorageGroupSchema + * @throws StorageGroupNotExistsException When the specific StorageGroup doesn't exist + */ + public TStorageGroupSchema getMatchedStorageGroupSchemaByName(String storageGroup) + throws StorageGroupNotExistsException { storageGroupReadWriteLock.readLock().lock(); try { - Map schemaMap = new HashMap<>(); - PartialPath patternPath = new PartialPath(req.getStorageGroupPattern()); - List matchedPaths = mTree.getBelongedStorageGroups(patternPath); - for (PartialPath path : matchedPaths) { - schemaMap.put( - path.getFullPath(), - mTree.getStorageGroupNodeByStorageGroupPath(path).getStorageGroupSchema()); - } - result.setSchemaMap(schemaMap); - result.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); + return mTree + .getStorageGroupNodeByStorageGroupPath(new PartialPath(storageGroup)) + .getStorageGroupSchema(); } catch (MetadataException e) { - LOGGER.error("Error StorageGroup name", e); - result.setStatus( - new TSStatus(TSStatusCode.STORAGE_GROUP_NOT_EXIST.getStatusCode()) - .setMessage("Error StorageGroup name")); + throw new StorageGroupNotExistsException(storageGroup); } finally { storageGroupReadWriteLock.readLock().unlock(); } - return result; } - /** @return All StorageGroupSchemas that matches to the specific StorageGroup patterns */ - public Map getDeleteStorageGroups(List rawPathList) { + /** + * Only leader use this interface. Get the matched StorageGroupSchemas. 
+ * + * @param rawPathList StorageGroups' path patterns or full paths + * @return All StorageGroupSchemas that matches to the specific StorageGroup patterns + */ + public Map getMatchedStorageGroupSchemasByName( + List rawPathList) { Map schemaMap = new HashMap<>(); storageGroupReadWriteLock.readLock().lock(); try { @@ -365,72 +408,32 @@ public Map getDeleteStorageGroups(List rawP return schemaMap; } - /** @return True if StorageGroupInfo contains the specific StorageGroup */ - public boolean containsStorageGroup(String storageName) { - boolean result; - storageGroupReadWriteLock.readLock().lock(); - try { - result = mTree.isStorageGroupAlreadySet(new PartialPath(storageName)); - } catch (IllegalPathException e) { - LOGGER.error("Error StorageGroup name", e); - return false; - } finally { - storageGroupReadWriteLock.readLock().unlock(); - } - return result; - } - /** - * Get the specific StorageGroupSchema + * Only leader use this interface. Get the maxRegionGroupCount of specific StorageGroup. * * @param storageGroup StorageGroupName - * @return The specific StorageGroupSchema - * @throws MetadataException from MTree + * @param consensusGroupType SchemaRegion or DataRegion + * @return maxSchemaRegionGroupCount or maxDataRegionGroupCount */ - public TStorageGroupSchema getMatchedStorageGroupSchemaByName(String storageGroup) - throws MetadataException { + public int getMaxRegionGroupCount(String storageGroup, TConsensusGroupType consensusGroupType) { storageGroupReadWriteLock.readLock().lock(); try { - return mTree - .getStorageGroupNodeByStorageGroupPath(new PartialPath(storageGroup)) - .getStorageGroupSchema(); - } finally { - storageGroupReadWriteLock.readLock().unlock(); - } - } - - /** - * Get the SchemaRegionGroupIds or DataRegionGroupIds from the specific StorageGroup. 
- * - * @param storageGroup StorageGroupName - * @param type SchemaRegion or DataRegion - * @return All SchemaRegionGroupIds when type is SchemaRegion, and all DataRegionGroupIds when - * type is DataRegion - */ - public List getRegionGroupIds(String storageGroup, TConsensusGroupType type) { - List result; - storageGroupReadWriteLock.readLock().lock(); - try { - StorageGroupMNode mNode = - (StorageGroupMNode) - mTree.getStorageGroupNodeByStorageGroupPath(new PartialPath(storageGroup)); - switch (type) { + PartialPath path = new PartialPath(storageGroup); + TStorageGroupSchema storageGroupSchema = + mTree.getStorageGroupNodeByStorageGroupPath(path).getStorageGroupSchema(); + switch (consensusGroupType) { case SchemaRegion: - result = mNode.getStorageGroupSchema().getSchemaRegionGroupIds(); - break; + return storageGroupSchema.getMaxSchemaRegionGroupCount(); case DataRegion: - result = mNode.getStorageGroupSchema().getDataRegionGroupIds(); - break; default: - result = new ArrayList<>(); + return storageGroupSchema.getMaxDataRegionGroupCount(); } } catch (MetadataException e) { - LOGGER.error("Error StorageGroup name", e); - return new ArrayList<>(); + LOGGER.warn("Error StorageGroup name", e); + return -1; } finally { storageGroupReadWriteLock.readLock().unlock(); } - return result; } @Override @@ -445,20 +448,26 @@ public boolean processTakeSnapshot(File snapshotDir) throws IOException { } File tmpFile = new File(snapshotFile.getAbsolutePath() + "-" + UUID.randomUUID()); - ByteBuffer buffer = ByteBuffer.allocate(bufferSize); storageGroupReadWriteLock.readLock().lock(); try { try (FileOutputStream fileOutputStream = new FileOutputStream(tmpFile); - FileChannel fileChannel = fileOutputStream.getChannel()) { - mTree.serialize(buffer); - buffer.flip(); - fileChannel.write(buffer); + BufferedOutputStream outputStream = new BufferedOutputStream(fileOutputStream)) { + // Take snapshot for MTree + mTree.serialize(outputStream); + outputStream.flush(); } + return tmpFile.renameTo(snapshotFile); } finally { - buffer.clear(); - tmpFile.delete(); + for (int retry = 0; retry < 5; retry++) { + if (!tmpFile.exists() || tmpFile.delete()) { + break; + } else { + LOGGER.warn( + "Can't delete temporary snapshot file: {}, retrying...", tmpFile.getAbsolutePath()); + } + } storageGroupReadWriteLock.readLock().unlock(); } } @@ -474,35 +483,61 @@ public void processLoadSnapshot(File snapshotDir) throws IOException { return; } storageGroupReadWriteLock.writeLock().lock(); - ByteBuffer buffer = ByteBuffer.allocate(bufferSize); try (FileInputStream fileInputStream = new FileInputStream(snapshotFile); - FileChannel fileChannel = fileInputStream.getChannel()) { - // get buffer from fileChannel - fileChannel.read(buffer); + BufferedInputStream bufferedInputStream = new BufferedInputStream(fileInputStream)) { + // Load snapshot of MTree mTree.clear(); - buffer.flip(); - mTree.deserialize(buffer); + mTree.deserialize(bufferedInputStream); } finally { - buffer.clear(); storageGroupReadWriteLock.writeLock().unlock(); } } - @TestOnly - public void clear() { - mTree.clear(); + public Pair, Set> getNodesListInGivenLevel( + PartialPath partialPath, int level) { + Pair, Set> matchedPathsInNextLevel = + new Pair(new HashSet<>(), new HashSet<>()); + storageGroupReadWriteLock.readLock().lock(); + try { + matchedPathsInNextLevel = mTree.getNodesListInGivenLevel(partialPath, level, true, null); + } catch (MetadataException e) { + LOGGER.error("Error get matched paths in given level.", e); + } finally { + 
storageGroupReadWriteLock.readLock().unlock(); + } + return matchedPathsInNextLevel; } - private static class StorageGroupInfoHolder { - - private static final ClusterSchemaInfo INSTANCE = new ClusterSchemaInfo(); + public Pair, Set> getChildNodePathInNextLevel(PartialPath partialPath) { + Pair, Set> matchedPathsInNextLevel = + new Pair<>(new HashSet<>(), new HashSet<>()); + storageGroupReadWriteLock.readLock().lock(); + try { + matchedPathsInNextLevel = mTree.getChildNodePathInNextLevel(partialPath); + } catch (MetadataException e) { + LOGGER.error("Error get matched paths in next level.", e); + } finally { + storageGroupReadWriteLock.readLock().unlock(); + } + return matchedPathsInNextLevel; + } - private StorageGroupInfoHolder() { - // Empty constructor + public Pair, Set> getChildNodeNameInNextLevel(PartialPath partialPath) { + Pair, Set> matchedNamesInNextLevel = + new Pair<>(new HashSet<>(), new HashSet<>()); + storageGroupReadWriteLock.readLock().lock(); + try { + matchedNamesInNextLevel = mTree.getChildNodeNameInNextLevel(partialPath); + } catch (MetadataException e) { + LOGGER.error("Error get matched names in next level.", e); + } finally { + storageGroupReadWriteLock.readLock().unlock(); } + return matchedNamesInNextLevel; } - public static ClusterSchemaInfo getInstance() { - return StorageGroupInfoHolder.INSTANCE; + @TestOnly + public void clear() { + mTree.clear(); } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/NodeInfo.java b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/NodeInfo.java index 38af6e207f85..2ee1820d23f2 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/NodeInfo.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/NodeInfo.java @@ -18,8 +18,11 @@ */ package org.apache.iotdb.confignode.persistence; +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.snapshot.SnapshotProcessor; import org.apache.iotdb.commons.utils.NodeUrlUtils; import org.apache.iotdb.commons.utils.TestOnly; import org.apache.iotdb.confignode.conf.ConfigNodeConstant; @@ -27,10 +30,20 @@ import org.apache.iotdb.confignode.consensus.request.read.GetDataNodeInfoReq; import org.apache.iotdb.confignode.consensus.request.write.ApplyConfigNodeReq; import org.apache.iotdb.confignode.consensus.request.write.RegisterDataNodeReq; -import org.apache.iotdb.confignode.consensus.response.DataNodeLocationsResp; -import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.confignode.consensus.request.write.RemoveConfigNodeReq; +import org.apache.iotdb.confignode.consensus.response.DataNodeInfosResp; +import org.apache.iotdb.db.service.metrics.MetricsService; +import org.apache.iotdb.db.service.metrics.enums.Metric; +import org.apache.iotdb.db.service.metrics.enums.Tag; +import org.apache.iotdb.metrics.config.MetricConfigDescriptor; +import org.apache.iotdb.metrics.utils.MetricLevel; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils; +import org.apache.thrift.TException; +import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.transport.TIOStreamTransport; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,15 +51,18 @@ import 
java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; -import java.nio.ByteBuffer; +import java.io.InputStream; +import java.io.OutputStream; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Properties; import java.util.Set; +import java.util.UUID; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicInteger; @@ -56,7 +72,7 @@ * The NodeInfo stores cluster node information. The cluster node information including: 1. DataNode * information 2. ConfigNode information */ -public class NodeInfo { +public class NodeInfo implements SnapshotProcessor { private static final Logger LOGGER = LoggerFactory.getLogger(NodeInfo.class); @@ -71,41 +87,61 @@ public class NodeInfo { ConfigNodeDescriptor.getInstance().getConf().getSchemaReplicationFactor(), ConfigNodeDescriptor.getInstance().getConf().getDataReplicationFactor()); - private final ReentrantReadWriteLock configNodeInfoReadWriteLock; - // Online ConfigNodes + private final ReentrantReadWriteLock configNodeInfoReadWriteLock; private final Set onlineConfigNodes; - private final ReentrantReadWriteLock dataNodeInfoReadWriteLock; - - // TODO: serialize and deserialize - private AtomicInteger nextDataNodeId = new AtomicInteger(0); - // Online DataNodes - // TODO: serialize and deserialize - private final ConcurrentNavigableMap onlineDataNodes = - new ConcurrentSkipListMap(); + private final ReentrantReadWriteLock dataNodeInfoReadWriteLock; + private final AtomicInteger nextNodeId = new AtomicInteger(1); + private final ConcurrentNavigableMap onlineDataNodes = + new ConcurrentSkipListMap<>(); // For remove or draining DataNode // TODO: implement - // TODO: serialize and deserialize private final Set drainingDataNodes = new HashSet<>(); - private NodeInfo() { + private final String snapshotFileName = "node_info.bin"; + + public NodeInfo() { this.dataNodeInfoReadWriteLock = new ReentrantReadWriteLock(); this.configNodeInfoReadWriteLock = new ReentrantReadWriteLock(); this.onlineConfigNodes = new HashSet<>(ConfigNodeDescriptor.getInstance().getConf().getConfigNodeList()); } - public boolean containsValue(TDataNodeLocation info) { + public void addMetrics() { + if (MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) { + MetricsService.getInstance() + .getMetricManager() + .getOrCreateAutoGauge( + Metric.CONFIG_NODE.toString(), + MetricLevel.CORE, + onlineConfigNodes, + o -> getOnlineDataNodeCount(), + Tag.NAME.toString(), + "online"); + MetricsService.getInstance() + .getMetricManager() + .getOrCreateAutoGauge( + Metric.DATA_NODE.toString(), + MetricLevel.CORE, + onlineDataNodes, + Map::size, + Tag.NAME.toString(), + "online"); + } + } + + /** @return true if the specific DataNode is now online */ + public boolean isOnlineDataNode(TDataNodeLocation info) { boolean result = false; dataNodeInfoReadWriteLock.readLock().lock(); try { - for (Map.Entry entry : onlineDataNodes.entrySet()) { + for (Map.Entry entry : onlineDataNodes.entrySet()) { info.setDataNodeId(entry.getKey()); - if (entry.getValue().equals(info)) { + if (entry.getValue().getLocation().equals(info)) { result = true; break; } @@ -117,10 +153,6 @@ public boolean containsValue(TDataNodeLocation info) { return result; } - public void put(int dataNodeID, TDataNodeLocation info) { - 
onlineDataNodes.put(dataNodeID, info); - } - /** * Persist DataNode info * @@ -129,29 +161,29 @@ public void put(int dataNodeID, TDataNodeLocation info) { */ public TSStatus registerDataNode(RegisterDataNodeReq registerDataNodeReq) { TSStatus result; - TDataNodeLocation info = registerDataNodeReq.getLocation(); + TDataNodeInfo info = registerDataNodeReq.getInfo(); dataNodeInfoReadWriteLock.writeLock().lock(); try { - onlineDataNodes.put(info.getDataNodeId(), info); - if (nextDataNodeId.get() < registerDataNodeReq.getLocation().getDataNodeId()) { - // In this case, at least one Datanode is registered with the leader node, - // so the nextDataNodeID of the followers needs to be added - nextDataNodeId.getAndIncrement(); + onlineDataNodes.put(info.getLocation().getDataNodeId(), info); + + // To ensure that the nextNodeId is updated correctly when + // the ConfigNode-followers concurrently processes RegisterDataNodeReq, + // we need to add a synchronization lock here + synchronized (nextNodeId) { + if (nextNodeId.get() < info.getLocation().getDataNodeId()) { + nextNodeId.set(info.getLocation().getDataNodeId()); + } } + result = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - if (nextDataNodeId.get() < minimumDataNode) { + if (nextNodeId.get() < minimumDataNode) { result.setMessage( String.format( "To enable IoTDB-Cluster's data service, please register %d more IoTDB-DataNode", - minimumDataNode - nextDataNodeId.get())); - } else if (nextDataNodeId.get() == minimumDataNode) { + minimumDataNode - nextNodeId.get())); + } else if (nextNodeId.get() == minimumDataNode) { result.setMessage("IoTDB-Cluster could provide data service, now enjoy yourself!"); } - - LOGGER.info( - "Successfully register DataNode: {}. Current online DataNodes: {}", - info, - onlineDataNodes); } finally { dataNodeInfoReadWriteLock.writeLock().unlock(); } @@ -165,18 +197,17 @@ public TSStatus registerDataNode(RegisterDataNodeReq registerDataNodeReq) { * @return The specific DataNode's info or all DataNode info if dataNodeId in * QueryDataNodeInfoPlan is -1 */ - public DataNodeLocationsResp getDataNodeInfo(GetDataNodeInfoReq getDataNodeInfoReq) { - DataNodeLocationsResp result = new DataNodeLocationsResp(); + public DataNodeInfosResp getDataNodeInfo(GetDataNodeInfoReq getDataNodeInfoReq) { + DataNodeInfosResp result = new DataNodeInfosResp(); result.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); int dataNodeId = getDataNodeInfoReq.getDataNodeID(); dataNodeInfoReadWriteLock.readLock().lock(); try { if (dataNodeId == -1) { - result.setDataNodeLocations(new HashMap<>(onlineDataNodes)); + result.setDataNodeInfoMap(new HashMap<>(onlineDataNodes)); } else { - - result.setDataNodeLocations( + result.setDataNodeInfoMap( Collections.singletonMap(dataNodeId, onlineDataNodes.get(dataNodeId))); } } finally { @@ -186,6 +217,7 @@ public DataNodeLocationsResp getDataNodeInfo(GetDataNodeInfoReq getDataNodeInfoR return result; } + /** Return the number of online DataNodes */ public int getOnlineDataNodeCount() { int result; dataNodeInfoReadWriteLock.readLock().lock(); @@ -197,22 +229,37 @@ public int getOnlineDataNodeCount() { return result; } - public List getOnlineDataNodes() { - List result; + /** Return the number of total cpu cores in online DataNodes */ + public int getTotalCpuCoreCount() { + int result = 0; dataNodeInfoReadWriteLock.readLock().lock(); try { - result = new ArrayList<>(onlineDataNodes.values()); + for (TDataNodeInfo info : onlineDataNodes.values()) { + result += info.getCpuCoreNum(); + } 
} finally { dataNodeInfoReadWriteLock.readLock().unlock(); } return result; } - public TDataNodeLocation getOnlineDataNode(int dataNodeId) { - TDataNodeLocation result; + /** + * Return the specific online DataNode + * + * @param dataNodeId Specific DataNodeId + * @return All online DataNodes if dataNodeId equals -1. And return the specific DataNode + * otherwise. + */ + public List getOnlineDataNodes(int dataNodeId) { + List result; dataNodeInfoReadWriteLock.readLock().lock(); try { - result = onlineDataNodes.get(dataNodeId); + // TODO: Check DataNode status, ensure the returned DataNode isn't removed + if (dataNodeId == -1) { + result = new ArrayList<>(onlineDataNodes.values()); + } else { + result = Collections.singletonList(onlineDataNodes.get(dataNodeId)); + } } finally { dataNodeInfoReadWriteLock.readLock().unlock(); } @@ -229,6 +276,15 @@ public TSStatus updateConfigNodeList(ApplyConfigNodeReq applyConfigNodeReq) { TSStatus status = new TSStatus(); configNodeInfoReadWriteLock.writeLock().lock(); try { + // To ensure that the nextNodeId is updated correctly when + // the ConfigNode-followers concurrently processes ApplyConfigNodeReq, + // we need to add a synchronization lock here + synchronized (nextNodeId) { + if (nextNodeId.get() < applyConfigNodeReq.getConfigNodeLocation().getConfigNodeId()) { + nextNodeId.set(applyConfigNodeReq.getConfigNodeLocation().getConfigNodeId()); + } + } + onlineConfigNodes.add(applyConfigNodeReq.getConfigNodeLocation()); storeConfigNode(); LOGGER.info( @@ -247,6 +303,34 @@ public TSStatus updateConfigNodeList(ApplyConfigNodeReq applyConfigNodeReq) { return status; } + /** + * Update ConfigNodeList both in memory and confignode-system.properties file + * + * @param removeConfigNodeReq RemoveConfigNodeReq + * @return REMOVE_CONFIGNODE_FAILED if remove online ConfigNode failed. + */ + public TSStatus removeConfigNodeList(RemoveConfigNodeReq removeConfigNodeReq) { + TSStatus status = new TSStatus(); + configNodeInfoReadWriteLock.writeLock().lock(); + try { + onlineConfigNodes.remove(removeConfigNodeReq.getConfigNodeLocation()); + storeConfigNode(); + LOGGER.info( + "Successfully remove ConfigNode: {}. 
Current ConfigNodeGroup: {}", + removeConfigNodeReq.getConfigNodeLocation(), + onlineConfigNodes); + status.setCode(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } catch (IOException e) { + LOGGER.error("Remove online ConfigNode failed.", e); + status.setCode(TSStatusCode.REMOVE_CONFIGNODE_FAILED.getStatusCode()); + status.setMessage( + "Remove ConfigNode failed because current ConfigNode can't store ConfigNode information."); + } finally { + configNodeInfoReadWriteLock.writeLock().unlock(); + } + return status; + } + private void storeConfigNode() throws IOException { Properties systemProperties = new Properties(); try (FileInputStream inputStream = new FileInputStream(systemPropertiesFile)) { @@ -254,13 +338,16 @@ private void storeConfigNode() throws IOException { } systemProperties.setProperty( "confignode_list", NodeUrlUtils.convertTConfigNodeUrls(new ArrayList<>(onlineConfigNodes))); - systemProperties.store(new FileOutputStream(systemPropertiesFile), ""); + try (FileOutputStream fileOutputStream = new FileOutputStream(systemPropertiesFile)) { + systemProperties.store(fileOutputStream, ""); + } } public List getOnlineConfigNodes() { List result; configNodeInfoReadWriteLock.readLock().lock(); try { + // TODO: Check ConfigNode status, ensure the returned ConfigNode isn't removed result = new ArrayList<>(onlineConfigNodes); } finally { configNodeInfoReadWriteLock.readLock().unlock(); @@ -268,36 +355,146 @@ public List getOnlineConfigNodes() { return result; } - public int generateNextDataNodeId() { - return nextDataNodeId.getAndIncrement(); + public int generateNextNodeId() { + return nextNodeId.getAndIncrement(); } - public void serialize(ByteBuffer buffer) { - // TODO: Serialize DataNodeInfo + @Override + public boolean processTakeSnapshot(File snapshotDir) throws IOException, TException { + File snapshotFile = new File(snapshotDir, snapshotFileName); + if (snapshotFile.exists() && snapshotFile.isFile()) { + LOGGER.error( + "Failed to take snapshot, because snapshot file [{}] is already exist.", + snapshotFile.getAbsolutePath()); + return false; + } + + File tmpFile = new File(snapshotFile.getAbsolutePath() + "-" + UUID.randomUUID()); + configNodeInfoReadWriteLock.readLock().lock(); + dataNodeInfoReadWriteLock.readLock().lock(); + try (FileOutputStream fileOutputStream = new FileOutputStream(tmpFile); + TIOStreamTransport tioStreamTransport = new TIOStreamTransport(fileOutputStream)) { + + TProtocol protocol = new TBinaryProtocol(tioStreamTransport); + + ReadWriteIOUtils.write(nextNodeId.get(), fileOutputStream); + + serializeOnlineDataNode(fileOutputStream, protocol); + + serializeDrainingDataNodes(fileOutputStream, protocol); + + fileOutputStream.flush(); + + fileOutputStream.close(); + + return tmpFile.renameTo(snapshotFile); + + } finally { + configNodeInfoReadWriteLock.readLock().unlock(); + dataNodeInfoReadWriteLock.readLock().unlock(); + for (int retry = 0; retry < 5; retry++) { + if (!tmpFile.exists() || tmpFile.delete()) { + break; + } else { + LOGGER.warn( + "Can't delete temporary snapshot file: {}, retrying...", tmpFile.getAbsolutePath()); + } + } + } } - public void deserialize(ByteBuffer buffer) { - // TODO: Deserialize DataNodeInfo + private void serializeOnlineDataNode(OutputStream outputStream, TProtocol protocol) + throws IOException, TException { + ReadWriteIOUtils.write(onlineDataNodes.size(), outputStream); + for (Entry entry : onlineDataNodes.entrySet()) { + ReadWriteIOUtils.write(entry.getKey(), outputStream); + entry.getValue().write(protocol); + } } - 
@TestOnly - public void clear() { - nextDataNodeId = new AtomicInteger(0); - onlineDataNodes.clear(); - drainingDataNodes.clear(); - onlineConfigNodes.clear(); + private void serializeDrainingDataNodes(OutputStream outputStream, TProtocol protocol) + throws IOException, TException { + ReadWriteIOUtils.write(drainingDataNodes.size(), outputStream); + for (TDataNodeLocation tDataNodeLocation : drainingDataNodes) { + tDataNodeLocation.write(protocol); + } } - private static class DataNodeInfoPersistenceHolder { + @Override + public void processLoadSnapshot(File snapshotDir) throws IOException, TException { + + File snapshotFile = new File(snapshotDir, snapshotFileName); + if (!snapshotFile.exists() || !snapshotFile.isFile()) { + LOGGER.error( + "Failed to load snapshot,snapshot file [{}] is not exist.", + snapshotFile.getAbsolutePath()); + return; + } + + configNodeInfoReadWriteLock.writeLock().lock(); + dataNodeInfoReadWriteLock.writeLock().lock(); + + try (FileInputStream fileInputStream = new FileInputStream(snapshotFile); + TIOStreamTransport tioStreamTransport = new TIOStreamTransport(fileInputStream)) { + TProtocol protocol = new TBinaryProtocol(tioStreamTransport); + + clear(); + + nextNodeId.set(ReadWriteIOUtils.readInt(fileInputStream)); + + deserializeOnlineDataNode(fileInputStream, protocol); - private static final NodeInfo INSTANCE = new NodeInfo(); + deserializeDrainingDataNodes(fileInputStream, protocol); + + } finally { + configNodeInfoReadWriteLock.writeLock().unlock(); + dataNodeInfoReadWriteLock.writeLock().unlock(); + } + } + + private void deserializeOnlineDataNode(InputStream inputStream, TProtocol protocol) + throws IOException, TException { + int size = ReadWriteIOUtils.readInt(inputStream); + while (size > 0) { + int dataNodeId = ReadWriteIOUtils.readInt(inputStream); + TDataNodeInfo dataNodeInfo = new TDataNodeInfo(); + dataNodeInfo.read(protocol); + onlineDataNodes.put(dataNodeId, dataNodeInfo); + size--; + } + } - private DataNodeInfoPersistenceHolder() { - // empty constructor + private void deserializeDrainingDataNodes(InputStream inputStream, TProtocol protocol) + throws IOException, TException { + int size = ReadWriteIOUtils.readInt(inputStream); + while (size > 0) { + TDataNodeLocation tDataNodeLocation = new TDataNodeLocation(); + tDataNodeLocation.read(protocol); + drainingDataNodes.add(tDataNodeLocation); + size--; } } - public static NodeInfo getInstance() { - return NodeInfo.DataNodeInfoPersistenceHolder.INSTANCE; + // as drainingDataNodes is not currently implemented, manually set it to validate the test + @TestOnly + public void setDrainingDataNodes(Set tDataNodeLocations) { + drainingDataNodes.addAll(tDataNodeLocations); + } + + @TestOnly + public int getNextNodeId() { + return nextNodeId.get(); + } + + @TestOnly + public Set getDrainingDataNodes() { + return drainingDataNodes; + } + + public void clear() { + nextNodeId.set(0); + onlineDataNodes.clear(); + drainingDataNodes.clear(); + onlineConfigNodes.clear(); } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/PartitionInfo.java b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/PartitionInfo.java deleted file mode 100644 index 8a10c7e84f9d..000000000000 --- a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/PartitionInfo.java +++ /dev/null @@ -1,544 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.confignode.persistence; - -import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; -import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; -import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; -import org.apache.iotdb.commons.partition.DataPartition; -import org.apache.iotdb.commons.partition.SchemaPartition; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; -import org.apache.iotdb.confignode.consensus.request.read.GetDataPartitionReq; -import org.apache.iotdb.confignode.consensus.request.read.GetSchemaPartitionReq; -import org.apache.iotdb.confignode.consensus.request.write.CreateDataPartitionReq; -import org.apache.iotdb.confignode.consensus.request.write.CreateRegionsReq; -import org.apache.iotdb.confignode.consensus.request.write.CreateSchemaPartitionReq; -import org.apache.iotdb.confignode.consensus.request.write.DeleteRegionsReq; -import org.apache.iotdb.confignode.consensus.request.write.DeleteStorageGroupReq; -import org.apache.iotdb.confignode.consensus.response.DataPartitionResp; -import org.apache.iotdb.confignode.consensus.response.SchemaPartitionResp; -import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; -import org.apache.iotdb.consensus.common.DataSet; -import org.apache.iotdb.rpc.TSStatusCode; - -import org.apache.thrift.TException; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.transport.TIOStreamTransport; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -/** - * The PartitionInfo stores cluster PartitionTable. The PartitionTable including: 1. regionMap: - * location of Region member 2. schemaPartition: location of schema 3. 
dataPartition: location of - * data - */ -public class PartitionInfo implements SnapshotProcessor { - - private static final Logger LOGGER = LoggerFactory.getLogger(PartitionInfo.class); - // Region read write lock - private final ReentrantReadWriteLock regionReadWriteLock; - private AtomicInteger nextRegionGroupId = new AtomicInteger(0); - private final Map regionMap; - - // SchemaPartition read write lock - private final ReentrantReadWriteLock schemaPartitionReadWriteLock; - private final SchemaPartition schemaPartition; - - // DataPartition read write lock - private final ReentrantReadWriteLock dataPartitionReadWriteLock; - private final DataPartition dataPartition; - - // The size of the buffer used for snapshot(temporary value) - private final int bufferSize = 10 * 1024 * 1024; - - private final String snapshotFileName = "partition_info.bin"; - - private PartitionInfo() { - this.regionReadWriteLock = new ReentrantReadWriteLock(); - this.regionMap = new HashMap<>(); - - this.schemaPartitionReadWriteLock = new ReentrantReadWriteLock(); - this.schemaPartition = - new SchemaPartition( - ConfigNodeDescriptor.getInstance().getConf().getSeriesPartitionExecutorClass(), - ConfigNodeDescriptor.getInstance().getConf().getSeriesPartitionSlotNum()); - this.schemaPartition.setSchemaPartitionMap(new HashMap<>()); - - this.dataPartitionReadWriteLock = new ReentrantReadWriteLock(); - this.dataPartition = - new DataPartition( - ConfigNodeDescriptor.getInstance().getConf().getSeriesPartitionExecutorClass(), - ConfigNodeDescriptor.getInstance().getConf().getSeriesPartitionSlotNum()); - this.dataPartition.setDataPartitionMap(new HashMap<>()); - } - - public int generateNextRegionGroupId() { - return nextRegionGroupId.getAndIncrement(); - } - - @TestOnly - public Integer getNextRegionGroupId() { - return nextRegionGroupId.get(); - } - - /** - * Persistence allocation result of new Regions - * - * @param req CreateRegionsPlan - * @return SUCCESS_STATUS - */ - public TSStatus createRegions(CreateRegionsReq req) { - TSStatus result; - regionReadWriteLock.writeLock().lock(); - try { - int maxRegionId = Integer.MIN_VALUE; - - for (TRegionReplicaSet regionReplicaSet : req.getRegionMap().values()) { - regionMap.put(regionReplicaSet.getRegionId(), regionReplicaSet); - maxRegionId = Math.max(maxRegionId, regionReplicaSet.getRegionId().getId()); - } - - if (nextRegionGroupId.get() < maxRegionId) { - // In this case, at least one Region is created with the leader node, - // so the nextRegionGroupID of the followers needs to be added - nextRegionGroupId.getAndAdd(req.getRegionMap().size()); - } - - result = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - } finally { - regionReadWriteLock.writeLock().unlock(); - } - return result; - } - - /** - * Delete Regions - * - * @param req DeleteRegionsReq - * @return SUCCESS_STATUS - */ - public TSStatus deleteRegions(DeleteRegionsReq req) { - TSStatus result; - regionReadWriteLock.writeLock().lock(); - try { - for (TConsensusGroupId consensusGroupId : req.getConsensusGroupIds()) { - regionMap.remove(consensusGroupId); - } - result = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - } finally { - regionReadWriteLock.writeLock().unlock(); - } - return result; - } - - /** - * Delete Regions - * - * @param req DeleteRegionsReq - * @return SUCCESS_STATUS - */ - public TSStatus deleteStorageGroup(DeleteStorageGroupReq req) { - TStorageGroupSchema storageGroupSchema = req.getStorageGroup(); - List dataRegionGroupIds = 
storageGroupSchema.getDataRegionGroupIds(); - List schemaRegionGroupIds = storageGroupSchema.getSchemaRegionGroupIds(); - DeleteRegionsReq deleteRegionsReq = new DeleteRegionsReq(); - for (TConsensusGroupId schemaRegionGroupId : schemaRegionGroupIds) { - deleteRegionsReq.addConsensusGroupId(schemaRegionGroupId); - } - for (TConsensusGroupId dataRegionId : dataRegionGroupIds) { - deleteRegionsReq.addConsensusGroupId(dataRegionId); - } - deleteRegions(deleteRegionsReq); - deleteDataPartitionMapByStorageGroup(storageGroupSchema.getName()); - deleteSchemaPartitionMapByStorageGroup(storageGroupSchema.getName()); - return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - } - - /** - * Get SchemaPartition - * - * @param req SchemaPartitionPlan with partitionSlotsMap - * @return SchemaPartitionDataSet that contains only existing SchemaPartition - */ - public DataSet getSchemaPartition(GetSchemaPartitionReq req) { - SchemaPartitionResp schemaPartitionResp = new SchemaPartitionResp(); - schemaPartitionReadWriteLock.readLock().lock(); - - try { - schemaPartitionResp.setSchemaPartition( - schemaPartition.getSchemaPartition(req.getPartitionSlotsMap())); - } finally { - schemaPartitionReadWriteLock.readLock().unlock(); - schemaPartitionResp.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); - } - - return schemaPartitionResp; - } - - /** - * Create SchemaPartition - * - * @param req CreateSchemaPartitionPlan with SchemaPartition assigned result - * @return TSStatusCode.SUCCESS_STATUS when creation successful - */ - public TSStatus createSchemaPartition(CreateSchemaPartitionReq req) { - schemaPartitionReadWriteLock.writeLock().lock(); - - try { - // Allocate SchemaPartition by CreateSchemaPartitionPlan - Map> assignedResult = - req.getAssignedSchemaPartition(); - assignedResult.forEach( - (storageGroup, partitionSlots) -> - partitionSlots.forEach( - (seriesPartitionSlot, regionReplicaSet) -> - schemaPartition.createSchemaPartition( - storageGroup, seriesPartitionSlot, regionReplicaSet))); - } finally { - schemaPartitionReadWriteLock.writeLock().unlock(); - } - - return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - } - - /** - * Filter no assigned SchemaPartitionSlots - * - * @param partitionSlotsMap Map> - * @return Map>, SchemaPartitionSlots that is not - * assigned in partitionSlotsMap - */ - public Map> filterNoAssignedSchemaPartitionSlots( - Map> partitionSlotsMap) { - Map> result; - schemaPartitionReadWriteLock.readLock().lock(); - try { - result = schemaPartition.filterNoAssignedSchemaPartitionSlot(partitionSlotsMap); - } finally { - schemaPartitionReadWriteLock.readLock().unlock(); - } - return result; - } - - /** - * Get DataPartition - * - * @param req DataPartitionPlan with partitionSlotsMap - * @return DataPartitionDataSet that contains only existing DataPartition - */ - public DataSet getDataPartition(GetDataPartitionReq req) { - DataPartitionResp dataPartitionResp = new DataPartitionResp(); - dataPartitionReadWriteLock.readLock().lock(); - - try { - dataPartitionResp.setDataPartition( - dataPartition.getDataPartition( - req.getPartitionSlotsMap(), - ConfigNodeDescriptor.getInstance().getConf().getSeriesPartitionExecutorClass(), - ConfigNodeDescriptor.getInstance().getConf().getSeriesPartitionSlotNum())); - } finally { - dataPartitionReadWriteLock.readLock().unlock(); - dataPartitionResp.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); - } - - return dataPartitionResp; - } - - /** - * Create DataPartition - * - * @param req 
CreateDataPartitionPlan with DataPartition assigned result - * @return TSStatusCode.SUCCESS_STATUS when creation successful - */ - public TSStatus createDataPartition(CreateDataPartitionReq req) { - dataPartitionReadWriteLock.writeLock().lock(); - - try { - // Allocate DataPartition by CreateDataPartitionPlan - Map>>> - assignedResult = req.getAssignedDataPartition(); - assignedResult.forEach( - (storageGroup, seriesPartitionTimePartitionSlots) -> - seriesPartitionTimePartitionSlots.forEach( - ((seriesPartitionSlot, timePartitionSlotRegionReplicaSets) -> - timePartitionSlotRegionReplicaSets.forEach( - ((timePartitionSlot, regionReplicaSets) -> - regionReplicaSets.forEach( - regionReplicaSet -> - dataPartition.createDataPartition( - storageGroup, - seriesPartitionSlot, - timePartitionSlot, - regionReplicaSet))))))); - } finally { - dataPartitionReadWriteLock.writeLock().unlock(); - } - - return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); - } - - /** - * Filter no assigned DataPartitionSlots - * - * @param partitionSlotsMap Map>> - * @return Map>>, - * DataPartitionSlots that is not assigned in partitionSlotsMap - */ - public Map>> - filterNoAssignedDataPartitionSlots( - Map>> partitionSlotsMap) { - Map>> result; - dataPartitionReadWriteLock.readLock().lock(); - try { - result = dataPartition.filterNoAssignedDataPartitionSlots(partitionSlotsMap); - } finally { - dataPartitionReadWriteLock.readLock().unlock(); - } - return result; - } - - /** Get RegionReplicaSet by the specific TConsensusGroupIds */ - public List getRegionReplicaSets(List groupIds) { - List result = new ArrayList<>(); - regionReadWriteLock.readLock().lock(); - try { - for (TConsensusGroupId groupId : groupIds) { - result.add(regionMap.get(groupId)); - } - } finally { - regionReadWriteLock.readLock().unlock(); - } - return result; - } - - /** Get all allocated RegionReplicaSets */ - public List getAllocatedRegions() { - List result; - regionReadWriteLock.readLock().lock(); - try { - result = new ArrayList<>(regionMap.values()); - } finally { - regionReadWriteLock.readLock().unlock(); - } - return result; - } - - private void deleteDataPartitionMapByStorageGroup(String storageGroup) { - dataPartitionReadWriteLock.writeLock().lock(); - try { - dataPartition.getDataPartitionMap().remove(storageGroup); - } finally { - dataPartitionReadWriteLock.writeLock().unlock(); - } - } - - private void deleteSchemaPartitionMapByStorageGroup(String storageGroup) { - schemaPartitionReadWriteLock.writeLock().lock(); - try { - schemaPartition.getSchemaPartitionMap().remove(storageGroup); - } finally { - schemaPartitionReadWriteLock.writeLock().unlock(); - } - } - - public boolean processTakeSnapshot(File snapshotDir) throws TException, IOException { - - File snapshotFile = new File(snapshotDir, snapshotFileName); - if (snapshotFile.exists() && snapshotFile.isFile()) { - LOGGER.error( - "Failed to take snapshot, because snapshot file [{}] is already exist.", - snapshotFile.getAbsolutePath()); - return false; - } - - File tmpFile = new File(snapshotFile.getAbsolutePath() + "-" + UUID.randomUUID()); - - lockAllRead(); - ByteBuffer byteBuffer = ByteBuffer.allocate(bufferSize); - try { - // serialize nextRegionGroupId - byteBuffer.putInt(nextRegionGroupId.get()); - // serialize regionMap - serializeRegionMap(byteBuffer); - // serialize schemaPartition - schemaPartition.serialize(byteBuffer); - // serialize dataPartition - dataPartition.serialize(byteBuffer); - // write to file - try (FileOutputStream fileOutputStream = new 
FileOutputStream(tmpFile); - FileChannel fileChannel = fileOutputStream.getChannel()) { - byteBuffer.flip(); - fileChannel.write(byteBuffer); - } - // rename file - return tmpFile.renameTo(snapshotFile); - } finally { - unlockAllRead(); - byteBuffer.clear(); - // with or without success, delete temporary files anyway - tmpFile.delete(); - } - } - - public void processLoadSnapshot(File snapshotDir) throws TException, IOException { - - File snapshotFile = new File(snapshotDir, snapshotFileName); - if (!snapshotFile.exists() || !snapshotFile.isFile()) { - LOGGER.error( - "Failed to load snapshot,snapshot file [{}] is not exist.", - snapshotFile.getAbsolutePath()); - return; - } - - // no operations are processed at this time - lockAllWrite(); - - ByteBuffer buffer = ByteBuffer.allocate(bufferSize); - try (FileInputStream fileInputStream = new FileInputStream(snapshotFile); - FileChannel fileChannel = fileInputStream.getChannel()) { - // get buffer from fileChannel - fileChannel.read(buffer); - buffer.flip(); - // before restoring a snapshot, clear all old data - clear(); - // start to restore - nextRegionGroupId.set(buffer.getInt()); - deserializeRegionMap(buffer); - schemaPartition.deserialize(buffer); - dataPartition.deserialize(buffer); - } finally { - unlockAllWrite(); - buffer.clear(); - } - } - - private void lockAllWrite() { - regionReadWriteLock.writeLock().lock(); - schemaPartitionReadWriteLock.writeLock().lock(); - dataPartitionReadWriteLock.writeLock().lock(); - } - - private void unlockAllWrite() { - regionReadWriteLock.writeLock().unlock(); - schemaPartitionReadWriteLock.writeLock().unlock(); - dataPartitionReadWriteLock.writeLock().unlock(); - } - - private void lockAllRead() { - regionReadWriteLock.readLock().lock(); - schemaPartitionReadWriteLock.readLock().lock(); - dataPartitionReadWriteLock.readLock().lock(); - } - - private void unlockAllRead() { - regionReadWriteLock.readLock().unlock(); - schemaPartitionReadWriteLock.readLock().unlock(); - dataPartitionReadWriteLock.readLock().unlock(); - } - - @TestOnly - public DataPartition getDataPartition() { - return dataPartition; - } - - @TestOnly - public SchemaPartition getSchemaPartition() { - return schemaPartition; - } - - private void serializeRegionMap(ByteBuffer buffer) throws TException, IOException { - try (ByteArrayOutputStream out = new ByteArrayOutputStream(); - TIOStreamTransport tioStreamTransport = new TIOStreamTransport(out)) { - TProtocol protocol = new TBinaryProtocol(tioStreamTransport); - for (Entry entry : regionMap.entrySet()) { - entry.getKey().write(protocol); - entry.getValue().write(protocol); - } - byte[] toArray = out.toByteArray(); - buffer.putInt(toArray.length); - buffer.put(toArray); - } - } - - private void deserializeRegionMap(ByteBuffer buffer) throws TException, IOException { - int length = buffer.getInt(); - byte[] regionMapBuffer = new byte[length]; - buffer.get(regionMapBuffer); - try (ByteArrayInputStream in = new ByteArrayInputStream(regionMapBuffer); - TIOStreamTransport tioStreamTransport = new TIOStreamTransport(in)) { - while (in.available() > 0) { - TProtocol protocol = new TBinaryProtocol(tioStreamTransport); - TConsensusGroupId tConsensusGroupId = new TConsensusGroupId(); - tConsensusGroupId.read(protocol); - TRegionReplicaSet tRegionReplicaSet = new TRegionReplicaSet(); - tRegionReplicaSet.read(protocol); - regionMap.put(tConsensusGroupId, tRegionReplicaSet); - } - } - } - - public void clear() { - nextRegionGroupId = new AtomicInteger(0); - regionMap.clear(); - - if 
(schemaPartition.getSchemaPartitionMap() != null) { - schemaPartition.getSchemaPartitionMap().clear(); - } - - if (dataPartition.getDataPartitionMap() != null) { - dataPartition.getDataPartitionMap().clear(); - } - } - - private static class PartitionInfoHolder { - - private static final PartitionInfo INSTANCE = new PartitionInfo(); - - private PartitionInfoHolder() { - // empty constructor - } - } - - public static PartitionInfo getInstance() { - return PartitionInfoHolder.INSTANCE; - } -} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ProcedureInfo.java b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ProcedureInfo.java index 880556015af9..e12e3ae5a1bd 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ProcedureInfo.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/ProcedureInfo.java @@ -23,10 +23,10 @@ import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.confignode.consensus.request.write.DeleteProcedureReq; import org.apache.iotdb.confignode.consensus.request.write.UpdateProcedureReq; -import org.apache.iotdb.confignode.procedure.ProcedureFactory; -import org.apache.iotdb.procedure.Procedure; -import org.apache.iotdb.procedure.conf.ProcedureNodeConstant; -import org.apache.iotdb.procedure.store.ProcedureWAL; +import org.apache.iotdb.confignode.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.store.ProcedureFactory; +import org.apache.iotdb.confignode.procedure.store.ProcedureStore; +import org.apache.iotdb.confignode.procedure.store.ProcedureWAL; import org.apache.iotdb.rpc.TSStatusCode; import org.slf4j.Logger; @@ -48,18 +48,11 @@ public class ProcedureInfo { CommonDescriptor.getInstance().getConfig().getProcedureWalFolder(); private final ConcurrentHashMap procWALMap = new ConcurrentHashMap<>(); - public static ProcedureInfo getInstance() { - return ProcedureInfoHolder.INSTANCE; - } - public void load(List procedureList) { try { Files.list(Paths.get(procedureWalDir)) .filter( - path -> - path.getFileName() - .toString() - .endsWith(ProcedureNodeConstant.PROCEDURE_WAL_SUFFIX)) + path -> path.getFileName().toString().endsWith(ProcedureStore.PROCEDURE_WAL_SUFFIX)) .sorted( (p1, p2) -> Long.compareUnsigned( @@ -82,7 +75,7 @@ public void load(List procedureList) { public TSStatus updateProcedure(UpdateProcedureReq updateProcedureReq) { Procedure procedure = updateProcedureReq.getProcedure(); long procId = procedure.getProcId(); - Path path = Paths.get(procedureWalDir, procId + ProcedureNodeConstant.PROCEDURE_WAL_SUFFIX); + Path path = Paths.get(procedureWalDir, procId + ProcedureStore.PROCEDURE_WAL_SUFFIX); ProcedureWAL procedureWAL = procWALMap.computeIfAbsent(procId, id -> new ProcedureWAL(path, procedureFactory)); try { @@ -103,13 +96,4 @@ public TSStatus deleteProcedure(DeleteProcedureReq deleteProcedureReq) { procWALMap.remove(procId); return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); } - - private static class ProcedureInfoHolder { - - private static final ProcedureInfo INSTANCE = new ProcedureInfo(); - - private ProcedureInfoHolder() { - // Empty constructor - } - } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/UDFInfo.java b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/UDFInfo.java new file mode 100644 index 000000000000..ea3a17420d86 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/UDFInfo.java @@ -0,0 +1,136 @@ +/* + * Licensed to 
the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.persistence; + +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.snapshot.SnapshotProcessor; +import org.apache.iotdb.commons.udf.service.UDFClassLoader; +import org.apache.iotdb.commons.udf.service.UDFExecutableManager; +import org.apache.iotdb.commons.udf.service.UDFExecutableResource; +import org.apache.iotdb.commons.udf.service.UDFRegistrationService; +import org.apache.iotdb.confignode.conf.ConfigNodeConfig; +import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; +import org.apache.iotdb.confignode.consensus.request.write.CreateFunctionReq; +import org.apache.iotdb.confignode.consensus.request.write.DropFunctionReq; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.List; + +public class UDFInfo implements SnapshotProcessor { + + private static final Logger LOGGER = LoggerFactory.getLogger(UDFInfo.class); + + private static final ConfigNodeConfig CONFIG_NODE_CONF = + ConfigNodeDescriptor.getInstance().getConf(); + + private final UDFExecutableManager udfExecutableManager; + private final UDFRegistrationService udfRegistrationService; + + public UDFInfo() { + udfExecutableManager = + UDFExecutableManager.setupAndGetInstance( + CONFIG_NODE_CONF.getTemporaryLibDir(), CONFIG_NODE_CONF.getUdfLibDir()); + udfRegistrationService = + UDFRegistrationService.setupAndGetInstance(CONFIG_NODE_CONF.getSystemUdfDir()); + } + + public synchronized void validateBeforeRegistration( + String functionName, String className, List uris) throws Exception { + udfRegistrationService.validate(functionName, className); + + if (uris.isEmpty()) { + fetchExecutablesAndCheckInstantiation(className); + } else { + fetchExecutablesAndCheckInstantiation(className, uris); + } + } + + private void fetchExecutablesAndCheckInstantiation(String className) throws Exception { + try (UDFClassLoader temporaryUdfClassLoader = + new UDFClassLoader(CONFIG_NODE_CONF.getUdfLibDir())) { + Class.forName(className, true, temporaryUdfClassLoader) + .getDeclaredConstructor() + .newInstance(); + } + } + + private void fetchExecutablesAndCheckInstantiation(String className, List uris) + throws Exception { + final UDFExecutableResource resource = udfExecutableManager.request(uris); + try (UDFClassLoader temporaryUdfClassLoader = new UDFClassLoader(resource.getResourceDir())) { + Class.forName(className, true, temporaryUdfClassLoader) + .getDeclaredConstructor() + .newInstance(); + } finally { + udfExecutableManager.removeFromTemporaryLibRoot(resource); + } + } + + public synchronized TSStatus createFunction(CreateFunctionReq req) { + final String 
functionName = req.getFunctionName(); + final String className = req.getClassName(); + final List uris = req.getUris(); + + try { + udfRegistrationService.register(functionName, className, uris, udfExecutableManager, true); + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } catch (Exception e) { + final String errorMessage = + String.format( + "[ConfigNode] Failed to register UDF %s(class name: %s, uris: %s), because of exception: %s", + functionName, className, uris, e); + LOGGER.warn(errorMessage, e); + return new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()) + .setMessage(errorMessage); + } + } + + public synchronized TSStatus dropFunction(DropFunctionReq req) { + try { + udfRegistrationService.deregister(req.getFunctionName()); + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } catch (Exception e) { + final String errorMessage = + String.format( + "[ConfigNode] Failed to deregister UDF %s, because of exception: %s", + req.getFunctionName(), e); + LOGGER.warn(errorMessage, e); + return new TSStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()) + .setMessage(errorMessage); + } + } + + @Override + public synchronized boolean processTakeSnapshot(File snapshotDir) throws IOException { + return udfExecutableManager.processTakeSnapshot(snapshotDir) + && udfRegistrationService.processTakeSnapshot(snapshotDir); + } + + @Override + public synchronized void processLoadSnapshot(File snapshotDir) throws IOException { + udfExecutableManager.processLoadSnapshot(snapshotDir); + udfRegistrationService.processLoadSnapshot(snapshotDir); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigRequestExecutor.java b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigRequestExecutor.java index f69efec57096..68129c151dd3 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigRequestExecutor.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigRequestExecutor.java @@ -20,36 +20,46 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.auth.AuthException; +import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.commons.snapshot.SnapshotProcessor; import org.apache.iotdb.confignode.consensus.request.ConfigRequest; import org.apache.iotdb.confignode.consensus.request.auth.AuthorReq; import org.apache.iotdb.confignode.consensus.request.read.CountStorageGroupReq; import org.apache.iotdb.confignode.consensus.request.read.GetDataNodeInfoReq; import org.apache.iotdb.confignode.consensus.request.read.GetDataPartitionReq; +import org.apache.iotdb.confignode.consensus.request.read.GetNodePathsPartitionReq; +import org.apache.iotdb.confignode.consensus.request.read.GetRegionInfoListReq; import org.apache.iotdb.confignode.consensus.request.read.GetSchemaPartitionReq; import org.apache.iotdb.confignode.consensus.request.read.GetStorageGroupReq; +import org.apache.iotdb.confignode.consensus.request.write.AdjustMaxRegionGroupCountReq; import org.apache.iotdb.confignode.consensus.request.write.ApplyConfigNodeReq; import org.apache.iotdb.confignode.consensus.request.write.CreateDataPartitionReq; +import org.apache.iotdb.confignode.consensus.request.write.CreateFunctionReq; import org.apache.iotdb.confignode.consensus.request.write.CreateRegionsReq; import org.apache.iotdb.confignode.consensus.request.write.CreateSchemaPartitionReq; import 
org.apache.iotdb.confignode.consensus.request.write.DeleteProcedureReq; -import org.apache.iotdb.confignode.consensus.request.write.DeleteRegionsReq; import org.apache.iotdb.confignode.consensus.request.write.DeleteStorageGroupReq; +import org.apache.iotdb.confignode.consensus.request.write.DropFunctionReq; +import org.apache.iotdb.confignode.consensus.request.write.PreDeleteStorageGroupReq; import org.apache.iotdb.confignode.consensus.request.write.RegisterDataNodeReq; +import org.apache.iotdb.confignode.consensus.request.write.RemoveConfigNodeReq; import org.apache.iotdb.confignode.consensus.request.write.SetDataReplicationFactorReq; import org.apache.iotdb.confignode.consensus.request.write.SetSchemaReplicationFactorReq; import org.apache.iotdb.confignode.consensus.request.write.SetStorageGroupReq; import org.apache.iotdb.confignode.consensus.request.write.SetTTLReq; import org.apache.iotdb.confignode.consensus.request.write.SetTimePartitionIntervalReq; import org.apache.iotdb.confignode.consensus.request.write.UpdateProcedureReq; +import org.apache.iotdb.confignode.consensus.response.SchemaNodeManagementResp; import org.apache.iotdb.confignode.exception.physical.UnknownPhysicalPlanTypeException; import org.apache.iotdb.confignode.persistence.AuthorInfo; import org.apache.iotdb.confignode.persistence.ClusterSchemaInfo; import org.apache.iotdb.confignode.persistence.NodeInfo; -import org.apache.iotdb.confignode.persistence.PartitionInfo; import org.apache.iotdb.confignode.persistence.ProcedureInfo; -import org.apache.iotdb.confignode.persistence.SnapshotProcessor; +import org.apache.iotdb.confignode.persistence.UDFInfo; +import org.apache.iotdb.confignode.persistence.partition.PartitionInfo; import org.apache.iotdb.consensus.common.DataSet; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.iotdb.tsfile.utils.Pair; import org.apache.thrift.TException; import org.slf4j.Logger; @@ -59,7 +69,9 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; public class ConfigRequestExecutor { @@ -75,15 +87,24 @@ public class ConfigRequestExecutor { private final ProcedureInfo procedureInfo; - public ConfigRequestExecutor() { - this.nodeInfo = NodeInfo.getInstance(); - this.clusterSchemaInfo = ClusterSchemaInfo.getInstance(); - this.partitionInfo = PartitionInfo.getInstance(); - this.authorInfo = AuthorInfo.getInstance(); - this.procedureInfo = ProcedureInfo.getInstance(); + private final UDFInfo udfInfo; + + public ConfigRequestExecutor( + NodeInfo nodeInfo, + ClusterSchemaInfo clusterSchemaInfo, + PartitionInfo partitionInfo, + AuthorInfo authorInfo, + ProcedureInfo procedureInfo, + UDFInfo udfInfo) { + this.nodeInfo = nodeInfo; + this.clusterSchemaInfo = clusterSchemaInfo; + this.partitionInfo = partitionInfo; + this.authorInfo = authorInfo; + this.procedureInfo = procedureInfo; + this.udfInfo = udfInfo; } - public DataSet executorQueryPlan(ConfigRequest req) + public DataSet executeQueryPlan(ConfigRequest req) throws UnknownPhysicalPlanTypeException, AuthException { switch (req.getType()) { case GetDataNodeInfo: @@ -110,20 +131,33 @@ public DataSet executorQueryPlan(ConfigRequest req) return authorInfo.executeListUserRoles((AuthorReq) req); case ListRoleUsers: return authorInfo.executeListRoleUsers((AuthorReq) req); + case GetNodePathsPartition: + return getSchemaNodeManagementPartition(req); + case GetRegionInfoList: + return 
partitionInfo.getRegionInfoList((GetRegionInfoListReq) req); default: throw new UnknownPhysicalPlanTypeException(req.getType()); } } - public TSStatus executorNonQueryPlan(ConfigRequest req) + public TSStatus executeNonQueryPlan(ConfigRequest req) throws UnknownPhysicalPlanTypeException, AuthException { switch (req.getType()) { case RegisterDataNode: return nodeInfo.registerDataNode((RegisterDataNodeReq) req); case SetStorageGroup: - return clusterSchemaInfo.setStorageGroup((SetStorageGroupReq) req); + TSStatus status = clusterSchemaInfo.setStorageGroup((SetStorageGroupReq) req); + if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return status; + } + return partitionInfo.setStorageGroup((SetStorageGroupReq) req); + case AdjustMaxRegionGroupCount: + return clusterSchemaInfo.adjustMaxRegionGroupCount((AdjustMaxRegionGroupCountReq) req); case DeleteStorageGroup: + partitionInfo.deleteStorageGroup((DeleteStorageGroupReq) req); return clusterSchemaInfo.deleteStorageGroup((DeleteStorageGroupReq) req); + case PreDeleteStorageGroup: + return partitionInfo.preDeleteStorageGroup((PreDeleteStorageGroupReq) req); case SetTTL: return clusterSchemaInfo.setTTL((SetTTLReq) req); case SetSchemaReplicationFactor: @@ -132,14 +166,8 @@ public TSStatus executorNonQueryPlan(ConfigRequest req) return clusterSchemaInfo.setDataReplicationFactor((SetDataReplicationFactorReq) req); case SetTimePartitionInterval: return clusterSchemaInfo.setTimePartitionInterval((SetTimePartitionIntervalReq) req); - case CreateRegions: - TSStatus status = clusterSchemaInfo.createRegions((CreateRegionsReq) req); - if (status.getCode() != TSStatusCode.SUCCESS_STATUS.getStatusCode()) { - return status; - } - return partitionInfo.createRegions((CreateRegionsReq) req); - case DeleteRegions: - return partitionInfo.deleteRegions((DeleteRegionsReq) req); + case CreateRegionGroups: + return partitionInfo.createRegionGroups((CreateRegionsReq) req); case CreateSchemaPartition: return partitionInfo.createSchemaPartition((CreateSchemaPartitionReq) req); case CreateDataPartition: @@ -162,6 +190,12 @@ public TSStatus executorNonQueryPlan(ConfigRequest req) return authorInfo.authorNonQuery((AuthorReq) req); case ApplyConfigNode: return nodeInfo.updateConfigNodeList((ApplyConfigNodeReq) req); + case RemoveConfigNode: + return nodeInfo.removeConfigNodeList((RemoveConfigNodeReq) req); + case CreateFunction: + return udfInfo.createFunction((CreateFunctionReq) req); + case DropFunction: + return udfInfo.dropFunction((DropFunctionReq) req); default: throw new UnknownPhysicalPlanTypeException(req.getType()); } @@ -169,11 +203,21 @@ public TSStatus executorNonQueryPlan(ConfigRequest req) public boolean takeSnapshot(File snapshotDir) { - if (!snapshotDir.exists() && !snapshotDir.mkdirs()) { - LOGGER.error("snapshot directory [{}] can not be created.", snapshotDir.getAbsolutePath()); - return false; + // The consensus layer needs to ensure that the directory exists. + // If it does not exist, log a warning because there may be a problem. + if (!snapshotDir.exists()) { + LOGGER.warn( + "snapshot directory [{}] does not exist, start to create it.", + snapshotDir.getAbsolutePath()); + // try to create the directory to enable the snapshot operation + if (!snapshotDir.mkdirs()) { + LOGGER.error("snapshot directory [{}] cannot be created.", snapshotDir.getAbsolutePath()); + return false; + } } + // If the directory is not empty, we should not continue the snapshot operation, + // which may result in incorrect results. 
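// The directory handling described in the comments above can be summarized as: create the
// snapshot directory if it is missing, and refuse to snapshot into a non-empty directory,
// because leftover files could make the resulting snapshot inconsistent. A compact,
// self-contained sketch of that guard (helper name is hypothetical, not the PR's API):

import java.io.File;

final class SnapshotDirGuard {

  // Returns true only when snapshotDir exists (or was just created) and contains no files.
  static boolean prepareSnapshotDir(File snapshotDir) {
    if (!snapshotDir.exists() && !snapshotDir.mkdirs()) {
      return false; // the directory is missing and could not be created
    }
    File[] files = snapshotDir.listFiles();
    return files == null || files.length == 0;
  }
}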
File[] fileList = snapshotDir.listFiles(); if (fileList != null && fileList.length > 0) { LOGGER.error("snapshot directory [{}] is not empty.", snapshotDir.getAbsolutePath()); @@ -203,7 +247,6 @@ public boolean takeSnapshot(File snapshotDir) { } public void loadSnapshot(File latestSnapshotRootDir) { - if (!latestSnapshotRootDir.exists()) { LOGGER.error( "snapshot directory [{}] is not exist, can not load snapshot with this directory.", @@ -223,10 +266,50 @@ public void loadSnapshot(File latestSnapshotRootDir) { }); } + private DataSet getSchemaNodeManagementPartition(ConfigRequest req) { + int level; + PartialPath partialPath; + Set alreadyMatchedNode; + Set needMatchedNode; + List matchedStorageGroups = new ArrayList<>(); + + GetNodePathsPartitionReq getNodePathsPartitionReq = (GetNodePathsPartitionReq) req; + partialPath = getNodePathsPartitionReq.getPartialPath(); + level = getNodePathsPartitionReq.getLevel(); + if (-1 == level) { + // get child paths + Pair, Set> matchedChildInNextLevel = + clusterSchemaInfo.getChildNodePathInNextLevel(partialPath); + alreadyMatchedNode = matchedChildInNextLevel.left; + needMatchedNode = matchedChildInNextLevel.right; + } else { + // count nodes + Pair, Set> matchedChildInNextLevel = + clusterSchemaInfo.getNodesListInGivenLevel(partialPath, level); + alreadyMatchedNode = + matchedChildInNextLevel.left.stream() + .map(PartialPath::getFullPath) + .collect(Collectors.toSet()); + needMatchedNode = matchedChildInNextLevel.right; + } + + needMatchedNode.forEach(nodePath -> matchedStorageGroups.add(nodePath.getFullPath())); + SchemaNodeManagementResp schemaNodeManagementResp = + (SchemaNodeManagementResp) + partitionInfo.getSchemaNodeManagementPartition(matchedStorageGroups); + if (schemaNodeManagementResp.getStatus().getCode() + == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + schemaNodeManagementResp.setMatchedNode(alreadyMatchedNode); + } + return schemaNodeManagementResp; + } + private List getAllAttributes() { List allAttributes = new ArrayList<>(); allAttributes.add(clusterSchemaInfo); allAttributes.add(partitionInfo); + allAttributes.add(nodeInfo); + allAttributes.add(udfInfo); return allAttributes; } } diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java new file mode 100644 index 000000000000..594778a53432 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java @@ -0,0 +1,750 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.persistence.partition; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TRegionInfo; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; +import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.partition.DataPartitionTable; +import org.apache.iotdb.commons.partition.SchemaPartitionTable; +import org.apache.iotdb.commons.snapshot.SnapshotProcessor; +import org.apache.iotdb.confignode.consensus.request.read.GetDataPartitionReq; +import org.apache.iotdb.confignode.consensus.request.read.GetRegionInfoListReq; +import org.apache.iotdb.confignode.consensus.request.read.GetSchemaPartitionReq; +import org.apache.iotdb.confignode.consensus.request.write.CreateDataPartitionReq; +import org.apache.iotdb.confignode.consensus.request.write.CreateRegionsReq; +import org.apache.iotdb.confignode.consensus.request.write.CreateSchemaPartitionReq; +import org.apache.iotdb.confignode.consensus.request.write.DeleteStorageGroupReq; +import org.apache.iotdb.confignode.consensus.request.write.PreDeleteStorageGroupReq; +import org.apache.iotdb.confignode.consensus.request.write.SetStorageGroupReq; +import org.apache.iotdb.confignode.consensus.response.DataPartitionResp; +import org.apache.iotdb.confignode.consensus.response.RegionInfoListResp; +import org.apache.iotdb.confignode.consensus.response.SchemaNodeManagementResp; +import org.apache.iotdb.confignode.consensus.response.SchemaPartitionResp; +import org.apache.iotdb.confignode.exception.StorageGroupNotExistsException; +import org.apache.iotdb.consensus.common.DataSet; +import org.apache.iotdb.db.service.metrics.MetricsService; +import org.apache.iotdb.db.service.metrics.enums.Metric; +import org.apache.iotdb.db.service.metrics.enums.Tag; +import org.apache.iotdb.metrics.config.MetricConfigDescriptor; +import org.apache.iotdb.metrics.utils.MetricLevel; +import org.apache.iotdb.rpc.RpcUtils; +import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.iotdb.tsfile.utils.Pair; +import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils; + +import org.apache.thrift.TException; +import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.transport.TIOStreamTransport; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.UUID; +import java.util.Vector; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * The PartitionInfo stores cluster PartitionTable. The PartitionTable including: 1. regionMap: + * location of Region member 2. schemaPartition: location of schema 3. 
dataPartition: location of + * data + */ +public class PartitionInfo implements SnapshotProcessor { + + private static final Logger LOGGER = LoggerFactory.getLogger(PartitionInfo.class); + + // For allocating Regions + private final AtomicInteger nextRegionGroupId; + // Map + private final ConcurrentHashMap storageGroupPartitionTables; + + private final Set deletedRegionSet; + + private final String snapshotFileName = "partition_info.bin"; + + public PartitionInfo() { + this.storageGroupPartitionTables = new ConcurrentHashMap<>(); + this.nextRegionGroupId = new AtomicInteger(0); + + // Ensure that the PartitionTables of the StorageGroups who've been logically deleted + // are unreadable and un-writable + // For RegionCleaner + this.deletedRegionSet = Collections.synchronizedSet(new HashSet<>()); + } + + public void addMetrics() { + if (MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) { + MetricsService.getInstance() + .getMetricManager() + .getOrCreateAutoGauge( + Metric.STORAGE_GROUP.toString(), + MetricLevel.CORE, + storageGroupPartitionTables, + o -> o.size() / 2, + Tag.NAME.toString(), + "number"); + MetricsService.getInstance() + .getMetricManager() + .getOrCreateAutoGauge( + Metric.REGION.toString(), + MetricLevel.IMPORTANT, + this, + o -> o.updateRegionMetric(TConsensusGroupType.SchemaRegion), + Tag.NAME.toString(), + "total", + Tag.TYPE.toString(), + TConsensusGroupType.SchemaRegion.toString()); + MetricsService.getInstance() + .getMetricManager() + .getOrCreateAutoGauge( + Metric.REGION.toString(), + MetricLevel.IMPORTANT, + this, + o -> o.updateRegionMetric(TConsensusGroupType.DataRegion), + Tag.NAME.toString(), + "total", + Tag.TYPE.toString(), + TConsensusGroupType.DataRegion.toString()); + } + } + + public int generateNextRegionGroupId() { + return nextRegionGroupId.getAndIncrement(); + } + + // ====================================================== + // Consensus read/write interfaces + // ====================================================== + + /** + * Thread-safely create new StorageGroupPartitionInfo + * + * @param req SetStorageGroupReq + * @return SUCCESS_STATUS if the new StorageGroupPartitionInfo is created successfully. 
+ */ + public TSStatus setStorageGroup(SetStorageGroupReq req) { + String storageGroupName = req.getSchema().getName(); + storageGroupPartitionTables.put( + storageGroupName, new StorageGroupPartitionTable(storageGroupName)); + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + + /** + * Thread-safely cache allocation result of new RegionGroups + * + * @param req CreateRegionGroupsReq + * @return SUCCESS_STATUS + */ + public TSStatus createRegionGroups(CreateRegionsReq req) { + TSStatus result; + AtomicInteger maxRegionId = new AtomicInteger(Integer.MIN_VALUE); + + req.getRegionGroupMap() + .forEach( + (storageGroup, regionReplicaSets) -> { + storageGroupPartitionTables.get(storageGroup).createRegionGroups(regionReplicaSets); + regionReplicaSets.forEach( + regionReplicaSet -> + maxRegionId.set( + Math.max(maxRegionId.get(), regionReplicaSet.getRegionId().getId()))); + }); + + // To ensure that the nextRegionGroupId is updated correctly when + // the ConfigNode-followers concurrently processes CreateRegionsReq, + // we need to add a synchronization lock here + synchronized (nextRegionGroupId) { + if (nextRegionGroupId.get() < maxRegionId.get()) { + nextRegionGroupId.set(maxRegionId.get()); + } + } + + result = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + return result; + } + + /** + * Thread-safely pre-delete the specific StorageGroup + * + * @param preDeleteStorageGroupReq PreDeleteStorageGroupReq + * @return SUCCESS_STATUS + */ + public TSStatus preDeleteStorageGroup(PreDeleteStorageGroupReq preDeleteStorageGroupReq) { + final PreDeleteStorageGroupReq.PreDeleteType preDeleteType = + preDeleteStorageGroupReq.getPreDeleteType(); + final String storageGroup = preDeleteStorageGroupReq.getStorageGroup(); + StorageGroupPartitionTable storageGroupPartitionTable = + storageGroupPartitionTables.get(storageGroup); + if (storageGroupPartitionTable == null) { + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + switch (preDeleteType) { + case EXECUTE: + storageGroupPartitionTable.setPredeleted(true); + break; + case ROLLBACK: + storageGroupPartitionTable.setPredeleted(false); + break; + } + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + + /** + * Thread-safely delete StorageGroup + * + * @param req DeleteRegionsReq + */ + public void deleteStorageGroup(DeleteStorageGroupReq req) { + StorageGroupPartitionTable storageGroupPartitionTable = + storageGroupPartitionTables.get(req.getName()); + if (storageGroupPartitionTable == null) { + return; + } + // Cache RegionReplicaSets + synchronized (deletedRegionSet) { + storageGroupPartitionTable = storageGroupPartitionTables.get(req.getName()); + if (storageGroupPartitionTable == null) { + return; + } + deletedRegionSet.addAll(storageGroupPartitionTable.getAllReplicaSets()); + // Clean the cache + storageGroupPartitionTables.remove(req.getName()); + } + } + + /** @return The Regions that should be deleted among the DataNodes */ + public Set getDeletedRegionSet() { + synchronized (deletedRegionSet) { + return deletedRegionSet; + } + } + + /** + * Thread-safely get SchemaPartition + * + * @param req SchemaPartitionPlan with partitionSlotsMap + * @return SchemaPartitionDataSet that contains only existing SchemaPartition + */ + public DataSet getSchemaPartition(GetSchemaPartitionReq req) { + AtomicBoolean isAllPartitionsExist = new AtomicBoolean(true); + // TODO: Replace this map whit new SchemaPartition + Map schemaPartition = new ConcurrentHashMap<>(); + + if 
(req.getPartitionSlotsMap().size() == 0) { + // Return all SchemaPartitions when the queried PartitionSlots are empty + storageGroupPartitionTables.forEach( + (storageGroup, storageGroupPartitionTable) -> { + if (!storageGroupPartitionTable.isPredeleted()) { + schemaPartition.put(storageGroup, new SchemaPartitionTable()); + + storageGroupPartitionTable.getSchemaPartition( + new ArrayList<>(), schemaPartition.get(storageGroup)); + + if (schemaPartition.get(storageGroup).getSchemaPartitionMap().isEmpty()) { + // Remove empty Map + schemaPartition.remove(storageGroup); + } + } + }); + } else { + // Return the SchemaPartition for each StorageGroup + req.getPartitionSlotsMap() + .forEach( + (storageGroup, partitionSlots) -> { + if (isStorageGroupExisted(storageGroup)) { + schemaPartition.put(storageGroup, new SchemaPartitionTable()); + + if (!storageGroupPartitionTables + .get(storageGroup) + .getSchemaPartition(partitionSlots, schemaPartition.get(storageGroup))) { + isAllPartitionsExist.set(false); + } + + if (schemaPartition.get(storageGroup).getSchemaPartitionMap().isEmpty()) { + // Remove empty Map + schemaPartition.remove(storageGroup); + } + } + }); + } + + return new SchemaPartitionResp( + new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()), + isAllPartitionsExist.get(), + schemaPartition); + } + + /** + * Thread-safely get DataPartition + * + * @param req DataPartitionPlan with partitionSlotsMap + * @return DataPartitionDataSet that contains only existing DataPartition + */ + public DataSet getDataPartition(GetDataPartitionReq req) { + AtomicBoolean isAllPartitionsExist = new AtomicBoolean(true); + // TODO: Replace this map whit new DataPartition + Map dataPartition = new ConcurrentHashMap<>(); + + req.getPartitionSlotsMap() + .forEach( + (storageGroup, partitionSlots) -> { + if (isStorageGroupExisted(storageGroup)) { + dataPartition.put(storageGroup, new DataPartitionTable()); + + if (!storageGroupPartitionTables + .get(storageGroup) + .getDataPartition(partitionSlots, dataPartition.get(storageGroup))) { + isAllPartitionsExist.set(false); + } + + if (dataPartition.get(storageGroup).getDataPartitionMap().isEmpty()) { + // Remove empty Map + dataPartition.remove(storageGroup); + } + } + }); + + return new DataPartitionResp( + new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()), + isAllPartitionsExist.get(), + dataPartition); + } + + private boolean isStorageGroupExisted(String storageGroup) { + final StorageGroupPartitionTable storageGroupPartitionTable = + storageGroupPartitionTables.get(storageGroup); + return storageGroupPartitionTable != null && !storageGroupPartitionTable.isPredeleted(); + } + + /** + * Create SchemaPartition + * + * @param req CreateSchemaPartitionPlan with SchemaPartition assigned result + * @return TSStatusCode.SUCCESS_STATUS + */ + public TSStatus createSchemaPartition(CreateSchemaPartitionReq req) { + req.getAssignedSchemaPartition() + .forEach( + (storageGroup, schemaPartitionTable) -> { + if (isStorageGroupExisted(storageGroup)) { + storageGroupPartitionTables + .get(storageGroup) + .createSchemaPartition(schemaPartitionTable); + } + }); + + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + + /** + * Create DataPartition + * + * @param req CreateDataPartitionPlan with DataPartition assigned result + * @return TSStatusCode.SUCCESS_STATUS + */ + public TSStatus createDataPartition(CreateDataPartitionReq req) { + req.getAssignedDataPartition() + .forEach( + (storageGroup, dataPartitionTable) -> { + if 
(isStorageGroupExisted(storageGroup)) { + storageGroupPartitionTables + .get(storageGroup) + .createDataPartition(dataPartitionTable); + } + }); + + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + + /** Get SchemaNodeManagementPartition through matched storageGroup */ + public DataSet getSchemaNodeManagementPartition(List matchedStorageGroups) { + SchemaNodeManagementResp schemaNodeManagementResp = new SchemaNodeManagementResp(); + Map schemaPartitionMap = new ConcurrentHashMap<>(); + + matchedStorageGroups.stream() + .filter(this::isStorageGroupExisted) + .forEach( + storageGroup -> { + schemaPartitionMap.put(storageGroup, new SchemaPartitionTable()); + + storageGroupPartitionTables + .get(storageGroup) + .getSchemaPartition(new ArrayList<>(), schemaPartitionMap.get(storageGroup)); + + if (schemaPartitionMap.get(storageGroup).getSchemaPartitionMap().isEmpty()) { + // Remove empty Map + schemaPartitionMap.remove(storageGroup); + } + }); + + schemaNodeManagementResp.setSchemaPartition(schemaPartitionMap); + schemaNodeManagementResp.setStatus(new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode())); + return schemaNodeManagementResp; + } + + /** Get region information */ + public DataSet getRegionInfoList(GetRegionInfoListReq regionsInfoReq) { + RegionInfoListResp regionResp = new RegionInfoListResp(); + List regionInfoList = new ArrayList<>(); + if (storageGroupPartitionTables.isEmpty()) { + regionResp.setStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS)); + return regionResp; + } + storageGroupPartitionTables.forEach( + (storageGroup, storageGroupPartitionTable) -> { + storageGroupPartitionTable.getRegionInfoList(regionsInfoReq, regionInfoList); + }); + regionInfoList.sort( + Comparator.comparingInt(regionId -> regionId.getConsensusGroupId().getId())); + regionResp.setRegionInfoList(regionInfoList); + regionResp.setStatus(RpcUtils.getStatus(TSStatusCode.SUCCESS_STATUS)); + return regionResp; + } + + // ====================================================== + // Leader scheduling interfaces + // ====================================================== + + /** + * Only Leader use this interface. Filter unassigned SchemaPartitionSlots + * + * @param partitionSlotsMap Map> + * @return Map>, SchemaPartitionSlots that is not + * assigned in partitionSlotsMap + */ + public Map> filterUnassignedSchemaPartitionSlots( + Map> partitionSlotsMap) { + Map> result = new ConcurrentHashMap<>(); + + partitionSlotsMap.forEach( + (storageGroup, partitionSlots) -> { + if (isStorageGroupExisted(storageGroup)) { + result.put( + storageGroup, + storageGroupPartitionTables + .get(storageGroup) + .filterUnassignedSchemaPartitionSlots(partitionSlots)); + } + }); + + return result; + } + + /** + * Only Leader use this interface. Filter unassigned SchemaPartitionSlots + * + * @param partitionSlotsMap Map>> + * @return Map>>, + * DataPartitionSlots that is not assigned in partitionSlotsMap + */ + public Map>> + filterUnassignedDataPartitionSlots( + Map>> partitionSlotsMap) { + Map>> result = + new ConcurrentHashMap<>(); + + partitionSlotsMap.forEach( + (storageGroup, partitionSlots) -> { + if (isStorageGroupExisted(storageGroup)) { + result.put( + storageGroup, + storageGroupPartitionTables + .get(storageGroup) + .filterUnassignedDataPartitionSlots(partitionSlots)); + } + }); + + return result; + } + + /** + * Only leader use this interface. 
+ * + * @return All Regions' RegionReplicaSet + */ + public List getAllReplicaSets() { + List result = new Vector<>(); + storageGroupPartitionTables + .values() + .forEach( + storageGroupPartitionTable -> + result.addAll(storageGroupPartitionTable.getAllReplicaSets())); + return result; + } + + /** + * Only leader use this interface. Get the number of Regions currently owned by the specific + * StorageGroup + * + * @param storageGroup StorageGroupName + * @param type SchemaRegion or DataRegion + * @return Number of Regions currently owned by the specific StorageGroup + * @throws StorageGroupNotExistsException When the specific StorageGroup doesn't exist + */ + public int getRegionCount(String storageGroup, TConsensusGroupType type) + throws StorageGroupNotExistsException { + if (!isStorageGroupExisted(storageGroup)) { + throw new StorageGroupNotExistsException(storageGroup); + } + + return storageGroupPartitionTables.get(storageGroup).getRegionCount(type); + } + + public int getSlotCount(String storageGroup) { + return storageGroupPartitionTables.get(storageGroup).getSlotsCount(); + } + + /** + * Only leader use this interface. Contending the Region allocation particle. + * + * @param storageGroup StorageGroupName + * @param type SchemaRegion or DataRegion + * @return True when successfully get the allocation particle, false otherwise + */ + public boolean contendRegionAllocationParticle(String storageGroup, TConsensusGroupType type) { + return storageGroupPartitionTables.get(storageGroup).contendRegionAllocationParticle(type); + } + + /** + * Only leader use this interface. Put back the Region allocation particle. + * + * @param storageGroup StorageGroupName + * @param type SchemaRegion or DataRegion + */ + public void putBackRegionAllocationParticle(String storageGroup, TConsensusGroupType type) { + storageGroupPartitionTables.get(storageGroup).putBackRegionAllocationParticle(type); + } + + /** + * Only leader use this interface. Get the Region allocation particle. + * + * @param storageGroup StorageGroupName + * @param type SchemaRegion or DataRegion + */ + public boolean getRegionAllocationParticle(String storageGroup, TConsensusGroupType type) { + return storageGroupPartitionTables.get(storageGroup).getRegionAllocationParticle(type); + } + + /** + * Only leader use this interface. 
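A minimal sketch of the intended calling pattern for the particle methods above, assuming a hypothetical allocation helper: contend the particle, allocate, and always put the particle back.

```java
// Illustrative sketch only; allocateNewDataRegions is an assumed helper, not part of this patch.
if (partitionInfo.contendRegionAllocationParticle(storageGroup, TConsensusGroupType.DataRegion)) {
  try {
    // Only the thread that won the particle extends the Regions of this storage group.
    allocateNewDataRegions(storageGroup);
  } finally {
    // Hand the particle back so the next allocation round can be triggered.
    partitionInfo.putBackRegionAllocationParticle(storageGroup, TConsensusGroupType.DataRegion);
  }
}
```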
+ * + * @param storageGroup StorageGroupName + * @param type SchemaRegion or DataRegion + * @return The specific StorageGroup's Regions that are sorted by the number of allocated slots + */ + public List> getSortedRegionSlotsCounter( + String storageGroup, TConsensusGroupType type) { + return storageGroupPartitionTables.get(storageGroup).getSortedRegionSlotsCounter(type); + } + + /** + * Get total region number + * + * @param type SchemaRegion or DataRegion + * @return the number of SchemaRegion or DataRegion + */ + public int getTotalRegionCount(TConsensusGroupType type) { + Set regionGroups = new HashSet<>(); + for (Map.Entry entry : + storageGroupPartitionTables.entrySet()) { + regionGroups.addAll(entry.getValue().getRegion(type)); + } + return regionGroups.size(); + } + + /** + * update region-related metric + * + * @param type SchemaRegion or DataRegion + * @return the number of SchemaRegion or DataRegion + */ + private int updateRegionMetric(TConsensusGroupType type) { + Set regionGroups = new HashSet<>(); + for (Map.Entry entry : + storageGroupPartitionTables.entrySet()) { + regionGroups.addAll(entry.getValue().getRegion(type)); + } + int result = regionGroups.size(); + // datanode location -> region number + Map dataNodeLocationIntegerMap = new HashMap<>(); + for (RegionGroup regionGroup : regionGroups) { + TRegionReplicaSet regionReplicaSet = regionGroup.getReplicaSet(); + List dataNodeLocations = regionReplicaSet.getDataNodeLocations(); + for (TDataNodeLocation dataNodeLocation : dataNodeLocations) { + if (!dataNodeLocationIntegerMap.containsKey(dataNodeLocation)) { + dataNodeLocationIntegerMap.put(dataNodeLocation, 0); + } + dataNodeLocationIntegerMap.put( + dataNodeLocation, dataNodeLocationIntegerMap.get(dataNodeLocation) + 1); + } + } + for (Map.Entry entry : dataNodeLocationIntegerMap.entrySet()) { + TDataNodeLocation dataNodeLocation = entry.getKey(); + String name = + "EndPoint(" + + dataNodeLocation.getClientRpcEndPoint().ip + + ":" + + dataNodeLocation.getClientRpcEndPoint().port + + ")"; + MetricsService.getInstance() + .getMetricManager() + .getOrCreateGauge( + Metric.REGION.toString(), + MetricLevel.IMPORTANT, + Tag.NAME.toString(), + name, + Tag.TYPE.toString(), + type.toString()) + .set(dataNodeLocationIntegerMap.get(dataNodeLocation)); + } + return result; + } + + @Override + public boolean processTakeSnapshot(File snapshotDir) throws TException, IOException { + + File snapshotFile = new File(snapshotDir, snapshotFileName); + if (snapshotFile.exists() && snapshotFile.isFile()) { + LOGGER.error( + "Failed to take snapshot, because snapshot file [{}] already exists.", + snapshotFile.getAbsolutePath()); + return false; + } + + // Write to a temporary file first, so that a damaged snapshot file that cannot be deleted + // will not affect the next snapshot operation. 
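An aside on the per-DataNode counting loop in updateRegionMetric above: the containsKey/put pair can be expressed more compactly with Map.merge. A behaviour-equivalent fragment, for illustration only:

```java
// Count Regions per DataNode; merge() initialises the entry to 1 or adds 1 to the existing count.
Map<TDataNodeLocation, Integer> regionCountPerDataNode = new HashMap<>();
for (RegionGroup regionGroup : regionGroups) {
  for (TDataNodeLocation location : regionGroup.getReplicaSet().getDataNodeLocations()) {
    regionCountPerDataNode.merge(location, 1, Integer::sum);
  }
}
```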
+ File tmpFile = new File(snapshotFile.getAbsolutePath() + "-" + UUID.randomUUID()); + + // TODO: Lock PartitionInfo + try (FileOutputStream fileOutputStream = new FileOutputStream(tmpFile); + TIOStreamTransport tioStreamTransport = new TIOStreamTransport(fileOutputStream)) { + TProtocol protocol = new TBinaryProtocol(tioStreamTransport); + + // serialize nextRegionGroupId + ReadWriteIOUtils.write(nextRegionGroupId.get(), fileOutputStream); + // serialize StorageGroupPartitionTable + ReadWriteIOUtils.write(storageGroupPartitionTables.size(), fileOutputStream); + for (Map.Entry storageGroupPartitionTableEntry : + storageGroupPartitionTables.entrySet()) { + ReadWriteIOUtils.write(storageGroupPartitionTableEntry.getKey(), fileOutputStream); + storageGroupPartitionTableEntry.getValue().serialize(fileOutputStream, protocol); + } + // serialize deletedRegionSet + ReadWriteIOUtils.write(deletedRegionSet.size(), fileOutputStream); + for (TRegionReplicaSet regionReplicaSet : deletedRegionSet) { + regionReplicaSet.write(protocol); + } + + // write to file + fileOutputStream.flush(); + fileOutputStream.close(); + // rename file + return tmpFile.renameTo(snapshotFile); + } finally { + // with or without success, delete temporary files anyway + for (int retry = 0; retry < 5; retry++) { + if (!tmpFile.exists() || tmpFile.delete()) { + break; + } else { + LOGGER.warn( + "Can't delete temporary snapshot file: {}, retrying...", tmpFile.getAbsolutePath()); + } + } + } + } + + public void processLoadSnapshot(File snapshotDir) throws TException, IOException { + + File snapshotFile = new File(snapshotDir, snapshotFileName); + if (!snapshotFile.exists() || !snapshotFile.isFile()) { + LOGGER.error( + "Failed to load snapshot, snapshot file [{}] does not exist.", + snapshotFile.getAbsolutePath()); + return; + } + + // TODO: Lock PartitionInfo + try (FileInputStream fileInputStream = new FileInputStream(snapshotFile); + TIOStreamTransport tioStreamTransport = new TIOStreamTransport(fileInputStream)) { + TProtocol protocol = new TBinaryProtocol(tioStreamTransport); + // before restoring a snapshot, clear all old data + clear(); + // start to restore + nextRegionGroupId.set(ReadWriteIOUtils.readInt(fileInputStream)); + + // restore StorageGroupPartitionTable + int length = ReadWriteIOUtils.readInt(fileInputStream); + for (int i = 0; i < length; i++) { + String storageGroup = ReadWriteIOUtils.readString(fileInputStream); + StorageGroupPartitionTable storageGroupPartitionTable = + new StorageGroupPartitionTable(storageGroup); + storageGroupPartitionTable.deserialize(fileInputStream, protocol); + storageGroupPartitionTables.put(storageGroup, storageGroupPartitionTable); + } + // restore deletedRegionSet + length = ReadWriteIOUtils.readInt(fileInputStream); + for (int i = 0; i < length; i++) { + TRegionReplicaSet regionReplicaSet = new TRegionReplicaSet(); + regionReplicaSet.read(protocol); + deletedRegionSet.add(regionReplicaSet); + } + } + } + + public void clear() { + nextRegionGroupId.set(0); + storageGroupPartitionTables.clear(); + deletedRegionSet.clear(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PartitionInfo that = (PartitionInfo) o; + return storageGroupPartitionTables.equals(that.storageGroupPartitionTables) + && deletedRegionSet.equals(that.deletedRegionSet); + } + + @Override + public int hashCode() { + return Objects.hash(storageGroupPartitionTables, deletedRegionSet); + } +} diff --git 
a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/RegionGroup.java b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/RegionGroup.java new file mode 100644 index 000000000000..555a5195f8a2 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/RegionGroup.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.persistence.partition; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils; + +import org.apache.thrift.TException; +import org.apache.thrift.protocol.TProtocol; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong; + +public class RegionGroup { + + private final TRegionReplicaSet replicaSet; + + // For DataRegion, each SeriesSlot * TimeSlot form a slot, + // for SchemaRegion, each SeriesSlot is a slot + private final AtomicLong slotCount; + + public RegionGroup() { + this.replicaSet = new TRegionReplicaSet(); + this.slotCount = new AtomicLong(); + } + + public RegionGroup(TRegionReplicaSet replicaSet) { + this.replicaSet = replicaSet; + this.slotCount = new AtomicLong(0); + } + + public TConsensusGroupId getId() { + return replicaSet.getRegionId(); + } + + public TRegionReplicaSet getReplicaSet() { + return replicaSet; + } + + public void addCounter(long delta) { + slotCount.getAndAdd(delta); + } + + public long getCounter() { + return slotCount.get(); + } + + public void serialize(OutputStream outputStream, TProtocol protocol) + throws IOException, TException { + replicaSet.write(protocol); + ReadWriteIOUtils.write(slotCount.get(), outputStream); + } + + public void deserialize(InputStream inputStream, TProtocol protocol) + throws IOException, TException { + replicaSet.read(protocol); + slotCount.set(ReadWriteIOUtils.readLong(inputStream)); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RegionGroup that = (RegionGroup) o; + return replicaSet.equals(that.replicaSet); + } + + @Override + public int hashCode() { + return Objects.hash(replicaSet); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/StorageGroupPartitionTable.java b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/StorageGroupPartitionTable.java new file mode 100644 index 000000000000..f5ef81017b57 --- /dev/null +++ 
b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/StorageGroupPartitionTable.java @@ -0,0 +1,450 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.persistence.partition; + +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; +import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; +import org.apache.iotdb.common.rpc.thrift.TRegionInfo; +import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; +import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; +import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import org.apache.iotdb.commons.cluster.RegionStatus; +import org.apache.iotdb.commons.partition.DataPartitionTable; +import org.apache.iotdb.commons.partition.SchemaPartitionTable; +import org.apache.iotdb.confignode.consensus.request.read.GetRegionInfoListReq; +import org.apache.iotdb.db.service.metrics.MetricsService; +import org.apache.iotdb.db.service.metrics.enums.Metric; +import org.apache.iotdb.db.service.metrics.enums.Tag; +import org.apache.iotdb.metrics.config.MetricConfigDescriptor; +import org.apache.iotdb.metrics.utils.MetricLevel; +import org.apache.iotdb.tsfile.utils.Pair; +import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils; + +import org.apache.thrift.TException; +import org.apache.thrift.protocol.TProtocol; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.Vector; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +public class StorageGroupPartitionTable { + private volatile boolean isPredeleted = false; + // The name of storage group + private String storageGroupName; + + // Total number of SeriesPartitionSlots occupied by schema, + // determines whether a new Region needs to be created + private final AtomicInteger seriesPartitionSlotsCount; + + // Region allocation particle + private final AtomicBoolean schemaRegionParticle; + private final AtomicBoolean dataRegionParticle; + + // Region + private final Map regionInfoMap; + // SchemaPartition + private final SchemaPartitionTable schemaPartitionTable; + // DataPartition + private final DataPartitionTable dataPartitionTable; + + public StorageGroupPartitionTable(String storageGroupName) { + this.storageGroupName = storageGroupName; + this.seriesPartitionSlotsCount = new AtomicInteger(0); + + this.schemaRegionParticle = new AtomicBoolean(true); + this.dataRegionParticle = new AtomicBoolean(true); + this.regionInfoMap = new 
ConcurrentHashMap<>(); + + this.schemaPartitionTable = new SchemaPartitionTable(); + this.dataPartitionTable = new DataPartitionTable(); + + addMetrics(); + } + + private void addMetrics() { + if (MetricConfigDescriptor.getInstance().getMetricConfig().getEnableMetric()) { + MetricsService.getInstance() + .getMetricManager() + .getOrCreateAutoGauge( + Metric.REGION.toString(), + MetricLevel.NORMAL, + this, + o -> o.getRegionCount(TConsensusGroupType.SchemaRegion), + Tag.NAME.toString(), + storageGroupName, + Tag.TYPE.toString(), + TConsensusGroupType.SchemaRegion.toString()); + MetricsService.getInstance() + .getMetricManager() + .getOrCreateAutoGauge( + Metric.REGION.toString(), + MetricLevel.NORMAL, + this, + o -> o.getRegionCount(TConsensusGroupType.DataRegion), + Tag.NAME.toString(), + storageGroupName, + Tag.TYPE.toString(), + TConsensusGroupType.DataRegion.toString()); + // TODO slot will be updated in the future + MetricsService.getInstance() + .getMetricManager() + .getOrCreateAutoGauge( + Metric.SLOT.toString(), + MetricLevel.NORMAL, + schemaPartitionTable, + o -> o.getSchemaPartitionMap().size(), + Tag.NAME.toString(), + storageGroupName, + Tag.TYPE.toString(), + "schemaSlotNumber"); + MetricsService.getInstance() + .getMetricManager() + .getOrCreateAutoGauge( + Metric.SLOT.toString(), + MetricLevel.NORMAL, + dataPartitionTable, + o -> o.getDataPartitionMap().size(), + Tag.NAME.toString(), + storageGroupName, + Tag.TYPE.toString(), + "dataSlotNumber"); + } + } + + public boolean isPredeleted() { + return isPredeleted; + } + + public void setPredeleted(boolean predeleted) { + isPredeleted = predeleted; + } + + /** + * Cache allocation result of new RegionGroups + * + * @param replicaSets List + */ + public void createRegionGroups(List replicaSets) { + replicaSets.forEach( + replicaSet -> regionInfoMap.put(replicaSet.getRegionId(), new RegionGroup(replicaSet))); + } + + /** @return All Regions' RegionReplicaSet within one StorageGroup */ + public List getAllReplicaSets() { + List result = new ArrayList<>(); + + for (RegionGroup regionGroup : regionInfoMap.values()) { + result.add(regionGroup.getReplicaSet()); + } + + return result; + } + + /** + * Get regions currently owned by this StorageGroup + * + * @param type SchemaRegion or DataRegion + * @return The regions currently owned by this StorageGroup + */ + public Set getRegion(TConsensusGroupType type) { + Set regionGroups = new HashSet<>(); + regionInfoMap + .values() + .forEach( + regionGroup -> { + if (regionGroup.getId().getType().equals(type)) { + regionGroups.add(regionGroup); + } + }); + return regionGroups; + } + /** + * Get the number of Regions currently owned by this StorageGroup + * + * @param type SchemaRegion or DataRegion + * @return The number of Regions currently owned by this StorageGroup + */ + public int getRegionCount(TConsensusGroupType type) { + AtomicInteger result = new AtomicInteger(0); + regionInfoMap + .values() + .forEach( + regionGroup -> { + if (regionGroup.getId().getType().equals(type)) { + result.getAndIncrement(); + } + }); + return result.getAndIncrement(); + } + + /** + * Only leader use this interface. Contending the Region allocation particle. 
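A self-contained sketch of the "allocation particle" idea used below, reduced to its core; the class name is an assumption, and the patch realises the same pattern with two AtomicBoolean fields and the contend/putBack/get methods that follow.

```java
import java.util.concurrent.atomic.AtomicBoolean;

// One-shot permit: at most one thread wins the right to trigger Region allocation at a time.
class AllocationParticle {
  private final AtomicBoolean particle = new AtomicBoolean(true);

  // Exactly one caller observes true while the particle is available.
  boolean contend() {
    return particle.getAndSet(false);
  }

  // Hand the permit back once allocation has finished.
  void putBack() {
    particle.set(true);
  }
}
```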
+ * + * @param type SchemaRegion or DataRegion + * @return True when successfully get the allocation particle, false otherwise + */ + public boolean contendRegionAllocationParticle(TConsensusGroupType type) { + switch (type) { + case SchemaRegion: + return schemaRegionParticle.getAndSet(false); + case DataRegion: + return dataRegionParticle.getAndSet(false); + default: + return false; + } + } + + /** + * Only leader use this interface. Put back the Region allocation particle. + * + * @param type SchemaRegion or DataRegion + */ + public void putBackRegionAllocationParticle(TConsensusGroupType type) { + switch (type) { + case SchemaRegion: + schemaRegionParticle.set(true); + break; + case DataRegion: + dataRegionParticle.set(true); + break; + } + } + + /** + * Only leader use this interface. Get the Region allocation particle. + * + * @param type SchemaRegion or DataRegion + */ + public boolean getRegionAllocationParticle(TConsensusGroupType type) { + switch (type) { + case SchemaRegion: + return schemaRegionParticle.get(); + case DataRegion: + return dataRegionParticle.get(); + default: + return false; + } + } + + public int getSlotsCount() { + return seriesPartitionSlotsCount.get(); + } + + /** + * Thread-safely get SchemaPartition within the specific StorageGroup + * + * @param partitionSlots SeriesPartitionSlots + * @param schemaPartition Where the results are stored + * @return True if all the SeriesPartitionSlots are matched, false otherwise + */ + public boolean getSchemaPartition( + List partitionSlots, SchemaPartitionTable schemaPartition) { + return schemaPartitionTable.getSchemaPartition(partitionSlots, schemaPartition); + } + + /** + * Thread-safely get DataPartition within the specific StorageGroup + * + * @param partitionSlots SeriesPartitionSlots and TimePartitionSlots + * @param dataPartition Where the results are stored + * @return True if all the PartitionSlots are matched, false otherwise + */ + public boolean getDataPartition( + Map> partitionSlots, + DataPartitionTable dataPartition) { + return dataPartitionTable.getDataPartition(partitionSlots, dataPartition); + } + + /** + * Create SchemaPartition within the specific StorageGroup + * + * @param assignedSchemaPartition Assigned result + */ + public void createSchemaPartition(SchemaPartitionTable assignedSchemaPartition) { + // Cache assigned result + Map deltaMap = + schemaPartitionTable.createSchemaPartition(assignedSchemaPartition); + + // Add counter + AtomicInteger total = new AtomicInteger(0); + deltaMap.forEach( + ((consensusGroupId, delta) -> { + total.getAndAdd(delta.get()); + regionInfoMap.get(consensusGroupId).addCounter(delta.get()); + })); + seriesPartitionSlotsCount.getAndAdd(total.get()); + } + + /** + * Create DataPartition within the specific StorageGroup + * + * @param assignedDataPartition Assigned result + */ + public void createDataPartition(DataPartitionTable assignedDataPartition) { + // Cache assigned result + Map deltaMap = + dataPartitionTable.createDataPartition(assignedDataPartition); + + // Add counter + AtomicInteger total = new AtomicInteger(0); + deltaMap.forEach( + ((consensusGroupId, delta) -> { + total.getAndAdd(delta.get()); + regionInfoMap.get(consensusGroupId).addCounter(delta.get()); + })); + } + + /** + * Only Leader use this interface. 
Filter unassigned SchemaPartitionSlots within the specific + * StorageGroup + * + * @param partitionSlots List + * @return Unassigned PartitionSlots + */ + public List filterUnassignedSchemaPartitionSlots( + List partitionSlots) { + return schemaPartitionTable.filterUnassignedSchemaPartitionSlots(partitionSlots); + } + + /** + * Only Leader use this interface. Filter unassigned DataPartitionSlots within the specific + * StorageGroup + * + * @param partitionSlots List + * @return Unassigned PartitionSlots + */ + public Map> filterUnassignedDataPartitionSlots( + Map> partitionSlots) { + return dataPartitionTable.filterUnassignedDataPartitionSlots(partitionSlots); + } + + /** + * Only leader use this interface. + * + * @param type SchemaRegion or DataRegion + * @return Regions that sorted by the number of allocated slots + */ + public List> getSortedRegionSlotsCounter(TConsensusGroupType type) { + List> result = new Vector<>(); + + regionInfoMap.forEach( + (consensusGroupId, regionGroup) -> { + if (consensusGroupId.getType().equals(type)) { + result.add(new Pair<>(regionGroup.getCounter(), consensusGroupId)); + } + }); + + result.sort(Comparator.comparingLong(Pair::getLeft)); + return result; + } + + public void getRegionInfoList( + GetRegionInfoListReq regionsInfoReq, List regionInfoList) { + regionInfoMap.forEach( + (consensusGroupId, regionGroup) -> { + TRegionReplicaSet replicaSet = regionGroup.getReplicaSet(); + if (regionsInfoReq.getRegionType() == null) { + buildTRegionsInfo(regionInfoList, replicaSet, regionGroup); + } else if (regionsInfoReq.getRegionType().ordinal() + == replicaSet.getRegionId().getType().ordinal()) { + buildTRegionsInfo(regionInfoList, replicaSet, regionGroup); + } + }); + } + + private void buildTRegionsInfo( + List regionInfoList, TRegionReplicaSet replicaSet, RegionGroup regionGroup) { + replicaSet + .getDataNodeLocations() + .forEach( + (dataNodeLocation) -> { + TRegionInfo tRegionInfoList = new TRegionInfo(); + tRegionInfoList.setConsensusGroupId(replicaSet.getRegionId()); + tRegionInfoList.setStorageGroup(storageGroupName); + long slots = regionGroup.getCounter(); + tRegionInfoList.setSlots((int) slots); + tRegionInfoList.setDataNodeId(dataNodeLocation.getDataNodeId()); + tRegionInfoList.setClientRpcIp(dataNodeLocation.getClientRpcEndPoint().getIp()); + tRegionInfoList.setClientRpcPort(dataNodeLocation.getClientRpcEndPoint().getPort()); + // TODO: Wait for data migration. 
And then add the state + tRegionInfoList.setStatus(RegionStatus.Up.getStatus()); + regionInfoList.add(tRegionInfoList); + }); + } + + public void serialize(OutputStream outputStream, TProtocol protocol) + throws IOException, TException { + ReadWriteIOUtils.write(isPredeleted, outputStream); + ReadWriteIOUtils.write(storageGroupName, outputStream); + ReadWriteIOUtils.write(seriesPartitionSlotsCount.get(), outputStream); + + ReadWriteIOUtils.write(regionInfoMap.size(), outputStream); + for (Map.Entry regionInfoEntry : regionInfoMap.entrySet()) { + regionInfoEntry.getKey().write(protocol); + regionInfoEntry.getValue().serialize(outputStream, protocol); + } + + schemaPartitionTable.serialize(outputStream, protocol); + dataPartitionTable.serialize(outputStream, protocol); + } + + public void deserialize(InputStream inputStream, TProtocol protocol) + throws IOException, TException { + isPredeleted = ReadWriteIOUtils.readBool(inputStream); + storageGroupName = ReadWriteIOUtils.readString(inputStream); + seriesPartitionSlotsCount.set(ReadWriteIOUtils.readInt(inputStream)); + + int length = ReadWriteIOUtils.readInt(inputStream); + for (int i = 0; i < length; i++) { + TConsensusGroupId consensusGroupId = new TConsensusGroupId(); + consensusGroupId.read(protocol); + RegionGroup regionGroup = new RegionGroup(); + regionGroup.deserialize(inputStream, protocol); + regionInfoMap.put(consensusGroupId, regionGroup); + } + + schemaPartitionTable.deserialize(inputStream, protocol); + dataPartitionTable.deserialize(inputStream, protocol); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + StorageGroupPartitionTable that = (StorageGroupPartitionTable) o; + return isPredeleted == that.isPredeleted + && regionInfoMap.equals(that.regionInfoMap) + && schemaPartitionTable.equals(that.schemaPartitionTable) + && dataPartitionTable.equals(that.dataPartitionTable); + } + + @Override + public int hashCode() { + return Objects.hash(isPredeleted, regionInfoMap, schemaPartitionTable, dataPartitionTable); + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/CompletedProcedureContainer.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/CompletedProcedureContainer.java new file mode 100644 index 000000000000..ba6712a983d3 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/CompletedProcedureContainer.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.procedure; + +public class CompletedProcedureContainer { + private final Procedure procedure; + + public CompletedProcedureContainer(Procedure procedure) { + this.procedure = procedure; + } + + public Procedure getProcedure() { + return procedure; + } + + public boolean isExpired(long now, long evictTtl) { + return (now - procedure.getLastUpdate()) >= evictTtl; + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/CompletedProcedureRecycler.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/CompletedProcedureRecycler.java new file mode 100644 index 000000000000..46d1b47dbc01 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/CompletedProcedureRecycler.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.procedure; + +import org.apache.iotdb.confignode.procedure.store.IProcedureStore; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** Internal cleaner that removes the completed procedure results after a TTL. */ +public class CompletedProcedureRecycler extends InternalProcedure { + private static final Logger LOG = LoggerFactory.getLogger(CompletedProcedureRecycler.class); + private static final int DEFAULT_BATCH_SIZE = 32; + private long evictTTL; + private final Map> completed; + private final IProcedureStore store; + + public CompletedProcedureRecycler( + IProcedureStore store, + Map> completedMap, + long cleanTimeInterval, + long evictTTL) { + super(TimeUnit.SECONDS.toMillis(cleanTimeInterval)); + this.completed = completedMap; + this.store = store; + this.evictTTL = evictTTL; + } + + @Override + protected void periodicExecute(final Env env) { + if (completed.isEmpty()) { + if (LOG.isTraceEnabled()) { + LOG.trace("No completed procedures to cleanup."); + } + return; + } + + final long[] batchIds = new long[DEFAULT_BATCH_SIZE]; + int batchCount = 0; + + final long now = System.currentTimeMillis(); + final Iterator>> it = + completed.entrySet().iterator(); + while (it.hasNext() && store.isRunning()) { + final Map.Entry> entry = it.next(); + final CompletedProcedureContainer retainer = entry.getValue(); + final Procedure proc = retainer.getProcedure(); + if (retainer.isExpired(now, evictTTL)) { + // Failed procedures aren't persisted in WAL. 
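A hedged sketch of how the recycler above might be wired up; the variable names, values, and the type parameter are placeholders. Note the unit asymmetry in the constructor: cleanTimeInterval is interpreted as seconds, while evictTTL is compared directly against System.currentTimeMillis() and is therefore in milliseconds.

```java
// Scan for expired results every 30 seconds; evict a completed procedure
// once its last update is more than 10 minutes old.
CompletedProcedureRecycler<ConfigNodeProcedureEnv> recycler =
    new CompletedProcedureRecycler<>(store, completed, 30, TimeUnit.MINUTES.toMillis(10));
```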
+ batchIds[batchCount++] = entry.getKey(); + if (batchCount == batchIds.length) { + store.delete(batchIds, 0, batchCount); + batchCount = 0; + } + it.remove(); + LOG.trace("Evict completed {}", proc); + } + } + if (batchCount > 0) { + store.delete(batchIds, 0, batchCount); + } + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/DeleteStorageGroupProcedure.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/DeleteStorageGroupProcedure.java deleted file mode 100644 index 97923f493c08..000000000000 --- a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/DeleteStorageGroupProcedure.java +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iotdb.confignode.procedure; - -import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; -import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; -import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; -import org.apache.iotdb.common.rpc.thrift.TSStatus; -import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException; -import org.apache.iotdb.commons.utils.StatusUtils; -import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.commons.utils.ThriftConfigNodeSerDeUtils; -import org.apache.iotdb.confignode.consensus.request.write.DeleteStorageGroupReq; -import org.apache.iotdb.confignode.persistence.PartitionInfo; -import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; -import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; -import org.apache.iotdb.mpp.rpc.thrift.InternalService; -import org.apache.iotdb.mpp.rpc.thrift.TInvalidateCacheReq; -import org.apache.iotdb.procedure.Procedure; -import org.apache.iotdb.procedure.StateMachineProcedure; -import org.apache.iotdb.procedure.exception.ProcedureException; -import org.apache.iotdb.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.procedure.exception.ProcedureYieldException; - -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; - -public class DeleteStorageGroupProcedure - extends StateMachineProcedure { - private static final Logger LOG = LoggerFactory.getLogger(Procedure.class); - private static final int retryThreshold = 5; - - private static boolean byPassForTest = false; - - @TestOnly - public static void setByPassForTest(boolean byPass) { - byPassForTest = byPass; - } - - private TStorageGroupSchema deleteSgSchema; - - public DeleteStorageGroupProcedure() { - super(); - } - - public DeleteStorageGroupProcedure(TStorageGroupSchema deleteSgSchema) { - super(); - this.deleteSgSchema = deleteSgSchema; 
- } - - public TStorageGroupSchema getDeleteSgSchema() { - return deleteSgSchema; - } - - public void setDeleteSgSchema(TStorageGroupSchema deleteSgSchema) { - this.deleteSgSchema = deleteSgSchema; - } - - @Override - protected Flow executeFromState(ConfigNodeProcedureEnv env, DeleteStorageGroupState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { - if (deleteSgSchema == null) { - return Flow.NO_MORE_STATE; - } - String storageGroupName = deleteSgSchema.getName(); - List dataRegionGroupIds = deleteSgSchema.getDataRegionGroupIds(); - List schemaRegionGroupIds = deleteSgSchema.getSchemaRegionGroupIds(); - List dataRegionReplicaSets = - new ArrayList<>(PartitionInfo.getInstance().getRegionReplicaSets(dataRegionGroupIds)); - List schemaRegionReplicaSets = - new ArrayList<>(PartitionInfo.getInstance().getRegionReplicaSets(schemaRegionGroupIds)); - try { - switch (state) { - case DELETE_STORAGE_GROUP_PREPEARE: - // TODO: lock related ClusterSchemaInfo, PartitionInfo and Regions - setNextState(DeleteStorageGroupState.DELETE_DATA_REGION); - break; - case DELETE_DATA_REGION: - LOG.info("Delete dataRegions of {}", storageGroupName); - if (byPassForTest || deleteRegion(env, dataRegionReplicaSets)) { - setNextState(DeleteStorageGroupState.DELETE_SCHEMA_REGION); - } - break; - case DELETE_SCHEMA_REGION: - LOG.info("Delete schemaRegions of {}", storageGroupName); - if (byPassForTest || deleteRegion(env, schemaRegionReplicaSets)) { - setNextState(DeleteStorageGroupState.DELETE_CONFIG); - } - break; - case DELETE_CONFIG: - LOG.info("Delete config info of {}", storageGroupName); - TSStatus status = deleteConfig(env, deleteSgSchema); - if (verifySucceed(status)) { - if (byPassForTest) { - return Flow.NO_MORE_STATE; - } - setNextState(DeleteStorageGroupState.INVALIDATE_CACHE); - } else if (getCycles() > retryThreshold) { - setFailure(new ProcedureException("Delete config info id failed, status is " + status)); - } - break; - case INVALIDATE_CACHE: - LOG.info("Invalidate cache of {}", storageGroupName); - invalidateCache(env, storageGroupName); - return Flow.NO_MORE_STATE; - } - } catch (TException | IOException e) { - LOG.error( - "Retriable error trying to delete storage group {}, state {}", - storageGroupName, - state, - e); - if (getCycles() > retryThreshold) { - setFailure(new ProcedureException("State stack at " + state)); - } - } - return Flow.HAS_MORE_STATE; - } - - private TSStatus deleteConfig(ConfigNodeProcedureEnv env, TStorageGroupSchema deleteSgSchema) { - DeleteStorageGroupReq deleteStorageGroupReq = new DeleteStorageGroupReq(deleteSgSchema); - return env.getConfigManager() - .getClusterSchemaManager() - .deleteStorageGroup(deleteStorageGroupReq); - } - - private boolean deleteRegion( - ConfigNodeProcedureEnv env, List regionReplicaSets) throws TException { - for (TRegionReplicaSet dataRegionReplicaSet : regionReplicaSets) { - TConsensusGroupId regionId = dataRegionReplicaSet.getRegionId(); - InternalService.Client dataNodeClient = null; - try { - dataNodeClient = env.getDataNodeClient(dataRegionReplicaSet); - if (dataNodeClient != null) { - TSStatus status = dataNodeClient.deleteRegion(regionId); - if (status.getCode() != StatusUtils.OK.getCode()) { - if (getCycles() > retryThreshold) { - setFailure( - new ProcedureException( - "Delete data region id=" + regionId + " failed, status is " + status)); - } - return false; - } - LOG.info("Delete region {} success", regionId); - } - } catch (IOException e) { - LOG.error("Connect dataRegion-{} failed", 
dataRegionReplicaSet.getRegionId(), e); - if (getCycles() > retryThreshold) { - setFailure( - new ProcedureException( - "Delete data region id=" + regionId + " failed", e.getCause())); - } - return false; - } - } - return true; - } - - private void invalidateCache(ConfigNodeProcedureEnv env, String storageGroupName) - throws IOException, TException { - List allDataNodes = - env.getConfigManager().getNodeManager().getOnlineDataNodes(); - TInvalidateCacheReq invalidateCacheReq = new TInvalidateCacheReq(); - invalidateCacheReq.setStorageGroup(true); - invalidateCacheReq.setFullPath(storageGroupName); - for (TDataNodeLocation dataNodeLocation : allDataNodes) { - env.getDataNodeClient(dataNodeLocation).invalidateSchemaCache(invalidateCacheReq); - env.getDataNodeClient(dataNodeLocation).invalidatePartitionCache(invalidateCacheReq); - } - } - - @Override - protected void rollbackState( - ConfigNodeProcedureEnv clusterProcedureEnvironment, - DeleteStorageGroupState deleteStorageGroupState) - throws IOException, InterruptedException {} - - @Override - protected DeleteStorageGroupState getState(int stateId) { - return DeleteStorageGroupState.values()[stateId]; - } - - @Override - protected int getStateId(DeleteStorageGroupState deleteStorageGroupState) { - return deleteStorageGroupState.ordinal(); - } - - @Override - protected DeleteStorageGroupState getInitialState() { - return DeleteStorageGroupState.DELETE_STORAGE_GROUP_PREPEARE; - } - - @Override - public void serialize(ByteBuffer byteBuffer) { - byteBuffer.putInt(ProcedureFactory.ProcedureType.DELETE_STORAGE_GROUP_PROCEDURE.ordinal()); - super.serialize(byteBuffer); - ThriftConfigNodeSerDeUtils.serializeTStorageGroupSchema(deleteSgSchema, byteBuffer); - } - - @Override - public void deserialize(ByteBuffer byteBuffer) { - super.deserialize(byteBuffer); - try { - deleteSgSchema = ThriftConfigNodeSerDeUtils.deserializeTStorageGroupSchema(byteBuffer); - } catch (ThriftSerDeException e) { - LOG.error("error in deser", e); - } - } - - public boolean verifySucceed(TSStatus status) { - return status.getCode() == StatusUtils.OK.getCode(); - } - - @Override - public boolean equals(Object that) { - if (that instanceof DeleteStorageGroupProcedure) { - DeleteStorageGroupProcedure thatProc = (DeleteStorageGroupProcedure) that; - return thatProc.getProcId() == this.getProcId() - && thatProc.getState() == this.getState() - && thatProc.deleteSgSchema.equals(this.getDeleteSgSchema()); - } - return false; - } -} diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/InternalProcedure.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/InternalProcedure.java similarity index 87% rename from procedure/src/main/java/org/apache/iotdb/procedure/InternalProcedure.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/InternalProcedure.java index 6d76e5e79db8..a3f9a110b620 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/InternalProcedure.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/InternalProcedure.java @@ -17,10 +17,10 @@ * under the License. 
*/ -package org.apache.iotdb.procedure; +package org.apache.iotdb.confignode.procedure; -import org.apache.iotdb.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.procedure.exception.ProcedureYieldException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import java.io.IOException; import java.nio.ByteBuffer; @@ -53,9 +53,6 @@ protected boolean abort(Env env) { throw new UnsupportedOperationException(); } - @Override - public void serialize(ByteBuffer byteBuffer) {} - @Override public void deserialize(ByteBuffer byteBuffer) {} } diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/Procedure.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/Procedure.java similarity index 94% rename from procedure/src/main/java/org/apache/iotdb/procedure/Procedure.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/Procedure.java index 6ca34b3a1f0b..668f7db971f8 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/Procedure.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/Procedure.java @@ -17,19 +17,21 @@ * under the License. */ -package org.apache.iotdb.procedure; +package org.apache.iotdb.confignode.procedure; -import org.apache.iotdb.procedure.exception.ProcedureAbortedException; -import org.apache.iotdb.procedure.exception.ProcedureException; -import org.apache.iotdb.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.procedure.exception.ProcedureTimeoutException; -import org.apache.iotdb.procedure.exception.ProcedureYieldException; -import org.apache.iotdb.procedure.store.IProcedureStore; -import org.apache.iotdb.service.rpc.thrift.ProcedureState; +import org.apache.iotdb.confignode.procedure.exception.ProcedureAbortedException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureTimeoutException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; +import org.apache.iotdb.confignode.procedure.state.ProcedureLockState; +import org.apache.iotdb.confignode.procedure.state.ProcedureState; +import org.apache.iotdb.confignode.procedure.store.IProcedureStore; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.DataOutputStream; import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.nio.ByteBuffer; @@ -126,63 +128,65 @@ protected abstract Procedure[] execute(Env env) */ protected abstract boolean abort(Env env); - public void serialize(ByteBuffer byteBuffer) { + public void serialize(DataOutputStream stream) throws IOException { // procid - byteBuffer.putLong(this.procId); + stream.writeLong(this.procId); // state - byteBuffer.putInt(this.state.getValue()); + stream.writeInt(this.state.ordinal()); // submit time - byteBuffer.putLong(this.submittedTime); + stream.writeLong(this.submittedTime); // last updated - byteBuffer.putLong(this.lastUpdate); + stream.writeLong(this.lastUpdate); // parent id - byteBuffer.putLong(this.parentProcId); + stream.writeLong(this.parentProcId); // time out - byteBuffer.putLong(this.timeout); + stream.writeLong(this.timeout); // stack indexes if (stackIndexes != null) { - byteBuffer.putInt(stackIndexes.length); - Arrays.stream(stackIndexes).forEach(byteBuffer::putInt); + 
stream.writeInt(stackIndexes.length); + for (int index : stackIndexes) { + stream.writeInt(index); + } } else { - byteBuffer.putInt(-1); + stream.writeInt(-1); } // exceptions if (hasException()) { - byteBuffer.put((byte) 1); + stream.write((byte) 1); String exceptionClassName = exception.getClass().getName(); byte[] exceptionClassNameBytes = exceptionClassName.getBytes(StandardCharsets.UTF_8); - byteBuffer.putInt(exceptionClassNameBytes.length); - byteBuffer.put(exceptionClassNameBytes); + stream.writeInt(exceptionClassNameBytes.length); + stream.write(exceptionClassNameBytes); String message = this.exception.getMessage(); if (message != null) { byte[] messageBytes = message.getBytes(StandardCharsets.UTF_8); - byteBuffer.putInt(messageBytes.length); - byteBuffer.put(messageBytes); + stream.writeInt(messageBytes.length); + stream.write(messageBytes); } else { - byteBuffer.putInt(-1); + stream.writeInt(-1); } } else { - byteBuffer.put((byte) 0); + stream.write((byte) 0); } // result if (result != null) { - byteBuffer.putInt(result.length); - byteBuffer.put(result); + stream.writeInt(result.length); + stream.write(result); } else { - byteBuffer.putInt(-1); + stream.writeInt(-1); } // has lock - byteBuffer.put(this.hasLock() ? (byte) 1 : (byte) 0); + stream.write(this.hasLock() ? (byte) 1 : (byte) 0); } public void deserialize(ByteBuffer byteBuffer) { // procid this.setProcId(byteBuffer.getLong()); // state - this.setState(ProcedureState.findByValue(byteBuffer.getInt())); + this.setState(ProcedureState.values()[byteBuffer.getInt()]); // submit time this.setSubmittedTime(byteBuffer.getLong()); // last updated diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/ProcedureExecutor.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureExecutor.java similarity index 93% rename from procedure/src/main/java/org/apache/iotdb/procedure/ProcedureExecutor.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureExecutor.java index 35da844070b9..67980f6404f9 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/ProcedureExecutor.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureExecutor.java @@ -17,15 +17,16 @@ * under the License. 
*/ -package org.apache.iotdb.procedure; +package org.apache.iotdb.confignode.procedure; -import org.apache.iotdb.procedure.exception.ProcedureException; -import org.apache.iotdb.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.procedure.exception.ProcedureYieldException; -import org.apache.iotdb.procedure.scheduler.ProcedureScheduler; -import org.apache.iotdb.procedure.scheduler.SimpleProcedureScheduler; -import org.apache.iotdb.procedure.store.IProcedureStore; -import org.apache.iotdb.service.rpc.thrift.ProcedureState; +import org.apache.iotdb.confignode.procedure.exception.ProcedureException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; +import org.apache.iotdb.confignode.procedure.scheduler.ProcedureScheduler; +import org.apache.iotdb.confignode.procedure.scheduler.SimpleProcedureScheduler; +import org.apache.iotdb.confignode.procedure.state.ProcedureLockState; +import org.apache.iotdb.confignode.procedure.state.ProcedureState; +import org.apache.iotdb.confignode.procedure.store.IProcedureStore; import com.google.common.base.Preconditions; import org.slf4j.Logger; @@ -45,14 +46,11 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.ReentrantLock; public class ProcedureExecutor { private static final Logger LOG = LoggerFactory.getLogger(ProcedureExecutor.class); - private final ConcurrentHashMap idLockMap = new ConcurrentHashMap<>(); - - private final ConcurrentHashMap> completed = + private final ConcurrentHashMap> completed = new ConcurrentHashMap<>(); private final ConcurrentHashMap> rollbackStack = @@ -70,7 +68,6 @@ public class ProcedureExecutor { private int corePoolSize; private int maxPoolSize; - private volatile long keepAliveTime; private final ProcedureScheduler scheduler; @@ -113,15 +110,6 @@ public void init(int numThreads) { recover(); } - public void setKeepAliveTime(final long keepAliveTime, final TimeUnit timeUnit) { - this.keepAliveTime = timeUnit.toMillis(keepAliveTime); - this.scheduler.signalAll(); - } - - public long getKeepAliveTime(final TimeUnit timeUnit) { - return timeUnit.convert(keepAliveTime, TimeUnit.MILLISECONDS); - } - private void recover() { // 1.Build rollback stack int runnableCount = 0; @@ -133,7 +121,7 @@ private void recover() { store.load(procedureList); for (Procedure proc : procedureList) { if (proc.isFinished()) { - completed.putIfAbsent(proc.getProcId(), new CompletedProcedureRetainer(proc)); + completed.putIfAbsent(proc.getProcId(), new CompletedProcedureContainer(proc)); } else { if (!proc.hasParent()) { rollbackStack.put(proc.getProcId(), new RootProcedureStack<>()); @@ -283,7 +271,7 @@ public void startWorkers() { public void startCompletedCleaner(long cleanTimeInterval, long cleanEvictTTL) { addInternalProcedure( - new CompletedProcedureCleaner(store, completed, cleanTimeInterval, cleanEvictTTL)); + new CompletedProcedureRecycler(store, completed, cleanTimeInterval, cleanEvictTTL)); } private void addInternalProcedure(InternalProcedure interalProcedure) { @@ -647,10 +635,7 @@ private ProcedureLockState acquireLock(Procedure proc) { * @return procedure lock state */ private ProcedureLockState executeRollback(Procedure procedure) { - ReentrantLock idLock = - idLockMap.computeIfAbsent(procedure.getProcId(), procId -> new ReentrantLock()); try { - idLock.lock(); 
procedure.doRollback(this.environment); } catch (IOException e) { LOG.error("Roll back failed for {}", procedure, e); @@ -658,8 +643,6 @@ private ProcedureLockState executeRollback(Procedure procedure) { LOG.warn("Interrupted exception occured for {}", procedure, e); } catch (Throwable t) { LOG.error("CODE-BUG: runtime exception for {}", procedure, t); - } finally { - idLock.unlock(); } cleanupAfterRollback(procedure); return ProcedureLockState.LOCK_ACQUIRED; @@ -699,7 +682,7 @@ private void executeCompletionCleanup(Procedure proc) { private void rootProcedureCleanup(Procedure proc) { executeCompletionCleanup(proc); - CompletedProcedureRetainer retainer = new CompletedProcedureRetainer<>(proc); + CompletedProcedureContainer retainer = new CompletedProcedureContainer<>(proc); completed.put(proc.getProcId(), retainer); rollbackStack.remove(proc.getProcId()); procedures.remove(proc.getProcId()); @@ -727,6 +710,7 @@ private long pushProcedure(Procedure procedure) { private class WorkerThread extends StoppableThread { private final AtomicLong startTime = new AtomicLong(Long.MAX_VALUE); private volatile Procedure activeProcedure; + protected long keepAliveTime = -1; public WorkerThread(ThreadGroup threadGroup) { this(threadGroup, "ProcExecWorker-"); @@ -754,16 +738,12 @@ public void run() { this.activeProcedure = procedure; int activeCount = activeExecutorCount.incrementAndGet(); startTime.set(System.currentTimeMillis()); - ReentrantLock idLock = - idLockMap.computeIfAbsent(procedure.getProcId(), id -> new ReentrantLock()); - idLock.lock(); executeProcedure(procedure); activeCount = activeExecutorCount.decrementAndGet(); LOG.trace("Halt pid={}, activeCount={}", procedure.getProcId(), activeCount); this.activeProcedure = null; lastUpdated = System.currentTimeMillis(); startTime.set(lastUpdated); - idLock.unlock(); } } catch (Throwable throwable) { @@ -793,8 +773,10 @@ public long getCurrentRunTime() { // A worker thread which can be added when core workers are stuck. Will timeout after // keepAliveTime if there is no procedure to run. private final class KeepAliveWorkerThread extends WorkerThread { + public KeepAliveWorkerThread(ThreadGroup group) { super(group, "KAProcExecWorker-"); + this.keepAliveTime = TimeUnit.SECONDS.toMillis(10); } @Override @@ -810,10 +792,6 @@ private final class WorkerMonitor extends InternalProcedure { private static final float DEFAULT_WORKER_ADD_STUCK_PERCENTAGE = 0.5f; // 50% stuck - private float addWorkerStuckPercentage = DEFAULT_WORKER_ADD_STUCK_PERCENTAGE; - private int timeoutInterval = DEFAULT_WORKER_MONITOR_INTERVAL; - private int stuckThreshold = DEFAULT_WORKER_STUCK_THRESHOLD; - public WorkerMonitor() { super(DEFAULT_WORKER_MONITOR_INTERVAL); updateTimestamp(); @@ -823,7 +801,8 @@ private int checkForStuckWorkers() { // check if any of the worker is stuck int stuckCount = 0; for (WorkerThread worker : workerThreads) { - if (worker.activeProcedure == null || worker.getCurrentRunTime() < stuckThreshold) { + if (worker.activeProcedure == null + || worker.getCurrentRunTime() < DEFAULT_WORKER_STUCK_THRESHOLD) { continue; } @@ -844,7 +823,7 @@ private void checkThreadCount(final int stuckCount) { final float stuckPerc = ((float) stuckCount) / workerThreads.size(); // let's add new worker thread more aggressively, as they will timeout finally if there is no // work to do. 
- if (stuckPerc >= addWorkerStuckPercentage && workerThreads.size() < maxPoolSize) { + if (stuckPerc >= DEFAULT_WORKER_ADD_STUCK_PERCENTAGE && workerThreads.size() < maxPoolSize) { final KeepAliveWorkerThread worker = new KeepAliveWorkerThread(threadGroup); workerThreads.add(worker); worker.start(); @@ -954,7 +933,7 @@ public boolean abort(long procId) { } public Procedure getResult(long procId) { - CompletedProcedureRetainer retainer = completed.get(procId); + CompletedProcedureContainer retainer = completed.get(procId); if (retainer == null) { return null; } else { @@ -969,7 +948,7 @@ public Procedure getResult(long procId) { * @return procedure or retainer */ public Procedure getResultOrProcedure(long procId) { - CompletedProcedureRetainer retainer = completed.get(procId); + CompletedProcedureContainer retainer = completed.get(procId); if (retainer == null) { return procedures.get(procId); } else { diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/RootProcedureStack.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/RootProcedureStack.java similarity index 96% rename from procedure/src/main/java/org/apache/iotdb/procedure/RootProcedureStack.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/RootProcedureStack.java index 2491e827d358..88057c97de5b 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/RootProcedureStack.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/RootProcedureStack.java @@ -17,10 +17,10 @@ * under the License. */ -package org.apache.iotdb.procedure; +package org.apache.iotdb.confignode.procedure; -import org.apache.iotdb.procedure.exception.ProcedureException; -import org.apache.iotdb.service.rpc.thrift.ProcedureState; +import org.apache.iotdb.confignode.procedure.exception.ProcedureException; +import org.apache.iotdb.confignode.procedure.state.ProcedureState; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/StateMachineProcedure.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/StateMachineProcedure.java similarity index 95% rename from procedure/src/main/java/org/apache/iotdb/procedure/StateMachineProcedure.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/StateMachineProcedure.java index cf3c812ac47a..f63a76141abb 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/StateMachineProcedure.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/StateMachineProcedure.java @@ -17,14 +17,15 @@ * under the License. 
*/ -package org.apache.iotdb.procedure; +package org.apache.iotdb.confignode.procedure; -import org.apache.iotdb.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.procedure.exception.ProcedureYieldException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -300,11 +301,11 @@ protected void toStringState(StringBuilder builder) { } @Override - public void serialize(ByteBuffer byteBuffer) { - super.serialize(byteBuffer); - byteBuffer.putInt(stateCount); + public void serialize(DataOutputStream stream) throws IOException { + super.serialize(stream); + stream.writeInt(stateCount); for (int i = 0; i < stateCount; ++i) { - byteBuffer.putInt(states[i]); + stream.writeInt(states[i]); } } diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/StoppableThread.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/StoppableThread.java similarity index 97% rename from procedure/src/main/java/org/apache/iotdb/procedure/StoppableThread.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/StoppableThread.java index 9309333a6569..84f2f27ed171 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/StoppableThread.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/StoppableThread.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.procedure; +package org.apache.iotdb.confignode.procedure; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/TimeoutExecutorThread.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/TimeoutExecutorThread.java similarity index 98% rename from procedure/src/main/java/org/apache/iotdb/procedure/TimeoutExecutorThread.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/TimeoutExecutorThread.java index 5ac861560ffe..28ba7aafceff 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/TimeoutExecutorThread.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/TimeoutExecutorThread.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.apache.iotdb.procedure; +package org.apache.iotdb.confignode.procedure; import java.util.concurrent.DelayQueue; import java.util.concurrent.Delayed; diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/ConfigNodeProcedureEnv.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/ConfigNodeProcedureEnv.java index 922cebff5083..b2ec2c47e528 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/ConfigNodeProcedureEnv.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/ConfigNodeProcedureEnv.java @@ -19,66 +19,148 @@ package org.apache.iotdb.confignode.procedure.env; -import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; -import org.apache.iotdb.common.rpc.thrift.TEndPoint; -import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; -import org.apache.iotdb.commons.client.IClientManager; -import org.apache.iotdb.commons.client.sync.SyncDataNodeInternalServiceClient; +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.confignode.client.SyncConfigNodeClientPool; +import org.apache.iotdb.confignode.client.SyncDataNodeClientPool; +import org.apache.iotdb.confignode.consensus.request.write.ApplyConfigNodeReq; +import org.apache.iotdb.confignode.consensus.request.write.DeleteStorageGroupReq; +import org.apache.iotdb.confignode.consensus.request.write.PreDeleteStorageGroupReq; import org.apache.iotdb.confignode.manager.ConfigManager; -import org.apache.iotdb.db.client.DataNodeClientPoolFactory; -import org.apache.iotdb.mpp.rpc.thrift.InternalService; +import org.apache.iotdb.confignode.procedure.scheduler.ProcedureScheduler; +import org.apache.iotdb.mpp.rpc.thrift.TInvalidateCacheReq; +import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import java.util.concurrent.locks.ReentrantLock; public class ConfigNodeProcedureEnv { private static final Logger LOG = LoggerFactory.getLogger(ConfigNodeProcedureEnv.class); + private final ReentrantLock addConfigNodeLock = new ReentrantLock(); + private final ConfigManager configManager; - public ConfigNodeProcedureEnv(ConfigManager configManager) { - this.configManager = configManager; + private final ProcedureScheduler scheduler; + + private static boolean skipForTest = false; + + private static boolean invalidCacheResult = true; + + public static void setSkipForTest(boolean skipForTest) { + ConfigNodeProcedureEnv.skipForTest = skipForTest; } - // TODO: reuse the same ClientPool with other module - private static final IClientManager - INTERNAL_SERVICE_CLIENT_MANAGER = - new IClientManager.Factory() - .createClientManager( - new DataNodeClientPoolFactory.SyncDataNodeInternalServiceClientPoolFactory()); + public static void setInvalidCacheResult(boolean result) { + ConfigNodeProcedureEnv.invalidCacheResult = result; + } + + public ConfigNodeProcedureEnv(ConfigManager configManager, ProcedureScheduler scheduler) { + this.configManager = configManager; + this.scheduler = scheduler; + } public ConfigManager getConfigManager() { return configManager; } - public InternalService.Client getDataNodeClient(TRegionReplicaSet dataRegionReplicaSet) - throws IOException { - List dataNodeLocations = 
dataRegionReplicaSet.getDataNodeLocations(); - int retry = dataNodeLocations.size() - 1; - for (TDataNodeLocation dataNodeLocation : dataNodeLocations) { - try { - return INTERNAL_SERVICE_CLIENT_MANAGER.borrowClient(dataNodeLocation.getInternalEndPoint()); - } catch (IOException e) { - if (retry-- > 0) { - LOG.warn( - "Connect dataRegion-{} at dataNode-{} failed, trying next replica..", - dataRegionReplicaSet.getRegionId(), - dataNodeLocation); - } else { - LOG.warn("Connect dataRegion{} failed", dataRegionReplicaSet.getRegionId()); - throw e; - } + /** + * Delete ConfigNode cache, includes ClusterSchemaInfo and PartitionInfo + * + * @param name storage group name + * @return tsStatus + */ + public TSStatus deleteConfig(String name) { + DeleteStorageGroupReq deleteStorageGroupReq = new DeleteStorageGroupReq(name); + return configManager.getClusterSchemaManager().deleteStorageGroup(deleteStorageGroupReq); + } + + /** + * Pre delete a storage group + * + * @param preDeleteType execute/rollback + * @param deleteSgName storage group name + */ + public void preDelete(PreDeleteStorageGroupReq.PreDeleteType preDeleteType, String deleteSgName) { + configManager.getPartitionManager().preDeleteStorageGroup(deleteSgName, preDeleteType); + } + + /** + * @param storageGroupName Storage group name + * @return ALL SUCCESS OR NOT + * @throws IOException IOE + * @throws TException Thrift IOE + */ + public boolean invalidateCache(String storageGroupName) throws IOException, TException { + // TODO: Remove it after IT is supported + if (skipForTest) { + return invalidCacheResult; + } + List allDataNodes = configManager.getNodeManager().getOnlineDataNodes(-1); + TInvalidateCacheReq invalidateCacheReq = new TInvalidateCacheReq(); + invalidateCacheReq.setStorageGroup(true); + invalidateCacheReq.setFullPath(storageGroupName); + for (TDataNodeInfo dataNodeInfo : allDataNodes) { + final TSStatus invalidateSchemaStatus = + SyncDataNodeClientPool.getInstance() + .invalidateSchemaCache( + dataNodeInfo.getLocation().getInternalEndPoint(), invalidateCacheReq); + final TSStatus invalidatePartitionStatus = + SyncDataNodeClientPool.getInstance() + .invalidatePartitionCache( + dataNodeInfo.getLocation().getInternalEndPoint(), invalidateCacheReq); + if (!verifySucceed(invalidatePartitionStatus, invalidateSchemaStatus)) { + LOG.error( + "Invalidate cache failed, invalidate partition cache status is {}, invalidate schema cache status is {}", + invalidatePartitionStatus, + invalidateSchemaStatus); + return false; } } - return null; + return true; + } + + public boolean verifySucceed(TSStatus... status) { + return Arrays.stream(status) + .allMatch(tsStatus -> tsStatus.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + + /** + * Execute remotely on the new node + * + * @param tConfigNodeLocation new config node location + */ + public void addConsensusGroup(TConfigNodeLocation tConfigNodeLocation) { + List configNodeLocations = new ArrayList<>(); + configNodeLocations.addAll(configManager.getNodeManager().getOnlineConfigNodes()); + configNodeLocations.add(tConfigNodeLocation); + SyncConfigNodeClientPool.getInstance() + .addConsensusGroup(tConfigNodeLocation.getInternalEndPoint(), configNodeLocations); + } + + /** + * When current node is leader, execute it. 
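+ * <p>A short descriptive note based on the body below: this step only wraps the new
+ * location in an ApplyConfigNodeReq (a consensus write request) and submits it through the
+ * NodeManager, which is why it must run on the leader; AddConfigNodeProcedure invokes it
+ * from its ADD_PEER state.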
+ * + * @param tConfigNodeLocation new config node location + */ + public void addPeer(TConfigNodeLocation tConfigNodeLocation) { + configManager.getNodeManager().applyConfigNode(new ApplyConfigNodeReq(tConfigNodeLocation)); + } + + public ReentrantLock getAddConfigNodeLock() { + return addConfigNodeLock; } - public InternalService.Client getDataNodeClient(TDataNodeLocation dataNodeLocation) - throws IOException { - return INTERNAL_SERVICE_CLIENT_MANAGER.borrowClient(dataNodeLocation.getInternalEndPoint()); + public ProcedureScheduler getScheduler() { + return scheduler; } } diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureAbortedException.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureAbortedException.java similarity index 94% rename from procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureAbortedException.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureAbortedException.java index 51143ca14fcd..31d10e263478 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureAbortedException.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureAbortedException.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.procedure.exception; +package org.apache.iotdb.confignode.procedure.exception; public class ProcedureAbortedException extends ProcedureException { public ProcedureAbortedException() { diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureException.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureException.java similarity index 95% rename from procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureException.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureException.java index 7a9749d76d50..923737f7ebd0 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureException.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureException.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.procedure.exception; +package org.apache.iotdb.confignode.procedure.exception; public class ProcedureException extends Exception { /** default constructor */ diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureSuspendedException.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureSuspendedException.java similarity index 95% rename from procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureSuspendedException.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureSuspendedException.java index 7e9dbdab3cbe..1b6f049bd48b 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureSuspendedException.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureSuspendedException.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.apache.iotdb.procedure.exception; +package org.apache.iotdb.confignode.procedure.exception; public class ProcedureSuspendedException extends ProcedureException { diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureTimeoutException.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureTimeoutException.java similarity index 94% rename from procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureTimeoutException.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureTimeoutException.java index 422dad201c46..ccc1e4b2d8ec 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureTimeoutException.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureTimeoutException.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.procedure.exception; +package org.apache.iotdb.confignode.procedure.exception; public class ProcedureTimeoutException extends ProcedureException { public ProcedureTimeoutException(String s) { diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureYieldException.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureYieldException.java similarity index 95% rename from procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureYieldException.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureYieldException.java index 3f5c69a069b7..d3ed088c57eb 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/exception/ProcedureYieldException.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/exception/ProcedureYieldException.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.procedure.exception; +package org.apache.iotdb.confignode.procedure.exception; public class ProcedureYieldException extends ProcedureException { /** default constructor */ diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/AddConfigNodeProcedure.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/AddConfigNodeProcedure.java new file mode 100644 index 000000000000..b3aa6b1f1096 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/AddConfigNodeProcedure.java @@ -0,0 +1,188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.procedure.impl; + +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException; +import org.apache.iotdb.commons.utils.ThriftConfigNodeSerDeUtils; +import org.apache.iotdb.confignode.procedure.StateMachineProcedure; +import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; +import org.apache.iotdb.confignode.procedure.exception.ProcedureException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; +import org.apache.iotdb.confignode.procedure.scheduler.SimpleProcedureScheduler; +import org.apache.iotdb.confignode.procedure.state.AddConfigNodeState; +import org.apache.iotdb.confignode.procedure.state.ProcedureLockState; +import org.apache.iotdb.confignode.procedure.store.ProcedureFactory; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; + +/** add config node procedure */ +public class AddConfigNodeProcedure + extends StateMachineProcedure { + private static final Logger LOG = LoggerFactory.getLogger(AddConfigNodeProcedure.class); + private static final int retryThreshold = 5; + + private TConfigNodeLocation tConfigNodeLocation; + + public AddConfigNodeProcedure() { + super(); + } + + public AddConfigNodeProcedure(TConfigNodeLocation tConfigNodeLocation) { + super(); + this.tConfigNodeLocation = tConfigNodeLocation; + } + + @Override + protected Flow executeFromState(ConfigNodeProcedureEnv env, AddConfigNodeState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + if (tConfigNodeLocation == null) { + return Flow.NO_MORE_STATE; + } + try { + switch (state) { + case ADD_CONFIG_NODE_PREPARE: + setNextState(AddConfigNodeState.ADD_CONSENSUS_GROUP); + break; + case ADD_CONSENSUS_GROUP: + env.addConsensusGroup(tConfigNodeLocation); + setNextState(AddConfigNodeState.ADD_PEER); + LOG.info("Add consensus group {}", tConfigNodeLocation); + break; + case ADD_PEER: + env.addPeer(tConfigNodeLocation); + LOG.info("Add Peer of {}", tConfigNodeLocation); + return Flow.NO_MORE_STATE; + } + } catch (Exception e) { + if (isRollbackSupported(state)) { + setFailure(new ProcedureException("Add Config Node failed " + state)); + } else { + LOG.error( + "Retriable error trying to add config node {}, state {}", + tConfigNodeLocation, + state, + e); + if (getCycles() > retryThreshold) { + setFailure(new ProcedureException("State stuck at " + state)); + } + } + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(ConfigNodeProcedureEnv env, AddConfigNodeState state) + throws IOException, InterruptedException { + switch (state) { + case ADD_CONSENSUS_GROUP: + case ADD_PEER: + LOG.info("Rollback remove peer:{}", tConfigNodeLocation); + // TODO: if remove consensus group and remove peer + break; + } + } + + @Override + protected boolean isRollbackSupported(AddConfigNodeState state) { + switch (state) { + case ADD_CONSENSUS_GROUP: + case ADD_PEER: + return true; + } + return false; + } + + @Override + protected ProcedureLockState acquireLock(ConfigNodeProcedureEnv configNodeProcedureEnv) { + if (configNodeProcedureEnv.getAddConfigNodeLock().tryLock()) { + LOG.info("{} acquire lock.", getProcId()); + return ProcedureLockState.LOCK_ACQUIRED; + } + SimpleProcedureScheduler simpleProcedureScheduler = + 
(SimpleProcedureScheduler) configNodeProcedureEnv.getScheduler(); + simpleProcedureScheduler.addWaiting(this); + LOG.info("{} wait for lock.", getProcId()); + return ProcedureLockState.LOCK_EVENT_WAIT; + } + + @Override + protected void releaseLock(ConfigNodeProcedureEnv configNodeProcedureEnv) { + LOG.info("{} release lock.", getProcId()); + configNodeProcedureEnv.getAddConfigNodeLock().unlock(); + SimpleProcedureScheduler simpleProcedureScheduler = + (SimpleProcedureScheduler) configNodeProcedureEnv.getScheduler(); + simpleProcedureScheduler.releaseWaiting(); + } + + @Override + protected boolean holdLock(ConfigNodeProcedureEnv configNodeProcedureEnv) { + return configNodeProcedureEnv.getAddConfigNodeLock().isHeldByCurrentThread(); + } + + @Override + protected AddConfigNodeState getState(int stateId) { + return AddConfigNodeState.values()[stateId]; + } + + @Override + protected int getStateId(AddConfigNodeState deleteStorageGroupState) { + return deleteStorageGroupState.ordinal(); + } + + @Override + protected AddConfigNodeState getInitialState() { + return AddConfigNodeState.ADD_CONFIG_NODE_PREPARE; + } + + @Override + public void serialize(DataOutputStream stream) throws IOException { + stream.writeInt(ProcedureFactory.ProcedureType.ADD_CONFIG_NODE_PROCEDURE.ordinal()); + super.serialize(stream); + ThriftConfigNodeSerDeUtils.serializeTConfigNodeLocation(tConfigNodeLocation, stream); + } + + @Override + public void deserialize(ByteBuffer byteBuffer) { + super.deserialize(byteBuffer); + try { + tConfigNodeLocation = ThriftConfigNodeSerDeUtils.deserializeTConfigNodeLocation(byteBuffer); + } catch (ThriftSerDeException e) { + LOG.error("Error in deserialize AddConfigNodeProcedure", e); + } + } + + @Override + public boolean equals(Object that) { + if (that instanceof AddConfigNodeProcedure) { + AddConfigNodeProcedure thatProc = (AddConfigNodeProcedure) that; + return thatProc.getProcId() == this.getProcId() + && thatProc.getState() == this.getState() + && thatProc.tConfigNodeLocation.equals(this.tConfigNodeLocation); + } + return false; + } +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/DeleteStorageGroupProcedure.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/DeleteStorageGroupProcedure.java new file mode 100644 index 000000000000..3d5af6d75f41 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/DeleteStorageGroupProcedure.java @@ -0,0 +1,183 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.procedure.impl; + +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException; +import org.apache.iotdb.commons.utils.ThriftConfigNodeSerDeUtils; +import org.apache.iotdb.confignode.consensus.request.write.PreDeleteStorageGroupReq; +import org.apache.iotdb.confignode.procedure.StateMachineProcedure; +import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; +import org.apache.iotdb.confignode.procedure.exception.ProcedureException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; +import org.apache.iotdb.confignode.procedure.state.DeleteStorageGroupState; +import org.apache.iotdb.confignode.procedure.store.ProcedureFactory; +import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.apache.thrift.TException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; + +public class DeleteStorageGroupProcedure + extends StateMachineProcedure { + private static final Logger LOG = LoggerFactory.getLogger(DeleteStorageGroupProcedure.class); + private static final int retryThreshold = 5; + + private TStorageGroupSchema deleteSgSchema; + + public DeleteStorageGroupProcedure() { + super(); + } + + public DeleteStorageGroupProcedure(TStorageGroupSchema deleteSgSchema) { + super(); + this.deleteSgSchema = deleteSgSchema; + } + + public TStorageGroupSchema getDeleteSgSchema() { + return deleteSgSchema; + } + + public void setDeleteSgSchema(TStorageGroupSchema deleteSgSchema) { + this.deleteSgSchema = deleteSgSchema; + } + + @Override + protected Flow executeFromState(ConfigNodeProcedureEnv env, DeleteStorageGroupState state) + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + if (deleteSgSchema == null) { + return Flow.NO_MORE_STATE; + } + try { + switch (state) { + case DELETE_STORAGE_GROUP_PREPARE: + // TODO: lock related ClusterSchemaInfo, PartitionInfo and Regions + setNextState(DeleteStorageGroupState.DELETE_PRE); + break; + case DELETE_PRE: + LOG.info("Pre delete for Storage group {}", deleteSgSchema.getName()); + env.preDelete(PreDeleteStorageGroupReq.PreDeleteType.EXECUTE, deleteSgSchema.getName()); + setNextState(DeleteStorageGroupState.INVALIDATE_CACHE); + break; + case INVALIDATE_CACHE: + LOG.info("Invalidate cache of {}", deleteSgSchema.getName()); + if (env.invalidateCache(deleteSgSchema.getName())) { + setNextState(DeleteStorageGroupState.DELETE_CONFIG); + } else { + setFailure(new ProcedureException("Invalidate cache failed")); + } + break; + case DELETE_CONFIG: + LOG.info("Delete config info of {}", deleteSgSchema.getName()); + TSStatus status = env.deleteConfig(deleteSgSchema.getName()); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + return Flow.NO_MORE_STATE; + } else if (getCycles() > retryThreshold) { + setFailure(new ProcedureException("Delete config info id failed")); + } + } + } catch (TException | IOException e) { + if (isRollbackSupported(state)) { + setFailure(new ProcedureException("Delete storage group failed " + state)); + } else { + LOG.error( + "Retriable error trying to delete storage group {}, state {}", + deleteSgSchema.getName(), + state, + e); + if (getCycles() > retryThreshold) { + setFailure(new 
ProcedureException("State stuck at " + state)); + } + } + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(ConfigNodeProcedureEnv env, DeleteStorageGroupState state) + throws IOException, InterruptedException { + switch (state) { + case DELETE_PRE: + case INVALIDATE_CACHE: + LOG.info("Rollback preDeleted:{}", deleteSgSchema.getName()); + env.preDelete(PreDeleteStorageGroupReq.PreDeleteType.ROLLBACK, deleteSgSchema.getName()); + break; + } + } + + @Override + protected boolean isRollbackSupported(DeleteStorageGroupState state) { + switch (state) { + case DELETE_PRE: + case INVALIDATE_CACHE: + return true; + } + return false; + } + + @Override + protected DeleteStorageGroupState getState(int stateId) { + return DeleteStorageGroupState.values()[stateId]; + } + + @Override + protected int getStateId(DeleteStorageGroupState deleteStorageGroupState) { + return deleteStorageGroupState.ordinal(); + } + + @Override + protected DeleteStorageGroupState getInitialState() { + return DeleteStorageGroupState.DELETE_STORAGE_GROUP_PREPARE; + } + + @Override + public void serialize(DataOutputStream stream) throws IOException { + stream.writeInt(ProcedureFactory.ProcedureType.DELETE_STORAGE_GROUP_PROCEDURE.ordinal()); + super.serialize(stream); + ThriftConfigNodeSerDeUtils.serializeTStorageGroupSchema(deleteSgSchema, stream); + } + + @Override + public void deserialize(ByteBuffer byteBuffer) { + super.deserialize(byteBuffer); + try { + deleteSgSchema = ThriftConfigNodeSerDeUtils.deserializeTStorageGroupSchema(byteBuffer); + } catch (ThriftSerDeException e) { + LOG.error("Error in deserialize DeleteStorageGroupProcedure", e); + } + } + + @Override + public boolean equals(Object that) { + if (that instanceof DeleteStorageGroupProcedure) { + DeleteStorageGroupProcedure thatProc = (DeleteStorageGroupProcedure) that; + return thatProc.getProcId() == this.getProcId() + && thatProc.getState() == this.getState() + && thatProc.deleteSgSchema.equals(this.getDeleteSgSchema()); + } + return false; + } +} diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/scheduler/AbstractProcedureScheduler.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/scheduler/AbstractProcedureScheduler.java similarity index 98% rename from procedure/src/main/java/org/apache/iotdb/procedure/scheduler/AbstractProcedureScheduler.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/scheduler/AbstractProcedureScheduler.java index be98b3986749..00b1a7d1b1f8 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/scheduler/AbstractProcedureScheduler.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/scheduler/AbstractProcedureScheduler.java @@ -17,9 +17,9 @@ * under the License. 
*/ -package org.apache.iotdb.procedure.scheduler; +package org.apache.iotdb.confignode.procedure.scheduler; -import org.apache.iotdb.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.Procedure; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/scheduler/ProcedureScheduler.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/scheduler/ProcedureScheduler.java similarity index 96% rename from procedure/src/main/java/org/apache/iotdb/procedure/scheduler/ProcedureScheduler.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/scheduler/ProcedureScheduler.java index b582e80502eb..33ff7cf04d37 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/scheduler/ProcedureScheduler.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/scheduler/ProcedureScheduler.java @@ -17,9 +17,9 @@ * under the License. */ -package org.apache.iotdb.procedure.scheduler; +package org.apache.iotdb.confignode.procedure.scheduler; -import org.apache.iotdb.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.Procedure; import java.util.concurrent.TimeUnit; diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/scheduler/SimpleProcedureScheduler.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/scheduler/SimpleProcedureScheduler.java similarity index 94% rename from procedure/src/main/java/org/apache/iotdb/procedure/scheduler/SimpleProcedureScheduler.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/scheduler/SimpleProcedureScheduler.java index 7024ddc25f8e..f6065a585ae1 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/scheduler/SimpleProcedureScheduler.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/scheduler/SimpleProcedureScheduler.java @@ -17,9 +17,9 @@ * under the License. */ -package org.apache.iotdb.procedure.scheduler; +package org.apache.iotdb.confignode.procedure.scheduler; -import org.apache.iotdb.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.Procedure; import java.util.ArrayDeque; diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/AddConfigNodeState.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/AddConfigNodeState.java new file mode 100644 index 000000000000..3c98ee7df92c --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/AddConfigNodeState.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.procedure.state; + +public enum AddConfigNodeState { + ADD_CONFIG_NODE_PREPARE, + ADD_CONSENSUS_GROUP, + ADD_PEER +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/DeleteStorageGroupState.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/DeleteStorageGroupState.java similarity index 84% rename from confignode/src/main/java/org/apache/iotdb/confignode/procedure/DeleteStorageGroupState.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/DeleteStorageGroupState.java index eecc11578f8c..2ca63c67a89e 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/DeleteStorageGroupState.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/DeleteStorageGroupState.java @@ -17,12 +17,11 @@ * under the License. */ -package org.apache.iotdb.confignode.procedure; +package org.apache.iotdb.confignode.procedure.state; public enum DeleteStorageGroupState { - DELETE_STORAGE_GROUP_PREPEARE, - DELETE_DATA_REGION, - DELETE_SCHEMA_REGION, - DELETE_CONFIG, - INVALIDATE_CACHE + DELETE_STORAGE_GROUP_PREPARE, + DELETE_PRE, + INVALIDATE_CACHE, + DELETE_CONFIG } diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/ProcedureLockState.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/ProcedureLockState.java similarity index 94% rename from procedure/src/main/java/org/apache/iotdb/procedure/ProcedureLockState.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/ProcedureLockState.java index 2c98174a3620..9098cfc163d5 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/ProcedureLockState.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/ProcedureLockState.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.procedure; +package org.apache.iotdb.confignode.procedure.state; public enum ProcedureLockState { LOCK_ACQUIRED, diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/ProcedureState.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/ProcedureState.java new file mode 100644 index 000000000000..24db4ec60391 --- /dev/null +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/state/ProcedureState.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.confignode.procedure.state; + +public enum ProcedureState { + INITIALIZING, + RUNNABLE, + WAITING, + WAITING_TIMEOUT, + ROLLEDBACK, + SUCCESS, + FAILED +} diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ConfigProcedureStore.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ConfigProcedureStore.java similarity index 90% rename from confignode/src/main/java/org/apache/iotdb/confignode/procedure/ConfigProcedureStore.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ConfigProcedureStore.java index cd061f928522..d21b250b31e1 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ConfigProcedureStore.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ConfigProcedureStore.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.iotdb.confignode.procedure; +package org.apache.iotdb.confignode.procedure.store; import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.confignode.consensus.request.write.DeleteProcedureReq; @@ -25,9 +25,7 @@ import org.apache.iotdb.confignode.manager.ConfigManager; import org.apache.iotdb.confignode.manager.ConsensusManager; import org.apache.iotdb.confignode.persistence.ProcedureInfo; -import org.apache.iotdb.procedure.Procedure; -import org.apache.iotdb.procedure.store.IProcedureStore; -import org.apache.iotdb.procedure.store.ProcedureStore; +import org.apache.iotdb.confignode.procedure.Procedure; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -41,15 +39,14 @@ public class ConfigProcedureStore implements IProcedureStore { private static final Logger LOG = LoggerFactory.getLogger(ProcedureStore.class); private volatile boolean isRunning = false; - private ProcedureInfo procedureInfo = ProcedureInfo.getInstance(); + private final ProcedureInfo procedureInfo; private final String procedureWalDir = CommonDescriptor.getInstance().getConfig().getProcedureWalFolder(); - private ConfigManager configManager; + private final ConfigManager configManager; - public ConfigProcedureStore() {} - - public ConfigProcedureStore(ConfigManager configManager) { + public ConfigProcedureStore(ConfigManager configManager, ProcedureInfo procedureInfo) { this.configManager = configManager; + this.procedureInfo = procedureInfo; try { checkProcWalDir(procedureWalDir); } catch (IOException e) { diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/store/IProcedureFactory.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/IProcedureFactory.java similarity index 89% rename from procedure/src/main/java/org/apache/iotdb/procedure/store/IProcedureFactory.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/IProcedureFactory.java index 85330d679dd5..d080c928d4fe 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/store/IProcedureFactory.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/IProcedureFactory.java @@ -17,9 +17,9 @@ * under the License. 
*/ -package org.apache.iotdb.procedure.store; +package org.apache.iotdb.confignode.procedure.store; -import org.apache.iotdb.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.Procedure; import java.io.IOException; import java.nio.ByteBuffer; diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/store/IProcedureStore.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/IProcedureStore.java similarity index 91% rename from procedure/src/main/java/org/apache/iotdb/procedure/store/IProcedureStore.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/IProcedureStore.java index 439160afb83d..79161e978408 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/store/IProcedureStore.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/IProcedureStore.java @@ -17,9 +17,9 @@ * under the License. */ -package org.apache.iotdb.procedure.store; +package org.apache.iotdb.confignode.procedure.store; -import org.apache.iotdb.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.Procedure; import java.util.List; diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureFactory.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureFactory.java similarity index 79% rename from confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureFactory.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureFactory.java index 9d566236be26..9487f0a3c90b 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/ProcedureFactory.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureFactory.java @@ -17,10 +17,11 @@ * under the License. 
*/ -package org.apache.iotdb.confignode.procedure; +package org.apache.iotdb.confignode.procedure.store; -import org.apache.iotdb.procedure.Procedure; -import org.apache.iotdb.procedure.store.IProcedureFactory; +import org.apache.iotdb.confignode.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.impl.AddConfigNodeProcedure; +import org.apache.iotdb.confignode.procedure.impl.DeleteStorageGroupProcedure; import java.io.IOException; import java.nio.ByteBuffer; @@ -39,6 +40,9 @@ public Procedure create(ByteBuffer buffer) throws IOException { case DELETE_STORAGE_GROUP_PROCEDURE: procedure = new DeleteStorageGroupProcedure(); break; + case ADD_CONFIG_NODE_PROCEDURE: + procedure = new AddConfigNodeProcedure(); + break; default: throw new IOException("unknown Procedure type: " + typeNum); } @@ -49,12 +53,15 @@ public Procedure create(ByteBuffer buffer) throws IOException { public static ProcedureType getProcedureType(Procedure procedure) { if (procedure instanceof DeleteStorageGroupProcedure) { return ProcedureType.DELETE_STORAGE_GROUP_PROCEDURE; + } else if (procedure instanceof AddConfigNodeProcedure) { + return ProcedureType.ADD_CONFIG_NODE_PROCEDURE; } return null; } public enum ProcedureType { - DELETE_STORAGE_GROUP_PROCEDURE + DELETE_STORAGE_GROUP_PROCEDURE, + ADD_CONFIG_NODE_PROCEDURE } private static class ProcedureFactoryHolder { diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/store/ProcedureStore.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureStore.java similarity index 89% rename from procedure/src/main/java/org/apache/iotdb/procedure/store/ProcedureStore.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureStore.java index 1ca2c33cc072..9700675728a8 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/store/ProcedureStore.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureStore.java @@ -17,12 +17,11 @@ * under the License. 
*/ -package org.apache.iotdb.procedure.store; +package org.apache.iotdb.confignode.procedure.store; +import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.utils.TestOnly; -import org.apache.iotdb.procedure.Procedure; -import org.apache.iotdb.procedure.conf.ProcedureNodeConfigDescriptor; -import org.apache.iotdb.procedure.conf.ProcedureNodeConstant; +import org.apache.iotdb.confignode.procedure.Procedure; import org.apache.commons.io.FileUtils; import org.slf4j.Logger; @@ -40,8 +39,9 @@ public class ProcedureStore implements IProcedureStore { private static final Logger LOG = LoggerFactory.getLogger(ProcedureStore.class); private String procedureWalDir = - ProcedureNodeConfigDescriptor.getInstance().getConf().getProcedureWalDir(); + CommonDescriptor.getInstance().getConfig().getProcedureWalFolder(); private final ConcurrentHashMap procWALMap = new ConcurrentHashMap<>(); + public static final String PROCEDURE_WAL_SUFFIX = ".proc.wal"; private final IProcedureFactory procedureFactory; private volatile boolean isRunning = false; @@ -81,11 +81,7 @@ public void setRunning(boolean running) { public void load(List procedureList) { try { Files.list(Paths.get(procedureWalDir)) - .filter( - path -> - path.getFileName() - .toString() - .endsWith(ProcedureNodeConstant.PROCEDURE_WAL_SUFFIX)) + .filter(path -> path.getFileName().toString().endsWith(PROCEDURE_WAL_SUFFIX)) .sorted( (p1, p2) -> Long.compareUnsigned( @@ -116,7 +112,7 @@ public void update(Procedure procedure) { return; } long procId = procedure.getProcId(); - Path path = Paths.get(procedureWalDir, procId + ProcedureNodeConstant.PROCEDURE_WAL_SUFFIX); + Path path = Paths.get(procedureWalDir, procId + ProcedureStore.PROCEDURE_WAL_SUFFIX); ProcedureWAL procedureWAL = procWALMap.computeIfAbsent(procId, id -> new ProcedureWAL(path, procedureFactory)); try { diff --git a/procedure/src/main/java/org/apache/iotdb/procedure/store/ProcedureWAL.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureWAL.java similarity index 85% rename from procedure/src/main/java/org/apache/iotdb/procedure/store/ProcedureWAL.java rename to confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureWAL.java index 2960fb18c131..b37e32d27c11 100644 --- a/procedure/src/main/java/org/apache/iotdb/procedure/store/ProcedureWAL.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/store/ProcedureWAL.java @@ -17,13 +17,15 @@ * under the License. 
*/ -package org.apache.iotdb.procedure.store; +package org.apache.iotdb.confignode.procedure.store; -import org.apache.iotdb.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.Procedure; +import org.apache.iotdb.tsfile.utils.PublicBAOS; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.DataOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; @@ -60,11 +62,11 @@ public void save(Procedure procedure) throws IOException { Files.deleteIfExists(walTmpPath); Files.createFile(walTmpPath); try (FileOutputStream fos = new FileOutputStream(walTmp); - FileChannel channel = fos.getChannel()) { - ByteBuffer byteBuffer = ByteBuffer.allocate(PROCEDURE_WAL_BUFFER_SIZE); - procedure.serialize(byteBuffer); - byteBuffer.flip(); - channel.write(byteBuffer); + FileChannel channel = fos.getChannel(); + PublicBAOS publicBAOS = new PublicBAOS(); + DataOutputStream dataOutputStream = new DataOutputStream(publicBAOS)) { + procedure.serialize(dataOutputStream); + channel.write(ByteBuffer.wrap(publicBAOS.getBuf(), 0, publicBAOS.size())); } Files.deleteIfExists(walFilePath); Files.move(walTmpPath, walFilePath); diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNode.java b/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNode.java index 8a431f25229c..a12b2398b7e2 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNode.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNode.java @@ -18,13 +18,24 @@ */ package org.apache.iotdb.confignode.service; +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.commons.exception.BadNodeUrlException; import org.apache.iotdb.commons.exception.StartupException; import org.apache.iotdb.commons.service.JMXService; import org.apache.iotdb.commons.service.RegisterManager; +import org.apache.iotdb.commons.udf.service.UDFClassLoaderManager; +import org.apache.iotdb.commons.udf.service.UDFExecutableManager; +import org.apache.iotdb.commons.udf.service.UDFRegistrationService; +import org.apache.iotdb.commons.utils.NodeUrlUtils; +import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeConstant; +import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; +import org.apache.iotdb.confignode.conf.ConfigNodeRemoveCheck; import org.apache.iotdb.confignode.manager.ConfigManager; import org.apache.iotdb.confignode.service.thrift.ConfigNodeRPCService; import org.apache.iotdb.confignode.service.thrift.ConfigNodeRPCServiceProcessor; +import org.apache.iotdb.db.service.metrics.MetricsService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -41,15 +52,19 @@ public class ConfigNode implements ConfigNodeMBean { private final RegisterManager registerManager = new RegisterManager(); - private final ConfigNodeRPCService configNodeRPCService; - private final ConfigNodeRPCServiceProcessor configNodeRPCServiceProcessor; + private ConfigNodeRPCService configNodeRPCService; + private ConfigNodeRPCServiceProcessor configNodeRPCServiceProcessor; private ConfigManager configManager; private ConfigNode() { + // we do not init anything here, so that we can re-initialize the instance in IT. 
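+ // The actual wiring (ConfigManager, RPC service and its processor) is deferred to
+ // initConfigManager(), which setUp() invokes when the node is activated. A rough usage
+ // sketch (hypothetical test flow, assuming active() still drives setUp() as before):
+ //   ConfigNode node = ConfigNode.getInstance();
+ //   node.active();       // setUp(): initConfigManager() + register services
+ //   /* ... exercise the node ... */
+ //   node.deactivate();   // deregister services and close the ConfigManager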
+ } + + private void initConfigManager() { // Init ConfigManager try { - this.configManager = new ConfigManager(); + configManager = new ConfigManager(); } catch (IOException e) { LOGGER.error("Can't start ConfigNode consensus group!", e); try { @@ -61,8 +76,8 @@ private ConfigNode() { } // Init RPC service - this.configNodeRPCService = new ConfigNodeRPCService(); - this.configNodeRPCServiceProcessor = new ConfigNodeRPCServiceProcessor(configManager); + configNodeRPCService = new ConfigNodeRPCService(); + configNodeRPCServiceProcessor = new ConfigNodeRPCServiceProcessor(configManager); } public static void main(String[] args) { @@ -72,12 +87,33 @@ public static void main(String[] args) { /** Register services */ private void setUp() throws StartupException, IOException { LOGGER.info("Setting up {}...", ConfigNodeConstant.GLOBAL_NAME); + // Init ConfigManager + initConfigManager(); + registerManager.register(new JMXService()); JMXService.registerMBean(this, mbeanName); + registerManager.register(MetricsService.getInstance()); + configManager.addMetrics(); + registerUdfServices(); + configNodeRPCService.initSyncedServiceImpl(configNodeRPCServiceProcessor); registerManager.register(configNodeRPCService); LOGGER.info("Init rpc server success"); + + // start reporter + MetricsService.getInstance().startAllReporter(); + } + + private void registerUdfServices() throws StartupException { + final ConfigNodeConfig configNodeConfig = ConfigNodeDescriptor.getInstance().getConf(); + registerManager.register( + UDFExecutableManager.setupAndGetInstance( + configNodeConfig.getTemporaryLibDir(), configNodeConfig.getUdfLibDir())); + registerManager.register( + UDFClassLoaderManager.setupAndGetInstance(configNodeConfig.getUdfLibDir())); + registerManager.register( + UDFRegistrationService.setupAndGetInstance(configNodeConfig.getSystemUdfDir())); } public void active() { @@ -101,7 +137,9 @@ public void deactivate() throws IOException { LOGGER.info("Deactivating {}...", ConfigNodeConstant.GLOBAL_NAME); registerManager.deregisterAll(); JMXService.deregisterMBean(mbeanName); - configManager.close(); + if (configManager != null) { + configManager.close(); + } LOGGER.info("{} is deactivated.", ConfigNodeConstant.GLOBAL_NAME); } @@ -109,6 +147,29 @@ public void stop() throws IOException { deactivate(); } + public void doRemoveNode(String[] args) throws IOException { + LOGGER.info("Starting to remove {}...", ConfigNodeConstant.GLOBAL_NAME); + if (args.length != 3) { + LOGGER.info("Usage: -r :"); + return; + } + + try { + TEndPoint endPoint = NodeUrlUtils.parseTEndPointUrl(args[2]); + TConfigNodeLocation removeConfigNodeLocation = + ConfigNodeRemoveCheck.getInstance().removeCheck(endPoint); + if (removeConfigNodeLocation == null) { + LOGGER.error("The ConfigNode not in the Cluster."); + return; + } + + ConfigNodeRemoveCheck.getInstance().removeConfigNode(removeConfigNodeLocation); + } catch (BadNodeUrlException e) { + LOGGER.warn("No ConfigNodes need to be removed.", e); + } + LOGGER.info("{} is removed.", ConfigNodeConstant.GLOBAL_NAME); + } + private static class ConfigNodeHolder { private static final ConfigNode INSTANCE = new ConfigNode(); diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNodeCommandLine.java b/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNodeCommandLine.java index c66e861274c4..d3e1a63b9b48 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNodeCommandLine.java +++ 
b/confignode/src/main/java/org/apache/iotdb/confignode/service/ConfigNodeCommandLine.java @@ -73,7 +73,13 @@ protected int run(String[] args) { } ConfigNode.getInstance().active(); } else if (MODE_REMOVE.equals(mode)) { - // TODO: remove node + // remove node + try { + ConfigNode.getInstance().doRemoveNode(args); + } catch (IOException e) { + LOGGER.error("Meet error when doing remove", e); + return -1; + } } return 0; diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCService.java b/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCService.java index 7f73117c61ae..8ba2b961ecf1 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCService.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCService.java @@ -24,14 +24,14 @@ import org.apache.iotdb.commons.service.ServiceType; import org.apache.iotdb.commons.service.ThriftService; import org.apache.iotdb.commons.service.ThriftServiceThread; -import org.apache.iotdb.confignode.conf.ConfigNodeConf; +import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.rpc.thrift.ConfigIService; /** ConfigNodeRPCServer exposes the interface that interacts with the DataNode */ public class ConfigNodeRPCService extends ThriftService implements ConfigNodeRPCServiceMBean { - private static final ConfigNodeConf conf = ConfigNodeDescriptor.getInstance().getConf(); + private static final ConfigNodeConfig conf = ConfigNodeDescriptor.getInstance().getConf(); private ConfigNodeRPCServiceProcessor configNodeRPCServiceProcessor; diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java b/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java index 2abff56aaac5..3527afa64330 100644 --- a/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java +++ b/confignode/src/main/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessor.java @@ -18,10 +18,17 @@ */ package org.apache.iotdb.confignode.service.thrift; +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TFlushReq; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.auth.AuthException; import org.apache.iotdb.commons.conf.CommonDescriptor; +import org.apache.iotdb.commons.consensus.ConsensusGroupId; +import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.commons.utils.TestOnly; +import org.apache.iotdb.confignode.client.SyncConfigNodeClientPool; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.consensus.request.ConfigRequestType; import org.apache.iotdb.confignode.consensus.request.auth.AuthorReq; @@ -29,9 +36,10 @@ import org.apache.iotdb.confignode.consensus.request.read.GetDataNodeInfoReq; import org.apache.iotdb.confignode.consensus.request.read.GetDataPartitionReq; import org.apache.iotdb.confignode.consensus.request.read.GetOrCreateDataPartitionReq; +import org.apache.iotdb.confignode.consensus.request.read.GetRegionInfoListReq; import org.apache.iotdb.confignode.consensus.request.read.GetStorageGroupReq; -import 
org.apache.iotdb.confignode.consensus.request.write.ApplyConfigNodeReq; import org.apache.iotdb.confignode.consensus.request.write.RegisterDataNodeReq; +import org.apache.iotdb.confignode.consensus.request.write.RemoveConfigNodeReq; import org.apache.iotdb.confignode.consensus.request.write.SetDataReplicationFactorReq; import org.apache.iotdb.confignode.consensus.request.write.SetSchemaReplicationFactorReq; import org.apache.iotdb.confignode.consensus.request.write.SetStorageGroupReq; @@ -39,28 +47,34 @@ import org.apache.iotdb.confignode.consensus.request.write.SetTimePartitionIntervalReq; import org.apache.iotdb.confignode.consensus.response.CountStorageGroupResp; import org.apache.iotdb.confignode.consensus.response.DataNodeConfigurationResp; -import org.apache.iotdb.confignode.consensus.response.DataNodeLocationsResp; -import org.apache.iotdb.confignode.consensus.response.DataPartitionResp; +import org.apache.iotdb.confignode.consensus.response.DataNodeInfosResp; import org.apache.iotdb.confignode.consensus.response.PermissionInfoResp; -import org.apache.iotdb.confignode.consensus.response.SchemaPartitionResp; +import org.apache.iotdb.confignode.consensus.response.RegionInfoListResp; import org.apache.iotdb.confignode.consensus.response.StorageGroupSchemaResp; import org.apache.iotdb.confignode.manager.ConfigManager; +import org.apache.iotdb.confignode.manager.ConsensusManager; import org.apache.iotdb.confignode.rpc.thrift.ConfigIService; import org.apache.iotdb.confignode.rpc.thrift.TAuthorizerReq; import org.apache.iotdb.confignode.rpc.thrift.TAuthorizerResp; import org.apache.iotdb.confignode.rpc.thrift.TCheckUserPrivilegesReq; -import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.confignode.rpc.thrift.TClusterNodeInfos; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterReq; import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeRegisterResp; import org.apache.iotdb.confignode.rpc.thrift.TCountStorageGroupResp; -import org.apache.iotdb.confignode.rpc.thrift.TDataNodeLocationResp; +import org.apache.iotdb.confignode.rpc.thrift.TCreateFunctionReq; +import org.apache.iotdb.confignode.rpc.thrift.TDataNodeActiveReq; +import org.apache.iotdb.confignode.rpc.thrift.TDataNodeInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TDataNodeRegisterReq; import org.apache.iotdb.confignode.rpc.thrift.TDataNodeRegisterResp; import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionReq; import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionResp; import org.apache.iotdb.confignode.rpc.thrift.TDeleteStorageGroupReq; import org.apache.iotdb.confignode.rpc.thrift.TDeleteStorageGroupsReq; +import org.apache.iotdb.confignode.rpc.thrift.TDropFunctionReq; import org.apache.iotdb.confignode.rpc.thrift.TLoginReq; +import org.apache.iotdb.confignode.rpc.thrift.TPermissionInfoResp; +import org.apache.iotdb.confignode.rpc.thrift.TSchemaNodeManagementReq; +import org.apache.iotdb.confignode.rpc.thrift.TSchemaNodeManagementResp; import org.apache.iotdb.confignode.rpc.thrift.TSchemaPartitionReq; import org.apache.iotdb.confignode.rpc.thrift.TSchemaPartitionResp; import org.apache.iotdb.confignode.rpc.thrift.TSetDataReplicationFactorReq; @@ -68,10 +82,16 @@ import org.apache.iotdb.confignode.rpc.thrift.TSetStorageGroupReq; import org.apache.iotdb.confignode.rpc.thrift.TSetTTLReq; import org.apache.iotdb.confignode.rpc.thrift.TSetTimePartitionIntervalReq; +import org.apache.iotdb.confignode.rpc.thrift.TShowRegionReq; +import 
org.apache.iotdb.confignode.rpc.thrift.TShowRegionResp; import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchemaResp; +import org.apache.iotdb.confignode.service.ConfigNode; +import org.apache.iotdb.consensus.common.response.ConsensusGenericResponse; import org.apache.iotdb.db.mpp.common.schematree.PathPatternTree; import org.apache.iotdb.db.qp.logical.sys.AuthorOperator; +import org.apache.iotdb.rpc.RpcUtils; +import org.apache.iotdb.rpc.TSStatusCode; import org.apache.thrift.TException; import org.slf4j.Logger; @@ -79,9 +99,9 @@ import java.io.IOException; import java.nio.ByteBuffer; -import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.stream.Collectors; /** ConfigNodeRPCServer exposes the interface that interacts with the DataNode */ public class ConfigNodeRPCServiceProcessor implements ConfigIService.Iface { @@ -99,9 +119,14 @@ public void close() throws IOException { configManager.close(); } + @TestOnly + public ConsensusManager getConsensusManager() { + return configManager.getConsensusManager(); + } + @Override public TDataNodeRegisterResp registerDataNode(TDataNodeRegisterReq req) throws TException { - RegisterDataNodeReq registerReq = new RegisterDataNodeReq(req.getDataNodeLocation()); + RegisterDataNodeReq registerReq = new RegisterDataNodeReq(req.getDataNodeInfo()); DataNodeConfigurationResp registerResp = (DataNodeConfigurationResp) configManager.registerDataNode(registerReq); @@ -115,21 +140,41 @@ public TDataNodeRegisterResp registerDataNode(TDataNodeRegisterReq req) throws T } @Override - public TDataNodeLocationResp getDataNodeLocations(int dataNodeID) throws TException { + public TSStatus activeDataNode(TDataNodeActiveReq req) throws TException { + // TODO: implement active data node + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()); + } + + @Override + public TDataNodeInfoResp getDataNodeInfo(int dataNodeID) throws TException { GetDataNodeInfoReq queryReq = new GetDataNodeInfoReq(dataNodeID); - DataNodeLocationsResp queryResp = - (DataNodeLocationsResp) configManager.getDataNodeInfo(queryReq); + DataNodeInfosResp queryResp = (DataNodeInfosResp) configManager.getDataNodeInfo(queryReq); - TDataNodeLocationResp resp = new TDataNodeLocationResp(); + TDataNodeInfoResp resp = new TDataNodeInfoResp(); queryResp.convertToRpcDataNodeLocationResp(resp); return resp; } + @Override + public TClusterNodeInfos getAllClusterNodeInfos() throws TException { + List configNodeLocations = + configManager.getNodeManager().getOnlineConfigNodes(); + List dataNodeInfoLocations = + configManager.getNodeManager().getOnlineDataNodes(-1).stream() + .map(TDataNodeInfo::getLocation) + .collect(Collectors.toList()); + + return new TClusterNodeInfos( + new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()), + configNodeLocations, + dataNodeInfoLocations); + } + @Override public TSStatus setStorageGroup(TSetStorageGroupReq req) throws TException { TStorageGroupSchema storageGroupSchema = req.getStorageGroup(); - // Set default configurations + // Set default configurations if necessary if (!storageGroupSchema.isSetTTL()) { storageGroupSchema.setTTL(CommonDescriptor.getInstance().getConfig().getDefaultTTL()); } @@ -145,18 +190,10 @@ public TSStatus setStorageGroup(TSetStorageGroupReq req) throws TException { storageGroupSchema.setTimePartitionInterval( ConfigNodeDescriptor.getInstance().getConf().getTimePartitionInterval()); } - if 
(!storageGroupSchema.isSetMaximumSchemaRegionCount()) { - storageGroupSchema.setMaximumSchemaRegionCount( - ConfigNodeDescriptor.getInstance().getConf().getMaximumSchemaRegionCount()); - } - if (!storageGroupSchema.isSetMaximumDataRegionCount()) { - storageGroupSchema.setMaximumDataRegionCount( - ConfigNodeDescriptor.getInstance().getConf().getMaximumDataRegionCount()); - } - // Initialize RegionGroupId List - storageGroupSchema.setSchemaRegionGroupIds(new ArrayList<>()); - storageGroupSchema.setDataRegionGroupIds(new ArrayList<>()); + // Initialize the maxSchemaRegionGroupCount and maxDataRegionGroupCount as 0 + storageGroupSchema.setMaxSchemaRegionGroupCount(0); + storageGroupSchema.setMaxDataRegionGroupCount(0); SetStorageGroupReq setReq = new SetStorageGroupReq(storageGroupSchema); TSStatus resp = configManager.setStorageGroup(setReq); @@ -232,12 +269,7 @@ public TStorageGroupSchemaResp getMatchedStorageGroupSchemas(List storag public TSchemaPartitionResp getSchemaPartition(TSchemaPartitionReq req) throws TException { PathPatternTree patternTree = PathPatternTree.deserialize(ByteBuffer.wrap(req.getPathPatternTree())); - SchemaPartitionResp schemaResp = - (SchemaPartitionResp) configManager.getSchemaPartition(patternTree); - - TSchemaPartitionResp resp = new TSchemaPartitionResp(); - schemaResp.convertToRpcSchemaPartitionResp(resp); - return resp; + return configManager.getSchemaPartition(patternTree); } @Override @@ -245,36 +277,30 @@ public TSchemaPartitionResp getOrCreateSchemaPartition(TSchemaPartitionReq req) throws TException { PathPatternTree patternTree = PathPatternTree.deserialize(ByteBuffer.wrap(req.getPathPatternTree())); - SchemaPartitionResp dataResp = - (SchemaPartitionResp) configManager.getOrCreateSchemaPartition(patternTree); + return configManager.getOrCreateSchemaPartition(patternTree); + } - TSchemaPartitionResp resp = new TSchemaPartitionResp(); - dataResp.convertToRpcSchemaPartitionResp(resp); - return resp; + @Override + public TSchemaNodeManagementResp getSchemaNodeManagementPartition(TSchemaNodeManagementReq req) + throws TException { + PathPatternTree patternTree = + PathPatternTree.deserialize(ByteBuffer.wrap(req.getPathPatternTree())); + PartialPath partialPath = patternTree.getAllPathPatterns().get(0); + return configManager.getNodePathsPartition(partialPath, req.getLevel()); } @Override public TDataPartitionResp getDataPartition(TDataPartitionReq req) throws TException { GetDataPartitionReq getDataPartitionReq = new GetDataPartitionReq(); getDataPartitionReq.convertFromRpcTDataPartitionReq(req); - DataPartitionResp dataResp = - (DataPartitionResp) configManager.getDataPartition(getDataPartitionReq); - - TDataPartitionResp resp = new TDataPartitionResp(); - dataResp.convertToRpcDataPartitionResp(resp); - return resp; + return configManager.getDataPartition(getDataPartitionReq); } @Override public TDataPartitionResp getOrCreateDataPartition(TDataPartitionReq req) throws TException { GetOrCreateDataPartitionReq getOrCreateDataPartitionReq = new GetOrCreateDataPartitionReq(); getOrCreateDataPartitionReq.convertFromRpcTDataPartitionReq(req); - DataPartitionResp dataResp = - (DataPartitionResp) configManager.getOrCreateDataPartition(getOrCreateDataPartitionReq); - - TDataPartitionResp resp = new TDataPartitionResp(); - dataResp.convertToRpcDataPartitionResp(resp); - return resp; + return configManager.getOrCreateDataPartition(getOrCreateDataPartitionReq); } @Override @@ -323,16 +349,18 @@ public TAuthorizerResp queryPermission(TAuthorizerReq req) throws 
TException { LOGGER.error(e.getMessage()); } PermissionInfoResp dataSet = (PermissionInfoResp) configManager.queryPermission(plan); - return new TAuthorizerResp(dataSet.getStatus(), dataSet.getPermissionInfo()); + TAuthorizerResp resp = new TAuthorizerResp(dataSet.getStatus()); + resp.setAuthorizerInfo(dataSet.getPermissionInfo()); + return resp; } @Override - public TSStatus login(TLoginReq req) throws TException { + public TPermissionInfoResp login(TLoginReq req) throws TException { return configManager.login(req.getUserrname(), req.getPassword()); } @Override - public TSStatus checkUserPrivileges(TCheckUserPrivilegesReq req) throws TException { + public TPermissionInfoResp checkUserPrivileges(TCheckUserPrivilegesReq req) throws TException { return configManager.checkUserPrivileges( req.getUsername(), req.getPaths(), req.getPermission()); } @@ -348,16 +376,103 @@ public TConfigNodeRegisterResp registerConfigNode(TConfigNodeRegisterReq req) th } @Override - public TSStatus applyConfigNode(TConfigNodeLocation configNodeLocation) throws TException { - ApplyConfigNodeReq applyConfigNodeReq = new ApplyConfigNodeReq(configNodeLocation); - TSStatus status = configManager.applyConfigNode(applyConfigNodeReq); + public TSStatus addConsensusGroup(TConfigNodeRegisterResp registerResp) { + return configManager.addConsensusGroup(registerResp.getConfigNodeList()); + } - // Print log to record the ConfigNode that performs the ApplyConfigNodeRequest - LOGGER.info("Execute ApplyConfigNodeRequest {} with result {}", configNodeLocation, status); + /** + * For the leader to remove a ConfigNode's configuration from the consensus layer + * + * @param configNodeLocation the ConfigNode to remove + * @return the execution status of the removal + */ + @Override + public TSStatus removeConfigNode(TConfigNodeLocation configNodeLocation) throws TException { + RemoveConfigNodeReq removeConfigNodeReq = new RemoveConfigNodeReq(configNodeLocation); + + TSStatus status = configManager.removeConfigNode(removeConfigNodeReq); + if (status.getCode() == TSStatusCode.SUCCESS_STATUS.getStatusCode()) { + status = SyncConfigNodeClientPool.getInstance().stopConfigNode(configNodeLocation); + } + + // Print log to record the ConfigNode that performs the RemoveConfigNodeRequest + LOGGER.info("Execute RemoveConfigNodeRequest {} with result {}", configNodeLocation, status); return status; } + /** + * For the leader to stop a ConfigNode + * + * @param configNodeLocation the ConfigNode to stop + * @return the execution status of the stop operation + */ + @Override + public TSStatus stopConfigNode(TConfigNodeLocation configNodeLocation) throws TException { + if (!configManager.getNodeManager().getOnlineConfigNodes().contains(configNodeLocation)) { + return new TSStatus(TSStatusCode.REMOVE_CONFIGNODE_FAILED.getStatusCode()) + .setMessage("Stop ConfigNode failed because the ConfigNode is not in the current cluster."); + } + + ConsensusGroupId groupId = configManager.getConsensusManager().getConsensusGroupId(); + ConsensusGenericResponse resp = + configManager.getConsensusManager().getConsensusImpl().removeConsensusGroup(groupId); + if (!resp.isSuccess()) { + return new TSStatus(TSStatusCode.REMOVE_CONFIGNODE_FAILED.getStatusCode()) + .setMessage("Stop ConfigNode failed because removing the ConsensusGroup failed."); + } + + new Thread( + () -> { + try { + ConfigNode.getInstance().stop(); + System.exit(0); + } catch (IOException e) { + LOGGER.error("Meet error when stopping ConfigNode!", e); + } + }) + .start(); + return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode()) + .setMessage("Stop ConfigNode successfully."); + } + + @Override + public TSStatus createFunction(TCreateFunctionReq req) { + return
configManager.createFunction(req.getUdfName(), req.getClassName(), req.getUris()); + } + + @Override + public TSStatus dropFunction(TDropFunctionReq req) throws TException { + return configManager.dropFunction(req.getUdfName()); + } + + @Override + public TSStatus flush(TFlushReq req) throws TException { + if (req.storageGroups != null) { + List noExistSg = + configManager.checkStorageGroupExist(PartialPath.fromStringList(req.storageGroups)); + if (!noExistSg.isEmpty()) { + StringBuilder sb = new StringBuilder(); + noExistSg.forEach(storageGroup -> sb.append(storageGroup.getFullPath()).append(",")); + return RpcUtils.getStatus( + TSStatusCode.STORAGE_GROUP_NOT_EXIST, + "storageGroup " + sb.subSequence(0, sb.length() - 1) + " does not exist"); + } + } + return configManager.flush(req); + } + + @Override + public TShowRegionResp showRegion(TShowRegionReq showRegionReq) throws TException { + GetRegionInfoListReq getRegionsinfoReq = + new GetRegionInfoListReq(showRegionReq.getConsensusGroupType()); + RegionInfoListResp dataSet = (RegionInfoListResp) configManager.showRegion(getRegionsinfoReq); + TShowRegionResp showRegionResp = new TShowRegionResp(); + showRegionResp.setStatus(dataSet.getStatus()); + showRegionResp.setRegionInfoList(dataSet.getRegionInfoList()); + return showRegionResp; + } + public void handleClientExit() {} // TODO: Interfaces for data operations diff --git a/confignode/src/test/java/org/apache/iotdb/confignode/consensus/request/ConfigRequestSerDeTest.java b/confignode/src/test/java/org/apache/iotdb/confignode/consensus/request/ConfigRequestSerDeTest.java index 9f6e10a14ceb..b1fdb3a05c71 100644 --- a/confignode/src/test/java/org/apache/iotdb/confignode/consensus/request/ConfigRequestSerDeTest.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/consensus/request/ConfigRequestSerDeTest.java @@ -18,8 +18,10 @@ */ package org.apache.iotdb.confignode.consensus.request; +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId; import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; @@ -27,14 +29,19 @@ import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; import org.apache.iotdb.commons.auth.AuthException; import org.apache.iotdb.commons.auth.entity.PrivilegeType; +import org.apache.iotdb.commons.partition.DataPartitionTable; +import org.apache.iotdb.commons.partition.SchemaPartitionTable; +import org.apache.iotdb.commons.partition.SeriesPartitionTable; import org.apache.iotdb.confignode.consensus.request.auth.AuthorReq; import org.apache.iotdb.confignode.consensus.request.read.CountStorageGroupReq; import org.apache.iotdb.confignode.consensus.request.read.GetDataNodeInfoReq; import org.apache.iotdb.confignode.consensus.request.read.GetDataPartitionReq; import org.apache.iotdb.confignode.consensus.request.read.GetOrCreateDataPartitionReq; import org.apache.iotdb.confignode.consensus.request.read.GetOrCreateSchemaPartitionReq; +import org.apache.iotdb.confignode.consensus.request.read.GetRegionInfoListReq; import org.apache.iotdb.confignode.consensus.request.read.GetSchemaPartitionReq; import org.apache.iotdb.confignode.consensus.request.read.GetStorageGroupReq; +import org.apache.iotdb.confignode.consensus.request.write.AdjustMaxRegionGroupCountReq; import 
org.apache.iotdb.confignode.consensus.request.write.ApplyConfigNodeReq; import org.apache.iotdb.confignode.consensus.request.write.CreateDataPartitionReq; import org.apache.iotdb.confignode.consensus.request.write.CreateRegionsReq; @@ -43,23 +50,22 @@ import org.apache.iotdb.confignode.consensus.request.write.DeleteRegionsReq; import org.apache.iotdb.confignode.consensus.request.write.DeleteStorageGroupReq; import org.apache.iotdb.confignode.consensus.request.write.RegisterDataNodeReq; +import org.apache.iotdb.confignode.consensus.request.write.RemoveConfigNodeReq; import org.apache.iotdb.confignode.consensus.request.write.SetDataReplicationFactorReq; import org.apache.iotdb.confignode.consensus.request.write.SetSchemaReplicationFactorReq; import org.apache.iotdb.confignode.consensus.request.write.SetStorageGroupReq; import org.apache.iotdb.confignode.consensus.request.write.SetTTLReq; import org.apache.iotdb.confignode.consensus.request.write.SetTimePartitionIntervalReq; import org.apache.iotdb.confignode.consensus.request.write.UpdateProcedureReq; -import org.apache.iotdb.confignode.procedure.DeleteStorageGroupProcedure; -import org.apache.iotdb.confignode.rpc.thrift.TConfigNodeLocation; +import org.apache.iotdb.confignode.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.impl.DeleteStorageGroupProcedure; import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; -import org.apache.iotdb.procedure.Procedure; +import org.apache.iotdb.tsfile.utils.Pair; -import org.junit.After; import org.junit.Assert; import org.junit.Test; import java.io.IOException; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -71,34 +77,32 @@ public class ConfigRequestSerDeTest { - private final ByteBuffer buffer = ByteBuffer.allocate(10240); - - @After - public void cleanBuffer() { - buffer.clear(); - } - @Test public void RegisterDataNodeReqTest() throws IOException { TDataNodeLocation dataNodeLocation = new TDataNodeLocation(); dataNodeLocation.setDataNodeId(1); - dataNodeLocation.setExternalEndPoint(new TEndPoint("0.0.0.0", 6667)); + dataNodeLocation.setClientRpcEndPoint(new TEndPoint("0.0.0.0", 6667)); dataNodeLocation.setInternalEndPoint(new TEndPoint("0.0.0.0", 9003)); - dataNodeLocation.setDataBlockManagerEndPoint(new TEndPoint("0.0.0.0", 8777)); - dataNodeLocation.setConsensusEndPoint(new TEndPoint("0.0.0.0", 7777)); - RegisterDataNodeReq req0 = new RegisterDataNodeReq(dataNodeLocation); - req0.serialize(buffer); - buffer.flip(); - RegisterDataNodeReq req1 = (RegisterDataNodeReq) ConfigRequest.Factory.create(buffer); + dataNodeLocation.setMPPDataExchangeEndPoint(new TEndPoint("0.0.0.0", 8777)); + dataNodeLocation.setDataRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 40010)); + dataNodeLocation.setSchemaRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 50010)); + + TDataNodeInfo dataNodeInfo = new TDataNodeInfo(); + dataNodeInfo.setLocation(dataNodeLocation); + dataNodeInfo.setCpuCoreNum(16); + dataNodeInfo.setMaxMemory(34359738368L); + + RegisterDataNodeReq req0 = new RegisterDataNodeReq(dataNodeInfo); + RegisterDataNodeReq req1 = + (RegisterDataNodeReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @Test public void QueryDataNodeInfoReqTest() throws IOException { GetDataNodeInfoReq req0 = new GetDataNodeInfoReq(-1); - req0.serialize(buffer); - buffer.flip(); - GetDataNodeInfoReq req1 = (GetDataNodeInfoReq) ConfigRequest.Factory.create(buffer); + GetDataNodeInfoReq 
req1 = + (GetDataNodeInfoReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @@ -111,84 +115,76 @@ public void SetStorageGroupReqTest() throws IOException { .setTTL(Long.MAX_VALUE) .setSchemaReplicationFactor(3) .setDataReplicationFactor(3) - .setTimePartitionInterval(604800) - .setSchemaRegionGroupIds(new ArrayList<>()) - .setDataRegionGroupIds(new ArrayList<>())); - req0.serialize(buffer); - buffer.flip(); - SetStorageGroupReq req1 = (SetStorageGroupReq) ConfigRequest.Factory.create(buffer); + .setTimePartitionInterval(604800)); + SetStorageGroupReq req1 = + (SetStorageGroupReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @Test public void DeleteStorageGroupReqTest() throws IOException { - TStorageGroupSchema storageGroupSchema = new TStorageGroupSchema(); - storageGroupSchema.setName("root.sg"); - storageGroupSchema.addToSchemaRegionGroupIds( - new TConsensusGroupId(TConsensusGroupType.DataRegion, 1)); - storageGroupSchema.addToSchemaRegionGroupIds( - new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 2)); - DeleteStorageGroupReq req0 = new DeleteStorageGroupReq(storageGroupSchema); - req0.serialize(buffer); - buffer.flip(); - DeleteStorageGroupReq req1 = (DeleteStorageGroupReq) ConfigRequest.Factory.create(buffer); + DeleteStorageGroupReq req0 = new DeleteStorageGroupReq("root.sg"); + DeleteStorageGroupReq req1 = + (DeleteStorageGroupReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @Test public void SetTTLReqTest() throws IOException { SetTTLReq req0 = new SetTTLReq("root.sg0", Long.MAX_VALUE); - req0.serialize(buffer); - buffer.flip(); - SetTTLReq req1 = (SetTTLReq) ConfigRequest.Factory.create(buffer); + SetTTLReq req1 = (SetTTLReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @Test public void SetSchemaReplicationFactorReqTest() throws IOException { SetSchemaReplicationFactorReq req0 = new SetSchemaReplicationFactorReq("root.sg0", 3); - req0.serialize(buffer); - buffer.flip(); SetSchemaReplicationFactorReq req1 = - (SetSchemaReplicationFactorReq) ConfigRequest.Factory.create(buffer); + (SetSchemaReplicationFactorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @Test public void SetDataReplicationFactorReqTest() throws IOException { SetDataReplicationFactorReq req0 = new SetDataReplicationFactorReq("root.sg0", 3); - req0.serialize(buffer); - buffer.flip(); SetDataReplicationFactorReq req1 = - (SetDataReplicationFactorReq) ConfigRequest.Factory.create(buffer); + (SetDataReplicationFactorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @Test public void SetTimePartitionIntervalReqTest() throws IOException { SetTimePartitionIntervalReq req0 = new SetTimePartitionIntervalReq("root.sg0", 6048000L); - req0.serialize(buffer); - buffer.flip(); SetTimePartitionIntervalReq req1 = - (SetTimePartitionIntervalReq) ConfigRequest.Factory.create(buffer); + (SetTimePartitionIntervalReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); + Assert.assertEquals(req0, req1); + } + + @Test + public void AdjustMaxRegionGroupCountReqTest() throws IOException { + AdjustMaxRegionGroupCountReq req0 = new AdjustMaxRegionGroupCountReq(); + for (int i = 0; i < 3; i++) { + req0.putEntry("root.sg" + i, new Pair<>(i, i)); + } + + AdjustMaxRegionGroupCountReq req1 = + (AdjustMaxRegionGroupCountReq) 
ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @Test public void CountStorageGroupReqTest() throws IOException { CountStorageGroupReq req0 = new CountStorageGroupReq(Arrays.asList("root", "sg")); - req0.serialize(buffer); - buffer.flip(); - CountStorageGroupReq req1 = (CountStorageGroupReq) ConfigRequest.Factory.create(buffer); + CountStorageGroupReq req1 = + (CountStorageGroupReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @Test public void GetStorageGroupReqTest() throws IOException { GetStorageGroupReq req0 = new GetStorageGroupReq(Arrays.asList("root", "sg")); - req0.serialize(buffer); - buffer.flip(); - CountStorageGroupReq req1 = (CountStorageGroupReq) ConfigRequest.Factory.create(buffer); + CountStorageGroupReq req1 = + (CountStorageGroupReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @@ -201,37 +197,36 @@ public void DeleteStorageGroupPlanTest() { public void CreateRegionsPlanTest() throws IOException { TDataNodeLocation dataNodeLocation = new TDataNodeLocation(); dataNodeLocation.setDataNodeId(0); - dataNodeLocation.setExternalEndPoint(new TEndPoint("0.0.0.0", 6667)); + dataNodeLocation.setClientRpcEndPoint(new TEndPoint("0.0.0.0", 6667)); dataNodeLocation.setInternalEndPoint(new TEndPoint("0.0.0.0", 9003)); - dataNodeLocation.setDataBlockManagerEndPoint(new TEndPoint("0.0.0.0", 8777)); - dataNodeLocation.setConsensusEndPoint(new TEndPoint("0.0.0.0", 40010)); + dataNodeLocation.setMPPDataExchangeEndPoint(new TEndPoint("0.0.0.0", 8777)); + dataNodeLocation.setDataRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 40010)); + dataNodeLocation.setSchemaRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 50010)); CreateRegionsReq req0 = new CreateRegionsReq(); TRegionReplicaSet dataRegionSet = new TRegionReplicaSet(); dataRegionSet.setRegionId(new TConsensusGroupId(TConsensusGroupType.DataRegion, 0)); dataRegionSet.setDataNodeLocations(Collections.singletonList(dataNodeLocation)); - req0.addRegion("root.sg0", dataRegionSet); + req0.addRegionGroup("root.sg0", dataRegionSet); TRegionReplicaSet schemaRegionSet = new TRegionReplicaSet(); schemaRegionSet.setRegionId(new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 1)); schemaRegionSet.setDataNodeLocations(Collections.singletonList(dataNodeLocation)); - req0.addRegion("root.sg1", schemaRegionSet); + req0.addRegionGroup("root.sg1", schemaRegionSet); - req0.serialize(buffer); - buffer.flip(); - CreateRegionsReq req1 = (CreateRegionsReq) ConfigRequest.Factory.create(buffer); + CreateRegionsReq req1 = + (CreateRegionsReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @Test public void DeleteRegionsPlanTest() throws IOException { DeleteRegionsReq req0 = new DeleteRegionsReq(); - req0.addConsensusGroupId(new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 0)); - req0.addConsensusGroupId(new TConsensusGroupId(TConsensusGroupType.DataRegion, 1)); + req0.addDeleteRegion("sg", new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 0)); + req0.addDeleteRegion("sg", new TConsensusGroupId(TConsensusGroupType.DataRegion, 1)); - req0.serialize(buffer); - buffer.flip(); - DeleteRegionsReq req1 = (DeleteRegionsReq) ConfigRequest.Factory.create(buffer); + DeleteRegionsReq req1 = + (DeleteRegionsReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @@ -239,27 +234,25 @@ public void DeleteRegionsPlanTest() 
throws IOException { public void CreateSchemaPartitionPlanTest() throws IOException { TDataNodeLocation dataNodeLocation = new TDataNodeLocation(); dataNodeLocation.setDataNodeId(0); - dataNodeLocation.setExternalEndPoint(new TEndPoint("0.0.0.0", 6667)); + dataNodeLocation.setClientRpcEndPoint(new TEndPoint("0.0.0.0", 6667)); dataNodeLocation.setInternalEndPoint(new TEndPoint("0.0.0.0", 9003)); - dataNodeLocation.setDataBlockManagerEndPoint(new TEndPoint("0.0.0.0", 8777)); - dataNodeLocation.setConsensusEndPoint(new TEndPoint("0.0.0.0", 40010)); + dataNodeLocation.setMPPDataExchangeEndPoint(new TEndPoint("0.0.0.0", 8777)); + dataNodeLocation.setDataRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 40010)); + dataNodeLocation.setSchemaRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 50010)); String storageGroup = "root.sg0"; TSeriesPartitionSlot seriesPartitionSlot = new TSeriesPartitionSlot(10); - TRegionReplicaSet regionReplicaSet = new TRegionReplicaSet(); - regionReplicaSet.setRegionId(new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 0)); - regionReplicaSet.setDataNodeLocations(Collections.singletonList(dataNodeLocation)); + TConsensusGroupId consensusGroupId = new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 0); - Map> assignedSchemaPartition = - new HashMap<>(); - assignedSchemaPartition.put(storageGroup, new HashMap<>()); - assignedSchemaPartition.get(storageGroup).put(seriesPartitionSlot, regionReplicaSet); + Map assignedSchemaPartition = new HashMap<>(); + Map schemaPartitionMap = new HashMap<>(); + schemaPartitionMap.put(seriesPartitionSlot, consensusGroupId); + assignedSchemaPartition.put(storageGroup, new SchemaPartitionTable(schemaPartitionMap)); CreateSchemaPartitionReq req0 = new CreateSchemaPartitionReq(); req0.setAssignedSchemaPartition(assignedSchemaPartition); - req0.serialize(buffer); - buffer.flip(); - CreateSchemaPartitionReq req1 = (CreateSchemaPartitionReq) ConfigRequest.Factory.create(buffer); + CreateSchemaPartitionReq req1 = + (CreateSchemaPartitionReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @@ -273,9 +266,8 @@ public void GetSchemaPartitionPlanTest() throws IOException { GetSchemaPartitionReq req0 = new GetSchemaPartitionReq(); req0.setPartitionSlotsMap(partitionSlotsMap); - req0.serialize(buffer); - buffer.flip(); - GetSchemaPartitionReq req1 = (GetSchemaPartitionReq) ConfigRequest.Factory.create(buffer); + GetSchemaPartitionReq req1 = + (GetSchemaPartitionReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @@ -289,10 +281,8 @@ public void GetOrCreateSchemaPartitionPlanTest() throws IOException { GetOrCreateSchemaPartitionReq req0 = new GetOrCreateSchemaPartitionReq(); req0.setPartitionSlotsMap(partitionSlotsMap); - req0.serialize(buffer); - buffer.flip(); GetOrCreateSchemaPartitionReq req1 = - (GetOrCreateSchemaPartitionReq) ConfigRequest.Factory.create(buffer); + (GetOrCreateSchemaPartitionReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @@ -300,10 +290,11 @@ public void GetOrCreateSchemaPartitionPlanTest() throws IOException { public void CreateDataPartitionPlanTest() throws IOException { TDataNodeLocation dataNodeLocation = new TDataNodeLocation(); dataNodeLocation.setDataNodeId(0); - dataNodeLocation.setExternalEndPoint(new TEndPoint("0.0.0.0", 6667)); + dataNodeLocation.setClientRpcEndPoint(new TEndPoint("0.0.0.0", 6667)); dataNodeLocation.setInternalEndPoint(new TEndPoint("0.0.0.0", 9003)); 
- dataNodeLocation.setDataBlockManagerEndPoint(new TEndPoint("0.0.0.0", 8777)); - dataNodeLocation.setConsensusEndPoint(new TEndPoint("0.0.0.0", 40010)); + dataNodeLocation.setMPPDataExchangeEndPoint(new TEndPoint("0.0.0.0", 8777)); + dataNodeLocation.setDataRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 40010)); + dataNodeLocation.setSchemaRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 50010)); String storageGroup = "root.sg0"; TSeriesPartitionSlot seriesPartitionSlot = new TSeriesPartitionSlot(10); @@ -312,25 +303,20 @@ public void CreateDataPartitionPlanTest() throws IOException { regionReplicaSet.setRegionId(new TConsensusGroupId(TConsensusGroupType.DataRegion, 0)); regionReplicaSet.setDataNodeLocations(Collections.singletonList(dataNodeLocation)); - Map>>> - assignedDataPartition = new HashMap<>(); - assignedDataPartition.put(storageGroup, new HashMap<>()); - assignedDataPartition.get(storageGroup).put(seriesPartitionSlot, new HashMap<>()); - assignedDataPartition - .get(storageGroup) - .get(seriesPartitionSlot) - .put(timePartitionSlot, new ArrayList<>()); - assignedDataPartition - .get(storageGroup) - .get(seriesPartitionSlot) - .get(timePartitionSlot) - .add(regionReplicaSet); + Map assignedDataPartition = new HashMap<>(); + Map dataPartitionMap = new HashMap<>(); + Map> seriesPartitionMap = new HashMap<>(); + + seriesPartitionMap.put( + timePartitionSlot, + Collections.singletonList(new TConsensusGroupId(TConsensusGroupType.DataRegion, 0))); + dataPartitionMap.put(seriesPartitionSlot, new SeriesPartitionTable(seriesPartitionMap)); + assignedDataPartition.put(storageGroup, new DataPartitionTable(dataPartitionMap)); CreateDataPartitionReq req0 = new CreateDataPartitionReq(); req0.setAssignedDataPartition(assignedDataPartition); - req0.serialize(buffer); - buffer.flip(); - CreateDataPartitionReq req1 = (CreateDataPartitionReq) ConfigRequest.Factory.create(buffer); + CreateDataPartitionReq req1 = + (CreateDataPartitionReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @@ -348,9 +334,8 @@ public void GetDataPartitionPlanTest() throws IOException { GetDataPartitionReq req0 = new GetDataPartitionReq(); req0.setPartitionSlotsMap(partitionSlotsMap); - req0.serialize(buffer); - buffer.flip(); - GetDataPartitionReq req1 = (GetDataPartitionReq) ConfigRequest.Factory.create(buffer); + GetDataPartitionReq req1 = + (GetDataPartitionReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @@ -368,10 +353,8 @@ public void GetOrCreateDataPartitionPlanTest() throws IOException { GetOrCreateDataPartitionReq req0 = new GetOrCreateDataPartitionReq(); req0.setPartitionSlotsMap(partitionSlotsMap); - req0.serialize(buffer); - buffer.flip(); GetOrCreateDataPartitionReq req1 = - (GetOrCreateDataPartitionReq) ConfigRequest.Factory.create(buffer); + (GetOrCreateDataPartitionReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @@ -388,38 +371,26 @@ public void AuthorReqTest() throws IOException, AuthException { req0 = new AuthorReq( ConfigRequestType.CreateUser, "thulab", "", "passwd", "", new HashSet<>(), ""); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // create role req0 = new AuthorReq(ConfigRequestType.CreateRole, "", "admin", "", "", new HashSet<>(), ""); - 
req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // alter user req0 = new AuthorReq( ConfigRequestType.UpdateUser, "tempuser", "", "", "newpwd", new HashSet<>(), ""); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // grant user req0 = new AuthorReq(ConfigRequestType.GrantUser, "tempuser", "", "", "", permissions, "root.ln"); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // grant role req0 = @@ -431,37 +402,25 @@ public void AuthorReqTest() throws IOException, AuthException { "", permissions, "root.ln"); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // grant role to user req0 = new AuthorReq(ConfigRequestType.GrantRole, "", "temprole", "", "", new HashSet<>(), ""); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // revoke user req0 = new AuthorReq(ConfigRequestType.RevokeUser, "tempuser", "", "", "", permissions, "root.ln"); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // revoke role req0 = new AuthorReq(ConfigRequestType.RevokeRole, "", "temprole", "", "", permissions, "root.ln"); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // revoke role from user req0 = @@ -473,91 +432,58 @@ public void AuthorReqTest() throws IOException, AuthException { "", new HashSet<>(), ""); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // drop user req0 = new AuthorReq(ConfigRequestType.DropUser, "xiaoming", "", "", "", new HashSet<>(), ""); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // drop role req0 = new AuthorReq(ConfigRequestType.DropRole, "", "admin", "", "", new HashSet<>(), ""); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // list user req0 = new AuthorReq(ConfigRequestType.ListUser, "", "", "", "", new HashSet<>(), ""); - req0.serialize(buffer); - buffer.flip(); - req1 = 
(AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // list role req0 = new AuthorReq(ConfigRequestType.ListRole, "", "", "", "", new HashSet<>(), ""); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // list privileges user req0 = new AuthorReq(ConfigRequestType.ListUserPrivilege, "", "", "", "", new HashSet<>(), ""); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // list privileges role req0 = new AuthorReq(ConfigRequestType.ListRolePrivilege, "", "", "", "", new HashSet<>(), ""); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // list user privileges req0 = new AuthorReq(ConfigRequestType.ListUserPrivilege, "", "", "", "", new HashSet<>(), ""); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // list role privileges req0 = new AuthorReq(ConfigRequestType.ListRolePrivilege, "", "", "", "", new HashSet<>(), ""); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // list all role of user req0 = new AuthorReq(ConfigRequestType.ListUserRoles, "", "", "", "", new HashSet<>(), ""); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); // list all user of role req0 = new AuthorReq(ConfigRequestType.ListRoleUsers, "", "", "", "", new HashSet<>(), ""); - req0.serialize(buffer); - buffer.flip(); - req1 = (AuthorReq) ConfigRequest.Factory.create(buffer); + req1 = (AuthorReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); - cleanBuffer(); } @Test @@ -565,10 +491,20 @@ public void registerConfigNodeReqTest() throws IOException { ApplyConfigNodeReq req0 = new ApplyConfigNodeReq( new TConfigNodeLocation( - new TEndPoint("0.0.0.0", 22277), new TEndPoint("0.0.0.0", 22278))); - req0.serialize(buffer); - buffer.flip(); - ApplyConfigNodeReq req1 = (ApplyConfigNodeReq) ConfigRequest.Factory.create(buffer); + 0, new TEndPoint("0.0.0.0", 22277), new TEndPoint("0.0.0.0", 22278))); + ApplyConfigNodeReq req1 = + (ApplyConfigNodeReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); + Assert.assertEquals(req0, req1); + } + + @Test + public void removeConfigNodeReqTest() throws IOException { + RemoveConfigNodeReq req0 = + new RemoveConfigNodeReq( + new TConfigNodeLocation( + 0, new TEndPoint("0.0.0.0", 22277), new TEndPoint("0.0.0.0", 22278))); + RemoveConfigNodeReq req1 = + (RemoveConfigNodeReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); 
Assert.assertEquals(req0, req1); } @@ -577,16 +513,12 @@ public void updateProcedureTest() throws IOException { DeleteStorageGroupProcedure procedure = new DeleteStorageGroupProcedure(); TStorageGroupSchema storageGroupSchema = new TStorageGroupSchema(); storageGroupSchema.setName("root.sg"); - storageGroupSchema.setSchemaRegionGroupIds( - Collections.singletonList(new TConsensusGroupId(TConsensusGroupType.DataRegion, 0))); - storageGroupSchema.setDataRegionGroupIds( - Collections.singletonList(new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 1))); procedure.setDeleteSgSchema(storageGroupSchema); UpdateProcedureReq updateProcedureReq = new UpdateProcedureReq(); updateProcedureReq.setProcedure(procedure); - updateProcedureReq.serialize(buffer); - buffer.flip(); - UpdateProcedureReq reqNew = (UpdateProcedureReq) ConfigRequest.Factory.create(buffer); + UpdateProcedureReq reqNew = + (UpdateProcedureReq) + ConfigRequest.Factory.create(updateProcedureReq.serializeToByteBuffer()); Procedure proc = reqNew.getProcedure(); Assert.assertEquals(proc, procedure); } @@ -597,19 +529,10 @@ public void UpdateProcedureReqTest() throws IOException { DeleteStorageGroupProcedure deleteStorageGroupProcedure = new DeleteStorageGroupProcedure(); TStorageGroupSchema tStorageGroupSchema = new TStorageGroupSchema(); tStorageGroupSchema.setName("root.sg"); - List dataRegionIds = new ArrayList<>(); - List schemaRegionIds = new ArrayList<>(); - TConsensusGroupId dataRegionId = new TConsensusGroupId(TConsensusGroupType.DataRegion, 1); - dataRegionIds.add(dataRegionId); - TConsensusGroupId schemaRegionId = new TConsensusGroupId(TConsensusGroupType.SchemaRegion, 2); - schemaRegionIds.add(schemaRegionId); - tStorageGroupSchema.setDataRegionGroupIds(dataRegionIds); - tStorageGroupSchema.setSchemaRegionGroupIds(schemaRegionIds); deleteStorageGroupProcedure.setDeleteSgSchema(tStorageGroupSchema); req0.setProcedure(deleteStorageGroupProcedure); - req0.serialize(buffer); - buffer.flip(); - UpdateProcedureReq req1 = (UpdateProcedureReq) ConfigRequest.Factory.create(buffer); + UpdateProcedureReq req1 = + (UpdateProcedureReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } @@ -617,9 +540,18 @@ public void UpdateProcedureReqTest() throws IOException { public void DeleteProcedureReqTest() throws IOException { DeleteProcedureReq req0 = new DeleteProcedureReq(); req0.setProcId(1L); - req0.serialize(buffer); - buffer.flip(); - DeleteProcedureReq req1 = (DeleteProcedureReq) ConfigRequest.Factory.create(buffer); + DeleteProcedureReq req1 = + (DeleteProcedureReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); Assert.assertEquals(req0, req1); } + + @Test + public void GetRegionLocationsReqTest() throws IOException { + GetRegionInfoListReq req0 = new GetRegionInfoListReq(); + req0.setRegionType(TConsensusGroupType.DataRegion); + GetRegionInfoListReq req1 = + (GetRegionInfoListReq) ConfigRequest.Factory.create(req0.serializeToByteBuffer()); + Assert.assertEquals(req0.getType(), req1.getType()); + Assert.assertEquals(req0.getRegionType(), req1.getRegionType()); + } } diff --git a/confignode/src/test/java/org/apache/iotdb/confignode/manager/hash/DeviceGroupHashExecutorManualTest.java b/confignode/src/test/java/org/apache/iotdb/confignode/manager/hash/DeviceGroupHashExecutorManualTest.java index 4f668d407b04..0cb02123fb5f 100644 --- a/confignode/src/test/java/org/apache/iotdb/confignode/manager/hash/DeviceGroupHashExecutorManualTest.java +++
b/confignode/src/test/java/org/apache/iotdb/confignode/manager/hash/DeviceGroupHashExecutorManualTest.java @@ -20,6 +20,7 @@ import org.apache.iotdb.confignode.manager.ConfigManager; import org.apache.iotdb.confignode.manager.PartitionManager; +import org.apache.iotdb.confignode.persistence.partition.PartitionInfo; import java.io.IOException; import java.util.ArrayList; @@ -61,7 +62,7 @@ private List genBatchDevices() { } public void GeneralIndexTest() throws IOException { - PartitionManager manager = new PartitionManager(new ConfigManager()); + PartitionManager manager = new PartitionManager(new ConfigManager(), new PartitionInfo()); int[] bucket = new int[deviceGroupCount]; Arrays.fill(bucket, 0); diff --git a/confignode/src/test/java/org/apache/iotdb/confignode/persistence/AuthorInfoTest.java b/confignode/src/test/java/org/apache/iotdb/confignode/persistence/AuthorInfoTest.java index eae73ac4d799..2e2d96ef3f99 100644 --- a/confignode/src/test/java/org/apache/iotdb/confignode/persistence/AuthorInfoTest.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/persistence/AuthorInfoTest.java @@ -29,24 +29,42 @@ import org.apache.iotdb.confignode.rpc.thrift.TCheckUserPrivilegesReq; import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.commons.io.FileUtils; import org.apache.thrift.TException; +import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; +import java.io.File; +import java.io.IOException; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import static org.apache.iotdb.db.constant.TestConstant.BASE_OUTPUT_PATH; + public class AuthorInfoTest { private static AuthorInfo authorInfo; + private static final File snapshotDir = new File(BASE_OUTPUT_PATH, "authorInfo-snapshot"); @BeforeClass public static void setup() { - authorInfo = AuthorInfo.getInstance(); + authorInfo = new AuthorInfo(); + if (!snapshotDir.exists()) { + snapshotDir.mkdirs(); + } + } + + @AfterClass + public static void cleanup() throws IOException, AuthException { + authorInfo.clear(); + if (snapshotDir.exists()) { + FileUtils.deleteDirectory(snapshotDir); + } } @Test @@ -94,7 +112,10 @@ public void permissionTest() throws TException, AuthException { Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); // check user privileges - status = authorInfo.checkUserPrivileges("user0", paths, PrivilegeType.DELETE_USER.ordinal()); + status = + authorInfo + .checkUserPrivileges("user0", paths, PrivilegeType.DELETE_USER.ordinal()) + .getStatus(); Assert.assertEquals(TSStatusCode.NO_PERMISSION_ERROR.getStatusCode(), status.getCode()); // drop user @@ -145,7 +166,10 @@ public void permissionTest() throws TException, AuthException { Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); // check user privileges - status = authorInfo.checkUserPrivileges("user0", paths, PrivilegeType.DELETE_USER.ordinal()); + status = + authorInfo + .checkUserPrivileges("user0", paths, PrivilegeType.DELETE_USER.ordinal()) + .getStatus(); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); // grant role @@ -289,4 +313,27 @@ private void cleanUserAndRole() throws TException, AuthException { Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); } } + + @Test + public void takeSnapshot() throws TException, IOException, AuthException { + cleanUserAndRole(); + // create role + AuthorReq createRoleReq = 
new AuthorReq(ConfigRequestType.CreateRole); + createRoleReq.setRoleName("testRole"); + TSStatus status = authorInfo.authorNonQuery(createRoleReq); + Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + AuthorReq createUserReq = new AuthorReq(ConfigRequestType.CreateUser); + createUserReq.setUserName("testUser"); + createUserReq.setPassword("testPassword"); + status = authorInfo.authorNonQuery(createUserReq); + Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + + Assert.assertEquals(1, authorInfo.executeListRole().getPermissionInfo().get("role").size()); + Assert.assertEquals(2, authorInfo.executeListUser().getPermissionInfo().get("user").size()); + Assert.assertTrue(authorInfo.processTakeSnapshot(snapshotDir)); + authorInfo.clear(); + authorInfo.processLoadSnapshot(snapshotDir); + Assert.assertEquals(1, authorInfo.executeListRole().getPermissionInfo().get("role").size()); + Assert.assertEquals(2, authorInfo.executeListUser().getPermissionInfo().get("user").size()); + } } diff --git a/confignode/src/test/java/org/apache/iotdb/confignode/persistence/ClusterSchemaInfoTest.java b/confignode/src/test/java/org/apache/iotdb/confignode/persistence/ClusterSchemaInfoTest.java index 629d2165975f..388bda099899 100644 --- a/confignode/src/test/java/org/apache/iotdb/confignode/persistence/ClusterSchemaInfoTest.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/persistence/ClusterSchemaInfoTest.java @@ -47,8 +47,8 @@ public class ClusterSchemaInfoTest { private static final File snapshotDir = new File(BASE_OUTPUT_PATH, "snapshot"); @BeforeClass - public static void setup() { - clusterSchemaInfo = ClusterSchemaInfo.getInstance(); + public static void setup() throws IOException { + clusterSchemaInfo = new ClusterSchemaInfo(); if (!snapshotDir.exists()) { snapshotDir.mkdirs(); } @@ -91,7 +91,7 @@ public void testSnapshot() throws IOException, IllegalPathException { storageGroupPathList.size(), clusterSchemaInfo.getStorageGroupNames().size()); GetStorageGroupReq getStorageGroupReq = - new GetStorageGroupReq(Arrays.asList(PathUtils.splitPathToDetachedPath("root.**"))); + new GetStorageGroupReq(Arrays.asList(PathUtils.splitPathToDetachedNodes("root.**"))); Map reloadResult = clusterSchemaInfo.getMatchedStorageGroupSchemas(getStorageGroupReq).getSchemaMap(); Assert.assertEquals(testMap, reloadResult); diff --git a/confignode/src/test/java/org/apache/iotdb/confignode/persistence/NodeInfoTest.java b/confignode/src/test/java/org/apache/iotdb/confignode/persistence/NodeInfoTest.java new file mode 100644 index 000000000000..93c59cdda874 --- /dev/null +++ b/confignode/src/test/java/org/apache/iotdb/confignode/persistence/NodeInfoTest.java @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.confignode.persistence; + +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; +import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.confignode.consensus.request.write.RegisterDataNodeReq; + +import org.apache.commons.io.FileUtils; +import org.apache.thrift.TException; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.apache.iotdb.db.constant.TestConstant.BASE_OUTPUT_PATH; + +public class NodeInfoTest { + + private static NodeInfo nodeInfo; + private static final File snapshotDir = new File(BASE_OUTPUT_PATH, "snapshot"); + + @BeforeClass + public static void setup() { + nodeInfo = new NodeInfo(); + if (!snapshotDir.exists()) { + snapshotDir.mkdirs(); + } + } + + @AfterClass + public static void cleanup() throws IOException { + nodeInfo.clear(); + if (snapshotDir.exists()) { + FileUtils.deleteDirectory(snapshotDir); + } + } + + @Test + public void testSnapshot() throws TException, IOException { + + RegisterDataNodeReq registerDataNodeReq = + new RegisterDataNodeReq(new TDataNodeInfo(generateTDataNodeLocation(1), 16, 34359738368L)); + nodeInfo.registerDataNode(registerDataNodeReq); + + registerDataNodeReq = + new RegisterDataNodeReq(new TDataNodeInfo(generateTDataNodeLocation(2), 16, 34359738368L)); + nodeInfo.registerDataNode(registerDataNodeReq); + + Set drainingDataNodes_before = new HashSet<>(); + // parameter i is used to be flag in generateTDataNodeLocation + for (int i = 3; i < 8; i++) { + drainingDataNodes_before.add(generateTDataNodeLocation(i)); + } + nodeInfo.setDrainingDataNodes(drainingDataNodes_before); + + int nextId = nodeInfo.getNextNodeId(); + List onlineDataNodes_before = nodeInfo.getOnlineDataNodes(-1); + + nodeInfo.processTakeSnapshot(snapshotDir); + nodeInfo.clear(); + nodeInfo.processLoadSnapshot(snapshotDir); + + Assert.assertEquals(nextId, nodeInfo.getNextNodeId()); + + Set drainingDataNodes_after = nodeInfo.getDrainingDataNodes(); + Assert.assertEquals(drainingDataNodes_before, drainingDataNodes_after); + + List onlineDataNodes_after = nodeInfo.getOnlineDataNodes(-1); + Assert.assertEquals(onlineDataNodes_before, onlineDataNodes_after); + } + + private TDataNodeLocation generateTDataNodeLocation(int flag) { + return new TDataNodeLocation( + 10000 + flag, + new TEndPoint("127.0.0.1", 6600 + flag), + new TEndPoint("127.0.0.1", 7700 + flag), + new TEndPoint("127.0.0.1", 8800 + flag), + new TEndPoint("127.0.0.1", 9900 + flag), + new TEndPoint("127.0.0.1", 11000 + flag)); + } +} diff --git a/confignode/src/test/java/org/apache/iotdb/confignode/persistence/PartitionInfoTest.java b/confignode/src/test/java/org/apache/iotdb/confignode/persistence/PartitionInfoTest.java index 6e120afd86a8..3739b34c7039 100644 --- a/confignode/src/test/java/org/apache/iotdb/confignode/persistence/PartitionInfoTest.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/persistence/PartitionInfoTest.java @@ -26,9 +26,17 @@ import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; import org.apache.iotdb.common.rpc.thrift.TSeriesPartitionSlot; import org.apache.iotdb.common.rpc.thrift.TTimePartitionSlot; +import 
org.apache.iotdb.commons.partition.DataPartitionTable; +import org.apache.iotdb.commons.partition.SchemaPartitionTable; +import org.apache.iotdb.commons.partition.SeriesPartitionTable; +import org.apache.iotdb.confignode.consensus.request.read.GetRegionInfoListReq; import org.apache.iotdb.confignode.consensus.request.write.CreateDataPartitionReq; import org.apache.iotdb.confignode.consensus.request.write.CreateRegionsReq; import org.apache.iotdb.confignode.consensus.request.write.CreateSchemaPartitionReq; +import org.apache.iotdb.confignode.consensus.request.write.SetStorageGroupReq; +import org.apache.iotdb.confignode.consensus.response.RegionInfoListResp; +import org.apache.iotdb.confignode.persistence.partition.PartitionInfo; +import org.apache.iotdb.confignode.rpc.thrift.TStorageGroupSchema; import org.apache.commons.io.FileUtils; import org.apache.thrift.TException; @@ -52,8 +60,7 @@ public class PartitionInfoTest { private static PartitionInfo partitionInfo; private static final File snapshotDir = new File(BASE_OUTPUT_PATH, "snapshot"); - enum testFlag { - RegionReplica(10), + public enum testFlag { DataPartition(20), SchemaPartition(30); @@ -70,7 +77,7 @@ public int getFlag() { @BeforeClass public static void setup() { - partitionInfo = PartitionInfo.getInstance(); + partitionInfo = new PartitionInfo(); if (!snapshotDir.exists()) { snapshotDir.mkdirs(); } @@ -89,47 +96,117 @@ public void testSnapshot() throws TException, IOException { partitionInfo.generateNextRegionGroupId(); - CreateRegionsReq createRegionsReq = new CreateRegionsReq(); + // Set StorageGroup + partitionInfo.setStorageGroup(new SetStorageGroupReq(new TStorageGroupSchema("root.test"))); + + // Create a SchemaRegion + CreateRegionsReq createRegionGroupsReq = new CreateRegionsReq(); + TRegionReplicaSet schemaRegionReplicaSet = + generateTRegionReplicaSet( + testFlag.SchemaPartition.getFlag(), + generateTConsensusGroupId( + testFlag.SchemaPartition.getFlag(), TConsensusGroupType.SchemaRegion)); + createRegionGroupsReq.addRegionGroup("root.test", schemaRegionReplicaSet); + partitionInfo.createRegionGroups(createRegionGroupsReq); - TRegionReplicaSet tRegionReplicaSet = + // Create a DataRegion + createRegionGroupsReq = new CreateRegionsReq(); + TRegionReplicaSet dataRegionReplicaSet = generateTRegionReplicaSet( - testFlag.RegionReplica.getFlag(), - generateTConsensusGroupId(testFlag.RegionReplica.getFlag())); - createRegionsReq.addRegion("root.test", tRegionReplicaSet); - partitionInfo.createRegions(createRegionsReq); + testFlag.DataPartition.getFlag(), + generateTConsensusGroupId( + testFlag.DataPartition.getFlag(), TConsensusGroupType.DataRegion)); + createRegionGroupsReq.addRegionGroup("root.test", dataRegionReplicaSet); + partitionInfo.createRegionGroups(createRegionGroupsReq); + // Create a SchemaPartition CreateSchemaPartitionReq createSchemaPartitionReq = generateCreateSchemaPartitionReq( testFlag.SchemaPartition.getFlag(), - generateTConsensusGroupId(testFlag.SchemaPartition.getFlag())); + generateTConsensusGroupId( + testFlag.SchemaPartition.getFlag(), TConsensusGroupType.SchemaRegion)); partitionInfo.createSchemaPartition(createSchemaPartitionReq); + // Create a DataPartition CreateDataPartitionReq createDataPartitionReq = generateCreateDataPartitionReq( testFlag.DataPartition.getFlag(), - generateTConsensusGroupId(testFlag.DataPartition.getFlag())); + generateTConsensusGroupId( + testFlag.DataPartition.getFlag(), TConsensusGroupType.DataRegion)); partitionInfo.createDataPartition(createDataPartitionReq); - int 
nextId = partitionInfo.getNextRegionGroupId(); + + partitionInfo.getDeletedRegionSet().add(dataRegionReplicaSet); + partitionInfo.getDeletedRegionSet().add(schemaRegionReplicaSet); partitionInfo.processTakeSnapshot(snapshotDir); - partitionInfo.clear(); - partitionInfo.processLoadSnapshot(snapshotDir); - Assert.assertEquals(nextId, (int) partitionInfo.getNextRegionGroupId()); + PartitionInfo partitionInfo1 = new PartitionInfo(); + partitionInfo1.processLoadSnapshot(snapshotDir); + Assert.assertEquals(partitionInfo, partitionInfo1); + } - List reloadTRegionReplicaSet = - partitionInfo.getRegionReplicaSets( - Collections.singletonList(generateTConsensusGroupId(testFlag.RegionReplica.getFlag()))); - Assert.assertEquals(1, reloadTRegionReplicaSet.size()); - Assert.assertEquals(tRegionReplicaSet, reloadTRegionReplicaSet.get(0)); + @Test + public void testShowRegion() { + partitionInfo.generateNextRegionGroupId(); - Assert.assertEquals( - createDataPartitionReq.getAssignedDataPartition(), - partitionInfo.getDataPartition().getDataPartitionMap()); + // Set StorageGroup + partitionInfo.setStorageGroup(new SetStorageGroupReq(new TStorageGroupSchema("root.test"))); - Assert.assertEquals( - createSchemaPartitionReq.getAssignedSchemaPartition(), - partitionInfo.getSchemaPartition().getSchemaPartitionMap()); + // Create a SchemaRegion + CreateRegionsReq createRegionsReq = new CreateRegionsReq(); + TRegionReplicaSet schemaRegionReplicaSet = + generateTRegionReplicaSet( + testFlag.SchemaPartition.getFlag(), + generateTConsensusGroupId( + testFlag.SchemaPartition.getFlag(), TConsensusGroupType.SchemaRegion)); + createRegionsReq.addRegionGroup("root.test", schemaRegionReplicaSet); + partitionInfo.createRegionGroups(createRegionsReq); + + // Create a DataRegion + createRegionsReq = new CreateRegionsReq(); + TRegionReplicaSet dataRegionReplicaSet = + generateTRegionReplicaSet( + testFlag.DataPartition.getFlag(), + generateTConsensusGroupId( + testFlag.DataPartition.getFlag(), TConsensusGroupType.DataRegion)); + createRegionsReq.addRegionGroup("root.test", dataRegionReplicaSet); + partitionInfo.createRegionGroups(createRegionsReq); + + GetRegionInfoListReq regionReq = new GetRegionInfoListReq(); + regionReq.setRegionType(null); + RegionInfoListResp regionInfoList1 = + (RegionInfoListResp) partitionInfo.getRegionInfoList(regionReq); + Assert.assertEquals(regionInfoList1.getRegionInfoList().size(), 10); + regionInfoList1 + .getRegionInfoList() + .forEach( + (regionInfo) -> { + Assert.assertEquals(regionInfo.getClientRpcIp(), "127.0.0.1"); + }); + + regionReq.setRegionType(TConsensusGroupType.SchemaRegion); + RegionInfoListResp regionInfoList2 = + (RegionInfoListResp) partitionInfo.getRegionInfoList(regionReq); + Assert.assertEquals(regionInfoList2.getRegionInfoList().size(), 5); + regionInfoList2 + .getRegionInfoList() + .forEach( + (regionInfo) -> { + Assert.assertEquals( + regionInfo.getConsensusGroupId().getType(), TConsensusGroupType.SchemaRegion); + }); + + regionReq.setRegionType(TConsensusGroupType.DataRegion); + RegionInfoListResp regionInfoList3 = + (RegionInfoListResp) partitionInfo.getRegionInfoList(regionReq); + Assert.assertEquals(regionInfoList3.getRegionInfoList().size(), 5); + regionInfoList3 + .getRegionInfoList() + .forEach( + (regionInfo) -> { + Assert.assertEquals( + regionInfo.getConsensusGroupId().getType(), TConsensusGroupType.DataRegion); + }); } private TRegionReplicaSet generateTRegionReplicaSet( @@ -141,10 +218,11 @@ private TRegionReplicaSet generateTRegionReplicaSet( for (int i = 
startFlag; i < locationNum + startFlag; i++) { TDataNodeLocation tDataNodeLocation = new TDataNodeLocation(); tDataNodeLocation.setDataNodeId(i); - tDataNodeLocation.setExternalEndPoint(new TEndPoint("127.0.0.1", 6000 + i)); + tDataNodeLocation.setClientRpcEndPoint(new TEndPoint("127.0.0.1", 6000 + i)); tDataNodeLocation.setInternalEndPoint(new TEndPoint("127.0.0.1", 7000 + i)); - tDataNodeLocation.setDataBlockManagerEndPoint(new TEndPoint("127.0.0.1", 8000 + i)); - tDataNodeLocation.setConsensusEndPoint(new TEndPoint("127.0.0.1", 9000 + i)); + tDataNodeLocation.setMPPDataExchangeEndPoint(new TEndPoint("127.0.0.1", 8000 + i)); + tDataNodeLocation.setDataRegionConsensusEndPoint(new TEndPoint("127.0.0.1", 9000 + i)); + tDataNodeLocation.setSchemaRegionConsensusEndPoint(new TEndPoint("127.0.0.1", 10000 + i)); dataNodeLocations.add(tDataNodeLocation); } tRegionReplicaSet.setDataNodeLocations(dataNodeLocations); @@ -155,39 +233,42 @@ private CreateSchemaPartitionReq generateCreateSchemaPartitionReq( int startFlag, TConsensusGroupId tConsensusGroupId) { CreateSchemaPartitionReq createSchemaPartitionReq = new CreateSchemaPartitionReq(); // Map> - Map> assignedSchemaPartition = - new HashMap<>(); - Map relationInfo = new HashMap<>(); - relationInfo.put( - new TSeriesPartitionSlot(startFlag), - generateTRegionReplicaSet(startFlag, tConsensusGroupId)); - assignedSchemaPartition.put("root.test.sg", relationInfo); + Map assignedSchemaPartition = new HashMap<>(); + Map relationInfo = new HashMap<>(); + relationInfo.put(new TSeriesPartitionSlot(startFlag), tConsensusGroupId); + assignedSchemaPartition.put("root.test", new SchemaPartitionTable(relationInfo)); createSchemaPartitionReq.setAssignedSchemaPartition(assignedSchemaPartition); return createSchemaPartitionReq; } private CreateDataPartitionReq generateCreateDataPartitionReq( int startFlag, TConsensusGroupId tConsensusGroupId) { + startFlag = startFlag / 10; CreateDataPartitionReq createSchemaPartitionReq = new CreateDataPartitionReq(); // Map>>> - Map>>> - dataPartitionMap = new HashMap<>(); + Map dataPartitionMap = new HashMap<>(); + + Map> relationInfo = new HashMap<>(); - Map> relationInfo = new HashMap<>(); - relationInfo.put( - new TTimePartitionSlot(System.currentTimeMillis() / 1000), - Collections.singletonList(generateTRegionReplicaSet(startFlag, tConsensusGroupId))); + for (int i = 0; i <= startFlag; i++) { + relationInfo.put( + new TTimePartitionSlot((System.currentTimeMillis() / 1000) + i), + Collections.singletonList(tConsensusGroupId)); + } - Map>> slotInfo = - new HashMap<>(); - slotInfo.put(new TSeriesPartitionSlot(startFlag), relationInfo); + Map slotInfo = new HashMap<>(); + + for (int i = 0; i <= startFlag; i++) { + slotInfo.put(new TSeriesPartitionSlot(startFlag + i), new SeriesPartitionTable(relationInfo)); + } - dataPartitionMap.put("root.test.data.sg", slotInfo); + dataPartitionMap.put("root.test", new DataPartitionTable(slotInfo)); createSchemaPartitionReq.setAssignedDataPartition(dataPartitionMap); return createSchemaPartitionReq; } - private TConsensusGroupId generateTConsensusGroupId(int startFlag) { - return new TConsensusGroupId(TConsensusGroupType.PartitionRegion, 111000 + startFlag); + private TConsensusGroupId generateTConsensusGroupId( + int startFlag, TConsensusGroupType consensusGroupType) { + return new TConsensusGroupId(consensusGroupType, 111000 + startFlag); } } diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/NoopProcedureStore.java 
b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/NoopProcedureStore.java similarity index 93% rename from procedure/src/test/java/org/apache/iotdb/procedure/NoopProcedureStore.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/NoopProcedureStore.java index 35875af12efe..07e35a6f2695 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/NoopProcedureStore.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/NoopProcedureStore.java @@ -17,9 +17,9 @@ * under the License. */ -package org.apache.iotdb.procedure; +package org.apache.iotdb.confignode.procedure; -import org.apache.iotdb.procedure.store.IProcedureStore; +import org.apache.iotdb.confignode.procedure.store.IProcedureStore; import java.util.List; diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/TestLockRegime.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/TestLockRegime.java similarity index 89% rename from procedure/src/test/java/org/apache/iotdb/procedure/TestLockRegime.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/TestLockRegime.java index 82e19013d0e8..500a51e9e3d0 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/TestLockRegime.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/TestLockRegime.java @@ -17,10 +17,10 @@ * under the License. */ -package org.apache.iotdb.procedure; +package org.apache.iotdb.confignode.procedure; -import org.apache.iotdb.procedure.entity.SimpleLockProcedure; -import org.apache.iotdb.procedure.util.ProcedureTestUtil; +import org.apache.iotdb.confignode.procedure.entity.SimpleLockProcedure; +import org.apache.iotdb.confignode.procedure.util.ProcedureTestUtil; import org.junit.Assert; import org.junit.Test; diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/TestProcedureBase.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/TestProcedureBase.java similarity index 92% rename from procedure/src/test/java/org/apache/iotdb/procedure/TestProcedureBase.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/TestProcedureBase.java index 9edf1af549e9..bce76d3f45b1 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/TestProcedureBase.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/TestProcedureBase.java @@ -17,9 +17,10 @@ * under the License. */ -package org.apache.iotdb.procedure; +package org.apache.iotdb.confignode.procedure; -import org.apache.iotdb.procedure.store.IProcedureStore; +import org.apache.iotdb.confignode.procedure.env.TestProcEnv; +import org.apache.iotdb.confignode.procedure.store.IProcedureStore; import org.junit.After; import org.junit.Before; diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/TestProcedureExecutor.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/TestProcedureExecutor.java similarity index 90% rename from procedure/src/test/java/org/apache/iotdb/procedure/TestProcedureExecutor.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/TestProcedureExecutor.java index 1f750115112c..046e4f9ffa96 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/TestProcedureExecutor.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/TestProcedureExecutor.java @@ -17,12 +17,13 @@ * under the License. 
*/ -package org.apache.iotdb.procedure; +package org.apache.iotdb.confignode.procedure; -import org.apache.iotdb.procedure.entity.IncProcedure; -import org.apache.iotdb.procedure.entity.NoopProcedure; -import org.apache.iotdb.procedure.entity.StuckProcedure; -import org.apache.iotdb.procedure.util.ProcedureTestUtil; +import org.apache.iotdb.confignode.procedure.entity.IncProcedure; +import org.apache.iotdb.confignode.procedure.entity.NoopProcedure; +import org.apache.iotdb.confignode.procedure.entity.StuckProcedure; +import org.apache.iotdb.confignode.procedure.env.TestProcEnv; +import org.apache.iotdb.confignode.procedure.util.ProcedureTestUtil; import org.junit.Assert; import org.junit.Test; @@ -54,7 +55,6 @@ public void testSubmitProcedure() { @Test public void testWorkerThreadStuck() throws InterruptedException { - procExecutor.setKeepAliveTime(10, TimeUnit.SECONDS); Semaphore latch1 = new Semaphore(2); latch1.acquire(2); StuckProcedure busyProc1 = new StuckProcedure(latch1); @@ -83,7 +83,6 @@ public void testWorkerThreadStuck() throws InterruptedException { latch2.release(); LOG.info("set keep alive and wait threads being removed"); - procExecutor.setKeepAliveTime(500L, TimeUnit.MILLISECONDS); int threads2 = waitThreadCount(2); LOG.info("threads got removed: " + (threads1 - threads2)); Assert.assertEquals(2, threads2); diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/TestSTMProcedure.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/TestSTMProcedure.java similarity index 88% rename from procedure/src/test/java/org/apache/iotdb/procedure/TestSTMProcedure.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/TestSTMProcedure.java index 286dcbb165c3..7b387d3d438a 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/TestSTMProcedure.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/TestSTMProcedure.java @@ -17,10 +17,11 @@ * under the License. */ -package org.apache.iotdb.procedure; +package org.apache.iotdb.confignode.procedure; -import org.apache.iotdb.procedure.entity.SimpleSTMProcedure; -import org.apache.iotdb.procedure.util.ProcedureTestUtil; +import org.apache.iotdb.confignode.procedure.entity.SimpleSTMProcedure; +import org.apache.iotdb.confignode.procedure.env.TestProcEnv; +import org.apache.iotdb.confignode.procedure.util.ProcedureTestUtil; import org.junit.Assert; import org.junit.Test; diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/entity/IncProcedure.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/IncProcedure.java similarity index 75% rename from procedure/src/test/java/org/apache/iotdb/procedure/entity/IncProcedure.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/IncProcedure.java index 5459d082312a..2a86110825d3 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/entity/IncProcedure.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/IncProcedure.java @@ -17,15 +17,15 @@ * under the License. 
*/ -package org.apache.iotdb.procedure.entity; +package org.apache.iotdb.confignode.procedure.entity; -import org.apache.iotdb.procedure.Procedure; -import org.apache.iotdb.procedure.TestProcEnv; -import org.apache.iotdb.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.procedure.exception.ProcedureYieldException; +import org.apache.iotdb.confignode.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.env.TestProcEnv; +import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; +import java.io.DataOutputStream; import java.io.IOException; -import java.nio.ByteBuffer; import java.util.concurrent.atomic.AtomicInteger; public class IncProcedure extends Procedure { @@ -57,8 +57,8 @@ protected boolean abort(TestProcEnv testProcEnv) { } @Override - public void serialize(ByteBuffer byteBuffer) { - byteBuffer.putInt(TestProcedureFactory.TestProcedureType.INC_PROCEDURE.ordinal()); - super.serialize(byteBuffer); + public void serialize(DataOutputStream stream) throws IOException { + stream.writeInt(TestProcedureFactory.TestProcedureType.INC_PROCEDURE.ordinal()); + super.serialize(stream); } } diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/entity/NoopProcedure.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/NoopProcedure.java similarity index 79% rename from procedure/src/test/java/org/apache/iotdb/procedure/entity/NoopProcedure.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/NoopProcedure.java index 7afe81860b9c..bdaf0401cc8f 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/entity/NoopProcedure.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/NoopProcedure.java @@ -17,12 +17,12 @@ * under the License. */ -package org.apache.iotdb.procedure.entity; +package org.apache.iotdb.confignode.procedure.entity; -import org.apache.iotdb.procedure.Procedure; -import org.apache.iotdb.procedure.TestProcEnv; -import org.apache.iotdb.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.procedure.exception.ProcedureYieldException; +import org.apache.iotdb.confignode.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.env.TestProcEnv; +import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import java.io.IOException; diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/entity/SimpleLockProcedure.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SimpleLockProcedure.java similarity index 83% rename from procedure/src/test/java/org/apache/iotdb/procedure/entity/SimpleLockProcedure.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SimpleLockProcedure.java index c0710962c150..564b98079420 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/entity/SimpleLockProcedure.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SimpleLockProcedure.java @@ -17,14 +17,14 @@ * under the License. 
*/ -package org.apache.iotdb.procedure.entity; +package org.apache.iotdb.confignode.procedure.entity; -import org.apache.iotdb.procedure.Procedure; -import org.apache.iotdb.procedure.ProcedureLockState; -import org.apache.iotdb.procedure.TestProcEnv; -import org.apache.iotdb.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.procedure.exception.ProcedureYieldException; -import org.apache.iotdb.procedure.scheduler.SimpleProcedureScheduler; +import org.apache.iotdb.confignode.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.env.TestProcEnv; +import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; +import org.apache.iotdb.confignode.procedure.scheduler.SimpleProcedureScheduler; +import org.apache.iotdb.confignode.procedure.state.ProcedureLockState; import java.io.IOException; diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/entity/SimpleSTMProcedure.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SimpleSTMProcedure.java similarity index 85% rename from procedure/src/test/java/org/apache/iotdb/procedure/entity/SimpleSTMProcedure.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SimpleSTMProcedure.java index 1ab2c36c0d31..86161619b399 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/entity/SimpleSTMProcedure.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SimpleSTMProcedure.java @@ -17,13 +17,13 @@ * under the License. */ -package org.apache.iotdb.procedure.entity; +package org.apache.iotdb.confignode.procedure.entity; -import org.apache.iotdb.procedure.StateMachineProcedure; -import org.apache.iotdb.procedure.TestProcEnv; -import org.apache.iotdb.procedure.exception.ProcedureException; -import org.apache.iotdb.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.procedure.exception.ProcedureYieldException; +import org.apache.iotdb.confignode.procedure.StateMachineProcedure; +import org.apache.iotdb.confignode.procedure.env.TestProcEnv; +import org.apache.iotdb.confignode.procedure.exception.ProcedureException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; diff --git a/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SleepProcedure.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SleepProcedure.java new file mode 100644 index 000000000000..f3b2abf054a5 --- /dev/null +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/SleepProcedure.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.procedure.entity; + +import org.apache.iotdb.confignode.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.env.TestProcEnv; +import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; +import org.apache.iotdb.confignode.procedure.util.ProcedureTestUtil; + +import java.io.DataOutputStream; +import java.io.IOException; + +public class SleepProcedure extends Procedure { + @Override + protected Procedure[] execute(TestProcEnv testProcEnv) + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + System.out.println("Procedure is sleeping."); + ProcedureTestUtil.sleepWithoutInterrupt(2000); + return null; + } + + @Override + protected void rollback(TestProcEnv testProcEnv) throws IOException, InterruptedException {} + + @Override + protected boolean abort(TestProcEnv testProcEnv) { + return false; + } + + @Override + public void serialize(DataOutputStream stream) throws IOException { + stream.writeInt(TestProcedureFactory.TestProcedureType.SLEEP_PROCEDURE.ordinal()); + super.serialize(stream); + } +} diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/entity/StuckProcedure.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/StuckProcedure.java similarity index 90% rename from procedure/src/test/java/org/apache/iotdb/procedure/entity/StuckProcedure.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/StuckProcedure.java index 35f86bd15b5b..1db02597c412 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/entity/StuckProcedure.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/StuckProcedure.java @@ -17,10 +17,10 @@ * under the License. */ -package org.apache.iotdb.procedure.entity; +package org.apache.iotdb.confignode.procedure.entity; -import org.apache.iotdb.procedure.Procedure; -import org.apache.iotdb.procedure.TestProcEnv; +import org.apache.iotdb.confignode.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.env.TestProcEnv; import java.io.IOException; import java.util.concurrent.Semaphore; diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/entity/StuckSTMProcedure.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/StuckSTMProcedure.java similarity index 80% rename from procedure/src/test/java/org/apache/iotdb/procedure/entity/StuckSTMProcedure.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/StuckSTMProcedure.java index 1bab63028ab7..71a43580d5c9 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/entity/StuckSTMProcedure.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/StuckSTMProcedure.java @@ -17,14 +17,15 @@ * under the License. 
*/ -package org.apache.iotdb.procedure.entity; +package org.apache.iotdb.confignode.procedure.entity; -import org.apache.iotdb.procedure.StateMachineProcedure; -import org.apache.iotdb.procedure.TestProcEnv; -import org.apache.iotdb.procedure.exception.ProcedureException; -import org.apache.iotdb.procedure.exception.ProcedureSuspendedException; -import org.apache.iotdb.procedure.exception.ProcedureYieldException; +import org.apache.iotdb.confignode.procedure.StateMachineProcedure; +import org.apache.iotdb.confignode.procedure.env.TestProcEnv; +import org.apache.iotdb.confignode.procedure.exception.ProcedureException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureSuspendedException; +import org.apache.iotdb.confignode.procedure.exception.ProcedureYieldException; +import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.util.concurrent.atomic.AtomicInteger; @@ -99,10 +100,10 @@ protected TestState getInitialState() { } @Override - public void serialize(ByteBuffer byteBuffer) { - byteBuffer.putInt(TestProcedureFactory.TestProcedureType.STUCK_STM_PROCEDURE.ordinal()); - super.serialize(byteBuffer); - byteBuffer.putInt(childCount); + public void serialize(DataOutputStream stream) throws IOException { + stream.writeInt(TestProcedureFactory.TestProcedureType.STUCK_STM_PROCEDURE.ordinal()); + super.serialize(stream); + stream.writeInt(childCount); } @Override diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/entity/TestProcedureFactory.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/TestProcedureFactory.java similarity index 92% rename from procedure/src/test/java/org/apache/iotdb/procedure/entity/TestProcedureFactory.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/TestProcedureFactory.java index 1b248793f18b..421fe7cc46a4 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/entity/TestProcedureFactory.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/entity/TestProcedureFactory.java @@ -17,10 +17,10 @@ * under the License. */ -package org.apache.iotdb.procedure.entity; +package org.apache.iotdb.confignode.procedure.entity; -import org.apache.iotdb.procedure.Procedure; -import org.apache.iotdb.procedure.store.IProcedureFactory; +import org.apache.iotdb.confignode.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.store.IProcedureFactory; import java.io.IOException; import java.nio.ByteBuffer; diff --git a/confignode/src/test/java/org/apache/iotdb/confignode/procedure/env/TestConfigNodeEnv.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/env/TestConfigNodeEnv.java new file mode 100644 index 000000000000..a27b2244eadc --- /dev/null +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/env/TestConfigNodeEnv.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.confignode.procedure.env; + +public class TestConfigNodeEnv {} diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/TestProcEnv.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/env/TestProcEnv.java similarity index 92% rename from procedure/src/test/java/org/apache/iotdb/procedure/TestProcEnv.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/env/TestProcEnv.java index 3bf0bd6c0f80..63e4f1997b9b 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/TestProcEnv.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/env/TestProcEnv.java @@ -17,9 +17,9 @@ * under the License. */ -package org.apache.iotdb.procedure; +package org.apache.iotdb.confignode.procedure.env; -import org.apache.iotdb.procedure.scheduler.ProcedureScheduler; +import org.apache.iotdb.confignode.procedure.scheduler.ProcedureScheduler; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantLock; diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/store/TestProcedureStore.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/store/TestProcedureStore.java similarity index 85% rename from procedure/src/test/java/org/apache/iotdb/procedure/store/TestProcedureStore.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/store/TestProcedureStore.java index 24091df56552..9e1139adc300 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/store/TestProcedureStore.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/store/TestProcedureStore.java @@ -17,17 +17,17 @@ * under the License. 
*/ -package org.apache.iotdb.procedure.store; +package org.apache.iotdb.confignode.procedure.store; -import org.apache.iotdb.procedure.Procedure; -import org.apache.iotdb.procedure.ProcedureExecutor; -import org.apache.iotdb.procedure.TestProcEnv; -import org.apache.iotdb.procedure.TestProcedureBase; -import org.apache.iotdb.procedure.entity.IncProcedure; -import org.apache.iotdb.procedure.entity.StuckSTMProcedure; -import org.apache.iotdb.procedure.entity.TestProcedureFactory; -import org.apache.iotdb.procedure.util.ProcedureTestUtil; -import org.apache.iotdb.service.rpc.thrift.ProcedureState; +import org.apache.iotdb.confignode.procedure.Procedure; +import org.apache.iotdb.confignode.procedure.ProcedureExecutor; +import org.apache.iotdb.confignode.procedure.TestProcedureBase; +import org.apache.iotdb.confignode.procedure.entity.IncProcedure; +import org.apache.iotdb.confignode.procedure.entity.StuckSTMProcedure; +import org.apache.iotdb.confignode.procedure.entity.TestProcedureFactory; +import org.apache.iotdb.confignode.procedure.env.TestProcEnv; +import org.apache.iotdb.confignode.procedure.state.ProcedureState; +import org.apache.iotdb.confignode.procedure.util.ProcedureTestUtil; import org.apache.commons.io.FileUtils; import org.junit.Assert; diff --git a/procedure/src/test/java/org/apache/iotdb/procedure/util/ProcedureTestUtil.java b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/util/ProcedureTestUtil.java similarity index 88% rename from procedure/src/test/java/org/apache/iotdb/procedure/util/ProcedureTestUtil.java rename to confignode/src/test/java/org/apache/iotdb/confignode/procedure/util/ProcedureTestUtil.java index e23622381857..fbabd754ff34 100644 --- a/procedure/src/test/java/org/apache/iotdb/procedure/util/ProcedureTestUtil.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/procedure/util/ProcedureTestUtil.java @@ -17,11 +17,11 @@ * under the License. 
*/ -package org.apache.iotdb.procedure.util; +package org.apache.iotdb.confignode.procedure.util; -import org.apache.iotdb.procedure.ProcedureExecutor; -import org.apache.iotdb.procedure.scheduler.ProcedureScheduler; -import org.apache.iotdb.procedure.store.IProcedureStore; +import org.apache.iotdb.confignode.procedure.ProcedureExecutor; +import org.apache.iotdb.confignode.procedure.scheduler.ProcedureScheduler; +import org.apache.iotdb.confignode.procedure.store.IProcedureStore; import java.util.concurrent.TimeUnit; diff --git a/confignode/src/test/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessorTest.java b/confignode/src/test/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessorTest.java index 95ae39602064..434e76a721f0 100644 --- a/confignode/src/test/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessorTest.java +++ b/confignode/src/test/java/org/apache/iotdb/confignode/service/thrift/ConfigNodeRPCServiceProcessorTest.java @@ -18,7 +18,9 @@ */ package org.apache.iotdb.confignode.service.thrift; +import org.apache.iotdb.common.rpc.thrift.TConfigNodeLocation; import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; +import org.apache.iotdb.common.rpc.thrift.TDataNodeInfo; import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet; @@ -32,25 +34,29 @@ import org.apache.iotdb.commons.exception.IllegalPathException; import org.apache.iotdb.commons.exception.StartupException; import org.apache.iotdb.commons.path.PartialPath; +import org.apache.iotdb.commons.udf.service.UDFClassLoaderManager; +import org.apache.iotdb.commons.udf.service.UDFExecutableManager; +import org.apache.iotdb.commons.udf.service.UDFRegistrationService; +import org.apache.iotdb.confignode.conf.ConfigNodeConfig; import org.apache.iotdb.confignode.conf.ConfigNodeConstant; import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor; import org.apache.iotdb.confignode.conf.ConfigNodeStartupCheck; import org.apache.iotdb.confignode.manager.ConfigManager; -import org.apache.iotdb.confignode.persistence.ClusterSchemaInfo; -import org.apache.iotdb.confignode.persistence.NodeInfo; -import org.apache.iotdb.confignode.persistence.PartitionInfo; -import org.apache.iotdb.confignode.procedure.DeleteStorageGroupProcedure; +import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; import org.apache.iotdb.confignode.rpc.thrift.TAuthorizerReq; import org.apache.iotdb.confignode.rpc.thrift.TAuthorizerResp; import org.apache.iotdb.confignode.rpc.thrift.TCheckUserPrivilegesReq; +import org.apache.iotdb.confignode.rpc.thrift.TClusterNodeInfos; import org.apache.iotdb.confignode.rpc.thrift.TCountStorageGroupResp; -import org.apache.iotdb.confignode.rpc.thrift.TDataNodeLocationResp; +import org.apache.iotdb.confignode.rpc.thrift.TDataNodeInfoResp; import org.apache.iotdb.confignode.rpc.thrift.TDataNodeRegisterReq; import org.apache.iotdb.confignode.rpc.thrift.TDataNodeRegisterResp; import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionReq; import org.apache.iotdb.confignode.rpc.thrift.TDataPartitionResp; import org.apache.iotdb.confignode.rpc.thrift.TDeleteStorageGroupsReq; import org.apache.iotdb.confignode.rpc.thrift.TGlobalConfig; +import org.apache.iotdb.confignode.rpc.thrift.TSchemaNodeManagementReq; +import org.apache.iotdb.confignode.rpc.thrift.TSchemaNodeManagementResp; import 
org.apache.iotdb.confignode.rpc.thrift.TSchemaPartitionReq; import org.apache.iotdb.confignode.rpc.thrift.TSchemaPartitionResp; import org.apache.iotdb.confignode.rpc.thrift.TSetDataReplicationFactorReq; @@ -85,7 +91,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.TimeUnit; public class ConfigNodeRPCServiceProcessorTest { @@ -93,21 +98,22 @@ public class ConfigNodeRPCServiceProcessorTest { @BeforeClass public static void beforeClass() throws StartupException, ConfigurationException, IOException { + final ConfigNodeConfig configNodeConfig = ConfigNodeDescriptor.getInstance().getConf(); + UDFExecutableManager.setupAndGetInstance( + configNodeConfig.getTemporaryLibDir(), configNodeConfig.getUdfLibDir()); + UDFClassLoaderManager.setupAndGetInstance(configNodeConfig.getUdfLibDir()); + UDFRegistrationService.setupAndGetInstance(configNodeConfig.getSystemUdfDir()); ConfigNodeStartupCheck.getInstance().startUpCheck(); } @Before - public void before() throws IOException, InterruptedException { + public void before() throws IOException { processor = new ConfigNodeRPCServiceProcessor(new ConfigManager()); - // Sleep 1s to make sure the Consensus group has done leader election - TimeUnit.SECONDS.sleep(1); + processor.getConsensusManager().singleCopyMayWaitUntilLeaderReady(); } @After public void after() throws IOException { - NodeInfo.getInstance().clear(); - ClusterSchemaInfo.getInstance().clear(); - PartitionInfo.getInstance().clear(); processor.close(); FileUtils.deleteFully(new File(ConfigNodeDescriptor.getInstance().getConf().getConsensusDir())); FileUtils.deleteFully( @@ -116,13 +122,19 @@ public void after() throws IOException { @AfterClass public static void afterClass() throws IOException { + UDFExecutableManager.getInstance().stop(); + UDFClassLoaderManager.getInstance().stop(); + UDFRegistrationService.getInstance().stop(); FileUtils.deleteFully(new File(ConfigNodeConstant.DATA_DIR)); } private void checkGlobalConfig(TGlobalConfig globalConfig) { Assert.assertEquals( - ConfigNodeDescriptor.getInstance().getConf().getDataNodeConsensusProtocolClass(), - globalConfig.getDataNodeConsensusProtocolClass()); + ConfigNodeDescriptor.getInstance().getConf().getDataRegionConsensusProtocolClass(), + globalConfig.getDataRegionConsensusProtocolClass()); + Assert.assertEquals( + ConfigNodeDescriptor.getInstance().getConf().getSchemaRegionConsensusProtocolClass(), + globalConfig.getSchemaRegionConsensusProtocolClass()); Assert.assertEquals( ConfigNodeDescriptor.getInstance().getConf().getSeriesPartitionSlotNum(), globalConfig.getSeriesPartitionSlotNum()); @@ -134,72 +146,112 @@ private void checkGlobalConfig(TGlobalConfig globalConfig) { private void registerDataNodes() throws TException { for (int i = 0; i < 3; i++) { TDataNodeLocation dataNodeLocation = new TDataNodeLocation(); - dataNodeLocation.setExternalEndPoint(new TEndPoint("0.0.0.0", 6667 + i)); + dataNodeLocation.setClientRpcEndPoint(new TEndPoint("0.0.0.0", 6667 + i)); dataNodeLocation.setInternalEndPoint(new TEndPoint("0.0.0.0", 9003 + i)); - dataNodeLocation.setDataBlockManagerEndPoint(new TEndPoint("0.0.0.0", 8777 + i)); - dataNodeLocation.setConsensusEndPoint(new TEndPoint("0.0.0.0", 40010 + i)); + dataNodeLocation.setMPPDataExchangeEndPoint(new TEndPoint("0.0.0.0", 8777 + i)); + dataNodeLocation.setDataRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 40010 + i)); + dataNodeLocation.setSchemaRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 50010 + i)); + + TDataNodeInfo dataNodeInfo = new 
TDataNodeInfo(); + dataNodeInfo.setLocation(dataNodeLocation); + dataNodeInfo.setCpuCoreNum(8); + dataNodeInfo.setMaxMemory(1024 * 1024); - TDataNodeRegisterReq req = new TDataNodeRegisterReq(dataNodeLocation); + TDataNodeRegisterReq req = new TDataNodeRegisterReq(dataNodeInfo); TDataNodeRegisterResp resp = processor.registerDataNode(req); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), resp.getStatus().getCode()); - Assert.assertEquals(i, resp.getDataNodeId()); + Assert.assertEquals(i + 1, resp.getDataNodeId()); checkGlobalConfig(resp.getGlobalConfig()); } } @Test - public void registerAndQueryDataNodeTest() throws TException { + public void testRegisterAndQueryDataNode() throws TException { registerDataNodes(); // test success re-register TDataNodeLocation dataNodeLocation = new TDataNodeLocation(); - dataNodeLocation.setExternalEndPoint(new TEndPoint("0.0.0.0", 6668)); + dataNodeLocation.setClientRpcEndPoint(new TEndPoint("0.0.0.0", 6668)); dataNodeLocation.setInternalEndPoint(new TEndPoint("0.0.0.0", 9004)); - dataNodeLocation.setDataBlockManagerEndPoint(new TEndPoint("0.0.0.0", 8778)); - dataNodeLocation.setConsensusEndPoint(new TEndPoint("0.0.0.0", 40011)); + dataNodeLocation.setMPPDataExchangeEndPoint(new TEndPoint("0.0.0.0", 8778)); + dataNodeLocation.setDataRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 40011)); + dataNodeLocation.setSchemaRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 50011)); + + TDataNodeInfo dataNodeInfo = new TDataNodeInfo(); + dataNodeInfo.setLocation(dataNodeLocation); + dataNodeInfo.setCpuCoreNum(8); + dataNodeInfo.setMaxMemory(1024 * 1024); - TDataNodeRegisterReq req = new TDataNodeRegisterReq(dataNodeLocation); + TDataNodeRegisterReq req = new TDataNodeRegisterReq(dataNodeInfo); TDataNodeRegisterResp resp = processor.registerDataNode(req); Assert.assertEquals( TSStatusCode.DATANODE_ALREADY_REGISTERED.getStatusCode(), resp.getStatus().getCode()); - Assert.assertEquals(1, resp.getDataNodeId()); + Assert.assertEquals(2, resp.getDataNodeId()); checkGlobalConfig(resp.getGlobalConfig()); // test query DataNodeInfo - TDataNodeLocationResp locationResp = processor.getDataNodeLocations(-1); + TDataNodeInfoResp infoResp = processor.getDataNodeInfo(-1); Assert.assertEquals( - TSStatusCode.SUCCESS_STATUS.getStatusCode(), locationResp.getStatus().getCode()); - Map locationMap = locationResp.getDataNodeLocationMap(); - Assert.assertEquals(3, locationMap.size()); - List> locationList = - new ArrayList<>(locationMap.entrySet()); - locationList.sort(Comparator.comparingInt(Map.Entry::getKey)); + TSStatusCode.SUCCESS_STATUS.getStatusCode(), infoResp.getStatus().getCode()); + Map infoMap = infoResp.getDataNodeInfoMap(); + Assert.assertEquals(3, infoMap.size()); + List> infoList = new ArrayList<>(infoMap.entrySet()); + infoList.sort(Comparator.comparingInt(Map.Entry::getKey)); for (int i = 0; i < 3; i++) { - dataNodeLocation.setDataNodeId(i); - dataNodeLocation.setExternalEndPoint(new TEndPoint("0.0.0.0", 6667 + i)); + dataNodeLocation.setDataNodeId(i + 1); + dataNodeLocation.setClientRpcEndPoint(new TEndPoint("0.0.0.0", 6667 + i)); dataNodeLocation.setInternalEndPoint(new TEndPoint("0.0.0.0", 9003 + i)); - dataNodeLocation.setDataBlockManagerEndPoint(new TEndPoint("0.0.0.0", 8777 + i)); - dataNodeLocation.setConsensusEndPoint(new TEndPoint("0.0.0.0", 40010 + i)); - Assert.assertEquals(dataNodeLocation, locationList.get(i).getValue()); + dataNodeLocation.setMPPDataExchangeEndPoint(new TEndPoint("0.0.0.0", 8777 + i)); + 
dataNodeLocation.setDataRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 40010 + i)); + dataNodeLocation.setSchemaRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 50010 + i)); + Assert.assertEquals(dataNodeLocation, infoList.get(i).getValue().getLocation()); } - locationResp = processor.getDataNodeLocations(1); + infoResp = processor.getDataNodeInfo(1); Assert.assertEquals( - TSStatusCode.SUCCESS_STATUS.getStatusCode(), locationResp.getStatus().getCode()); - locationMap = locationResp.getDataNodeLocationMap(); - Assert.assertEquals(1, locationMap.size()); - Assert.assertNotNull(locationMap.get(1)); + TSStatusCode.SUCCESS_STATUS.getStatusCode(), infoResp.getStatus().getCode()); + infoMap = infoResp.getDataNodeInfoMap(); + Assert.assertEquals(1, infoMap.size()); + Assert.assertNotNull(infoMap.get(1)); dataNodeLocation.setDataNodeId(1); - dataNodeLocation.setExternalEndPoint(new TEndPoint("0.0.0.0", 6668)); - dataNodeLocation.setInternalEndPoint(new TEndPoint("0.0.0.0", 9004)); - dataNodeLocation.setDataBlockManagerEndPoint(new TEndPoint("0.0.0.0", 8778)); - dataNodeLocation.setConsensusEndPoint(new TEndPoint("0.0.0.0", 40011)); - Assert.assertEquals(dataNodeLocation, locationMap.get(1)); + dataNodeLocation.setClientRpcEndPoint(new TEndPoint("0.0.0.0", 6667)); + dataNodeLocation.setInternalEndPoint(new TEndPoint("0.0.0.0", 9003)); + dataNodeLocation.setMPPDataExchangeEndPoint(new TEndPoint("0.0.0.0", 8777)); + dataNodeLocation.setDataRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 40010)); + dataNodeLocation.setSchemaRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 50010)); + Assert.assertEquals(dataNodeLocation, infoMap.get(1).getLocation()); + } + + @Test + public void getAllClusterNodeInfosTest() throws TException { + registerDataNodes(); + + TClusterNodeInfos clusterNodes = processor.getAllClusterNodeInfos(); + + List configNodeInfos = clusterNodes.getConfigNodeList(); + Assert.assertEquals(1, configNodeInfos.size()); + TConfigNodeLocation configNodeLocation = + new TConfigNodeLocation( + 0, new TEndPoint("0.0.0.0", 22277), new TEndPoint("0.0.0.0", 22278)); + Assert.assertEquals(configNodeLocation, configNodeInfos.get(0)); + + List dataNodeInfos = clusterNodes.getDataNodeList(); + Assert.assertEquals(3, dataNodeInfos.size()); + TDataNodeLocation dataNodeLocation = new TDataNodeLocation(); + for (int i = 0; i < 3; i++) { + dataNodeLocation.setDataNodeId(i + 1); + dataNodeLocation.setClientRpcEndPoint(new TEndPoint("0.0.0.0", 6667 + i)); + dataNodeLocation.setInternalEndPoint(new TEndPoint("0.0.0.0", 9003 + i)); + dataNodeLocation.setMPPDataExchangeEndPoint(new TEndPoint("0.0.0.0", 8777 + i)); + dataNodeLocation.setDataRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 40010 + i)); + dataNodeLocation.setSchemaRegionConsensusEndPoint(new TEndPoint("0.0.0.0", 50010 + i)); + Assert.assertEquals(dataNodeLocation, dataNodeInfos.get(i)); + } } @Test - public void setAndQueryStorageGroupTest() throws TException { + public void testSetAndQueryStorageGroup() throws TException { TSStatus status; final String sg0 = "root.sg0"; final String sg1 = "root.sg1"; @@ -246,8 +298,8 @@ public void setAndQueryStorageGroupTest() throws TException { Assert.assertNotNull(storageGroupSchema); Assert.assertEquals(sg0, storageGroupSchema.getName()); Assert.assertEquals(Long.MAX_VALUE, storageGroupSchema.getTTL()); - Assert.assertEquals(3, storageGroupSchema.getSchemaReplicationFactor()); - Assert.assertEquals(3, storageGroupSchema.getDataReplicationFactor()); + Assert.assertEquals(1, 
storageGroupSchema.getSchemaReplicationFactor()); + Assert.assertEquals(1, storageGroupSchema.getDataReplicationFactor()); Assert.assertEquals(604800, storageGroupSchema.getTimePartitionInterval()); storageGroupSchema = schemaMap.get(sg1); Assert.assertNotNull(storageGroupSchema); @@ -265,9 +317,9 @@ public void setAndQueryStorageGroupTest() throws TException { // test StorageGroup setter interfaces status = processor.setTTL(new TSetTTLReq(sg1, Long.MAX_VALUE)); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); - status = processor.setSchemaReplicationFactor(new TSetSchemaReplicationFactorReq(sg1, 3)); + status = processor.setSchemaReplicationFactor(new TSetSchemaReplicationFactorReq(sg1, 1)); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); - status = processor.setDataReplicationFactor(new TSetDataReplicationFactorReq(sg1, 3)); + status = processor.setDataReplicationFactor(new TSetDataReplicationFactorReq(sg1, 1)); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); status = processor.setTimePartitionInterval(new TSetTimePartitionIntervalReq(sg1, 604800L)); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); @@ -281,8 +333,8 @@ public void setAndQueryStorageGroupTest() throws TException { Assert.assertNotNull(storageGroupSchema); Assert.assertEquals(sg1, storageGroupSchema.getName()); Assert.assertEquals(Long.MAX_VALUE, storageGroupSchema.getTTL()); - Assert.assertEquals(3, storageGroupSchema.getSchemaReplicationFactor()); - Assert.assertEquals(3, storageGroupSchema.getDataReplicationFactor()); + Assert.assertEquals(1, storageGroupSchema.getSchemaReplicationFactor()); + Assert.assertEquals(1, storageGroupSchema.getDataReplicationFactor()); Assert.assertEquals(604800, storageGroupSchema.getTimePartitionInterval()); } @@ -291,7 +343,7 @@ private ByteBuffer generatePatternTreeBuffer(String[] paths) throws IllegalPathException, IOException { PathPatternTree patternTree = new PathPatternTree(); for (String path : paths) { - patternTree.appendPath(new PartialPath(path)); + patternTree.appendPathPattern(new PartialPath(path)); } patternTree.constructTree(); @@ -301,7 +353,7 @@ private ByteBuffer generatePatternTreeBuffer(String[] paths) } @Test - public void getAndCreateSchemaPartitionTest() + public void testGetAndCreateSchemaPartition() throws TException, IOException, IllegalPathException { final String sg = "root.sg"; final String sg0 = "root.sg0"; @@ -323,15 +375,24 @@ public void getAndCreateSchemaPartitionTest() Map> schemaPartitionMap; - // register DataNodes - registerDataNodes(); - // Set StorageGroups status = processor.setStorageGroup(new TSetStorageGroupReq(new TStorageGroupSchema(sg0))); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); status = processor.setStorageGroup(new TSetStorageGroupReq(new TStorageGroupSchema(sg1))); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + // Test getOrCreateSchemaPartition, the result should be NOT_ENOUGH_DATANODE + buffer = generatePatternTreeBuffer(new String[] {d00, d01, allSg1}); + schemaPartitionReq = new TSchemaPartitionReq(buffer); + schemaPartitionResp = processor.getOrCreateSchemaPartition(schemaPartitionReq); + Assert.assertEquals( + TSStatusCode.NOT_ENOUGH_DATA_NODE.getStatusCode(), + schemaPartitionResp.getStatus().getCode()); + Assert.assertNull(schemaPartitionResp.getSchemaRegionMap()); + + // register DataNodes + 
registerDataNodes(); + // Test getSchemaPartition, the result should be empty buffer = generatePatternTreeBuffer(new String[] {d00, d01, allSg1}); schemaPartitionReq = new TSchemaPartitionReq(buffer); @@ -355,7 +416,7 @@ public void getAndCreateSchemaPartitionTest() .get(sg + i) .forEach( (tSeriesPartitionSlot, tRegionReplicaSet) -> { - Assert.assertEquals(3, tRegionReplicaSet.getDataNodeLocationsSize()); + Assert.assertEquals(1, tRegionReplicaSet.getDataNodeLocationsSize()); Assert.assertEquals( TConsensusGroupType.SchemaRegion, tRegionReplicaSet.getRegionId().getType()); }); @@ -378,7 +439,7 @@ public void getAndCreateSchemaPartitionTest() .get(sg + i) .forEach( (tSeriesPartitionSlot, tRegionReplicaSet) -> { - Assert.assertEquals(3, tRegionReplicaSet.getDataNodeLocationsSize()); + Assert.assertEquals(1, tRegionReplicaSet.getDataNodeLocationsSize()); Assert.assertEquals( TConsensusGroupType.SchemaRegion, tRegionReplicaSet.getRegionId().getType()); }); @@ -400,7 +461,7 @@ public void getAndCreateSchemaPartitionTest() .get(sg0) .forEach( (tSeriesPartitionSlot, tRegionReplicaSet) -> { - Assert.assertEquals(3, tRegionReplicaSet.getDataNodeLocationsSize()); + Assert.assertEquals(1, tRegionReplicaSet.getDataNodeLocationsSize()); Assert.assertEquals( TConsensusGroupType.SchemaRegion, tRegionReplicaSet.getRegionId().getType()); }); @@ -411,7 +472,7 @@ public void getAndCreateSchemaPartitionTest() .get(sg1) .forEach( (tSeriesPartitionSlot, tRegionReplicaSet) -> { - Assert.assertEquals(3, tRegionReplicaSet.getDataNodeLocationsSize()); + Assert.assertEquals(1, tRegionReplicaSet.getDataNodeLocationsSize()); Assert.assertEquals( TConsensusGroupType.SchemaRegion, tRegionReplicaSet.getRegionId().getType()); }); @@ -482,9 +543,9 @@ private void checkDataPartitionMap( .get(0) .getRegionId() .getType()); - // Including three RegionReplica + // Including one RegionReplica Assert.assertEquals( - 3, + 1, dataPartitionMap .get(storageGroup) .get(seriesPartitionSlot) @@ -497,7 +558,7 @@ private void checkDataPartitionMap( } @Test - public void getAndCreateDataPartitionTest() throws TException { + public void testGetAndCreateDataPartition() throws TException { final String sg = "root.sg"; final int storageGroupNum = 2; final int seriesPartitionSlotNum = 4; @@ -507,9 +568,6 @@ public void getAndCreateDataPartitionTest() throws TException { TDataPartitionReq dataPartitionReq; TDataPartitionResp dataPartitionResp; - // register DataNodes - registerDataNodes(); - // Prepare partitionSlotsMap Map>> partitionSlotsMap0 = constructPartitionSlotsMap(storageGroupNum, seriesPartitionSlotNum, timePartitionSlotNum); @@ -524,6 +582,16 @@ public void getAndCreateDataPartitionTest() throws TException { Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); } + // Test getOrCreateDataPartition, the result should be NOT_ENOUGH_DATANODE + dataPartitionReq = new TDataPartitionReq(partitionSlotsMap0); + dataPartitionResp = processor.getOrCreateDataPartition(dataPartitionReq); + Assert.assertEquals( + TSStatusCode.NOT_ENOUGH_DATA_NODE.getStatusCode(), dataPartitionResp.getStatus().getCode()); + Assert.assertNull(dataPartitionResp.getDataPartitionMap()); + + // register DataNodes + registerDataNodes(); + // Test getDataPartition, the result should be empty dataPartitionReq = new TDataPartitionReq(partitionSlotsMap0); dataPartitionResp = processor.getDataPartition(dataPartitionReq); @@ -557,7 +625,7 @@ public void getAndCreateDataPartitionTest() throws TException { } @Test - public void permissionTest() 
throws TException { + public void testPermission() throws TException { TSStatus status; List userList = new ArrayList<>(); @@ -609,7 +677,7 @@ public void permissionTest() throws TException { // check user privileges checkUserPrivilegesReq = new TCheckUserPrivilegesReq("tempuser0", paths, PrivilegeType.DELETE_USER.ordinal()); - status = processor.checkUserPrivileges(checkUserPrivilegesReq); + status = processor.checkUserPrivileges(checkUserPrivilegesReq).getStatus(); Assert.assertEquals(TSStatusCode.NO_PERMISSION_ERROR.getStatusCode(), status.getCode()); // drop user @@ -705,7 +773,7 @@ public void permissionTest() throws TException { // check user privileges checkUserPrivilegesReq = new TCheckUserPrivilegesReq("tempuser0", paths, PrivilegeType.DELETE_USER.ordinal()); - status = processor.checkUserPrivileges(checkUserPrivilegesReq); + status = processor.checkUserPrivileges(checkUserPrivilegesReq).getStatus(); Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); // grant role @@ -894,12 +962,13 @@ public void permissionTest() throws TException { } @Test - public void deleteStorageGroupTest() throws TException { + public void testDeleteStorageGroup() throws TException { TSStatus status; final String sg0 = "root.sg0"; final String sg1 = "root.sg1"; // register DataNodes registerDataNodes(); + ConfigNodeProcedureEnv.setSkipForTest(true); TSetStorageGroupReq setReq0 = new TSetStorageGroupReq(new TStorageGroupSchema(sg0)); // set StorageGroup0 by default values status = processor.setStorageGroup(setReq0); @@ -911,7 +980,6 @@ public void deleteStorageGroupTest() throws TException { TDeleteStorageGroupsReq deleteStorageGroupsReq = new TDeleteStorageGroupsReq(); List sgs = Arrays.asList(sg0, sg1); deleteStorageGroupsReq.setPrefixPathList(sgs); - DeleteStorageGroupProcedure.setByPassForTest(true); TSStatus deleteSgStatus = processor.deleteStorageGroups(deleteStorageGroupsReq); TStorageGroupSchemaResp root = processor.getMatchedStorageGroupSchemas(Arrays.asList("root", "*")); @@ -919,6 +987,34 @@ public void deleteStorageGroupTest() throws TException { Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), deleteSgStatus.getCode()); } + @Test + public void testDeleteStorageGroupInvalidateCacheFailed() throws TException { + TSStatus status; + final String sg0 = "root.sg0"; + final String sg1 = "root.sg1"; + // register DataNodes + registerDataNodes(); + ConfigNodeProcedureEnv.setSkipForTest(true); + ConfigNodeProcedureEnv.setInvalidCacheResult(false); + TSetStorageGroupReq setReq0 = new TSetStorageGroupReq(new TStorageGroupSchema(sg0)); + // set StorageGroup0 by default values + status = processor.setStorageGroup(setReq0); + Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + // set StorageGroup1 by specific values + TSetStorageGroupReq setReq1 = new TSetStorageGroupReq(new TStorageGroupSchema(sg1)); + status = processor.setStorageGroup(setReq1); + Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + TDeleteStorageGroupsReq deleteStorageGroupsReq = new TDeleteStorageGroupsReq(); + List sgs = Arrays.asList(sg0, sg1); + deleteStorageGroupsReq.setPrefixPathList(sgs); + TSStatus deleteSgStatus = processor.deleteStorageGroups(deleteStorageGroupsReq); + TStorageGroupSchemaResp root = + processor.getMatchedStorageGroupSchemas(Arrays.asList("root", "*")); + // rollback success + Assert.assertEquals(root.getStorageGroupSchemaMap().size(), 2); + 
Assert.assertEquals(TSStatusCode.MULTIPLE_ERROR.getStatusCode(), deleteSgStatus.getCode()); + } + private void cleanUserAndRole() throws TException { TSStatus status; @@ -970,4 +1066,35 @@ private void cleanUserAndRole() throws TException { Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); } } + + @Test + public void testGetSchemaNodeManagementPartition() + throws TException, IllegalPathException, IOException { + final String sg = "root.sg"; + final int storageGroupNum = 2; + + TSStatus status; + TSchemaNodeManagementReq nodeManagementReq; + TSchemaNodeManagementResp nodeManagementResp; + + // register DataNodes + registerDataNodes(); + + // set StorageGroups + for (int i = 0; i < storageGroupNum; i++) { + TSetStorageGroupReq setReq = new TSetStorageGroupReq(new TStorageGroupSchema(sg + i)); + status = processor.setStorageGroup(setReq); + Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + } + + ByteBuffer byteBuffer = generatePatternTreeBuffer(new String[] {"root"}); + nodeManagementReq = new TSchemaNodeManagementReq(byteBuffer); + nodeManagementReq.setLevel(-1); + nodeManagementResp = processor.getSchemaNodeManagementPartition(nodeManagementReq); + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), nodeManagementResp.getStatus().getCode()); + Assert.assertEquals(2, nodeManagementResp.getMatchedNodeSize()); + Assert.assertNotNull(nodeManagementResp.getSchemaRegionMap()); + Assert.assertEquals(0, nodeManagementResp.getSchemaRegionMapSize()); + } } diff --git a/confignode/src/test/resources/confignode1conf/iotdb-confignode.properties b/confignode/src/test/resources/confignode1conf/iotdb-confignode.properties index 576c3192624e..c786e2021ccd 100644 --- a/confignode/src/test/resources/confignode1conf/iotdb-confignode.properties +++ b/confignode/src/test/resources/confignode1conf/iotdb-confignode.properties @@ -20,10 +20,19 @@ rpc_address=0.0.0.0 rpc_port=22277 consensus_port=22278 -target_confignode=0.0.0.0:22277 +config_nodes=0.0.0.0:22277 config_node_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus -data_node_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus +data_region_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus +schema_region_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus +# Default number of SchemaRegion replicas +# Datatype: int +#schema_replication_factor=1 + + +# Default number of DataRegion replicas +# Datatype: int +#data_replication_factor=1 system_dir=target/confignode1/system data_dirs=target/confignode1/data consensus_dir=target/confignode1/consensus diff --git a/confignode/src/test/resources/confignode1conf/iotdb-metric.yml b/confignode/src/test/resources/confignode1conf/iotdb-metric.yml new file mode 100644 index 000000000000..cf64f6ad8818 --- /dev/null +++ b/confignode/src/test/resources/confignode1conf/iotdb-metric.yml @@ -0,0 +1,53 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# whether enable the module +enableMetric: false + +# Is stat performance of operation latency +enablePerformanceStat: false + +# Multiple reporter, options: [JMX, PROMETHEUS, IOTDB], IOTDB is off by default +metricReporterList: + - JMX + - PROMETHEUS + +# Type of monitor frame, options: [MICROMETER, DROPWIZARD] +monitorType: MICROMETER + +# Level of metric level, options: [CORE, IMPORTANT, NORMAL, ALL] +metricLevel: IMPORTANT + +# Predefined metric, options: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] +predefinedMetrics: + - JVM + - FILE + +# The http server's port for prometheus exporter to get metric data. +prometheusExporterPort: 9091 + +# The config of iotdb reporter +ioTDBReporterConfig: + host: 127.0.0.1 + port: 6667 + username: root + password: root + maxConnectionNumber: 3 + database: _metric + pushPeriodInSecond: 15 \ No newline at end of file diff --git a/confignode/src/test/resources/confignode2conf/iotdb-confignode.properties b/confignode/src/test/resources/confignode2conf/iotdb-confignode.properties index bcd433ef35e3..68854e3f4bcf 100644 --- a/confignode/src/test/resources/confignode2conf/iotdb-confignode.properties +++ b/confignode/src/test/resources/confignode2conf/iotdb-confignode.properties @@ -20,9 +20,10 @@ rpc_address=0.0.0.0 rpc_port=22279 consensus_port=22280 -target_confignode=0.0.0.0:22277 +config_nodes=0.0.0.0:22277 config_node_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus -data_node_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus +data_region_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus +schema_region_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus system_dir=target/confignode2/system data_dirs=target/confignode2/data diff --git a/confignode/src/test/resources/confignode2conf/iotdb-metric.yml b/confignode/src/test/resources/confignode2conf/iotdb-metric.yml new file mode 100644 index 000000000000..26fdd958340b --- /dev/null +++ b/confignode/src/test/resources/confignode2conf/iotdb-metric.yml @@ -0,0 +1,53 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +# whether enable the module +enableMetric: false + +# Is stat performance of operation latency +enablePerformanceStat: false + +# Multiple reporter, options: [JMX, PROMETHEUS, IOTDB], IOTDB is off by default +metricReporterList: + - JMX + - PROMETHEUS + +# Type of monitor frame, options: [MICROMETER, DROPWIZARD] +monitorType: MICROMETER + +# Level of metric level, options: [CORE, IMPORTANT, NORMAL, ALL] +metricLevel: IMPORTANT + +# Predefined metric, options: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] +predefinedMetrics: + - JVM + - FILE + +# The http server's port for prometheus exporter to get metric data. +prometheusExporterPort: 9093 + +# The config of iotdb reporter +ioTDBReporterConfig: + host: 127.0.0.1 + port: 6667 + username: root + password: root + maxConnectionNumber: 3 + database: _metric + pushPeriodInSecond: 15 \ No newline at end of file diff --git a/confignode/src/test/resources/confignode3conf/iotdb-confignode.properties b/confignode/src/test/resources/confignode3conf/iotdb-confignode.properties index 1adf1d6e5417..79e1f871e60f 100644 --- a/confignode/src/test/resources/confignode3conf/iotdb-confignode.properties +++ b/confignode/src/test/resources/confignode3conf/iotdb-confignode.properties @@ -20,9 +20,10 @@ rpc_address=0.0.0.0 rpc_port=22281 consensus_port=22282 -target_confignode=0.0.0.0:22277 +config_nodes=0.0.0.0:22277 config_node_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus -data_node_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus +data_region_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus +schema_region_consensus_protocol_class=org.apache.iotdb.consensus.ratis.RatisConsensus system_dir=target/confignode3/system data_dirs=target/confignode3/data diff --git a/confignode/src/test/resources/confignode3conf/iotdb-metric.yml b/confignode/src/test/resources/confignode3conf/iotdb-metric.yml new file mode 100644 index 000000000000..85f12c4f234e --- /dev/null +++ b/confignode/src/test/resources/confignode3conf/iotdb-metric.yml @@ -0,0 +1,53 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# whether enable the module +enableMetric: false + +# Is stat performance of operation latency +enablePerformanceStat: false + +# Multiple reporter, options: [JMX, PROMETHEUS, IOTDB], IOTDB is off by default +metricReporterList: + - JMX + - PROMETHEUS + +# Type of monitor frame, options: [MICROMETER, DROPWIZARD] +monitorType: MICROMETER + +# Level of metric level, options: [CORE, IMPORTANT, NORMAL, ALL] +metricLevel: IMPORTANT + +# Predefined metric, options: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] +predefinedMetrics: + - JVM + - FILE + +# The http server's port for prometheus exporter to get metric data. 
+prometheusExporterPort: 9095 + +# The config of iotdb reporter +ioTDBReporterConfig: + host: 127.0.0.1 + port: 6667 + username: root + password: root + maxConnectionNumber: 3 + database: _metric + pushPeriodInSecond: 15 \ No newline at end of file diff --git a/consensus/README.md b/consensus/README.md index c0e77910ed33..ecd5c35eae91 100644 --- a/consensus/README.md +++ b/consensus/README.md @@ -69,7 +69,7 @@ on [Apache Ratis](https://ratis.apache.org/). ```java IConsensus consensusImpl = ConsensusFactory.getConsensusImpl( - "org.apache.iotdb.consensus.ratis.RatisConsensus", + ConsensusFactory.RatisConsensus, new Endpoint(conf.getRpcAddress(), conf.getInternalPort()), new File(conf.getConsensusDir()), gid -> new PartitionRegionStateMachine()) @@ -77,7 +77,7 @@ IConsensus consensusImpl = new IllegalArgumentException( String.format( ConsensusFactory.CONSTRUCT_FAILED_MSG, - "org.apache.iotdb.consensus.ratis.RatisConsensus"))); + ConsensusFactory.RatisConsensus))); consensusImpl.start(); ``` diff --git a/consensus/pom.xml b/consensus/pom.xml index 7284a5fee79c..23f5a5cb1b25 100644 --- a/consensus/pom.xml +++ b/consensus/pom.xml @@ -29,6 +29,14 @@ 4.0.0 iotdb-consensus IoTDB Consensus + + 8 + 8 + 2.3.0 + false + ${consensus.test.skip} + ${consensus.test.skip} + @@ -52,10 +60,64 @@ ${project.version} compile + + org.apache.iotdb + thrift-multi-leader-consensus + ${project.version} + - - 8 - 8 - 2.2.0 - + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + ${consensus.ut.skip} + pertest + + + + + + + skipConsensusTests + + + skipTests + true + + + + true + true + true + + + + skipUT_Consensus_Tests + + + skipUTs + true + + + + true + + + diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/ConsensusFactory.java b/consensus/src/main/java/org/apache/iotdb/consensus/ConsensusFactory.java index efc40e31e62c..8146fcf3a4a9 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/ConsensusFactory.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/ConsensusFactory.java @@ -19,12 +19,11 @@ package org.apache.iotdb.consensus; -import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.consensus.config.ConsensusConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.Optional; @@ -33,18 +32,22 @@ public class ConsensusFactory { public static final String CONSTRUCT_FAILED_MSG = "Construct consensusImpl failed, Please check your consensus className %s"; + public static final String StandAloneConsensus = + "org.apache.iotdb.consensus.standalone.StandAloneConsensus"; + public static final String RatisConsensus = "org.apache.iotdb.consensus.ratis.RatisConsensus"; + public static final String MultiLeaderConsensus = + "org.apache.iotdb.consensus.multileader.MultiLeaderConsensus"; + private static final Logger logger = LoggerFactory.getLogger(ConsensusFactory.class); public static Optional getConsensusImpl( - String className, TEndPoint endpoint, File storageDir, IStateMachine.Registry registry) { + String className, ConsensusConfig config, IStateMachine.Registry registry) { try { Class executor = Class.forName(className); Constructor executorConstructor = - executor.getDeclaredConstructor( - TEndPoint.class, File.class, IStateMachine.Registry.class); + executor.getDeclaredConstructor(ConsensusConfig.class, IStateMachine.Registry.class); 
executorConstructor.setAccessible(true); - return Optional.of( - (IConsensus) executorConstructor.newInstance(endpoint, storageDir, registry)); + return Optional.of((IConsensus) executorConstructor.newInstance(config, registry)); } catch (ClassNotFoundException | NoSuchMethodException | InstantiationException diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/IConsensus.java b/consensus/src/main/java/org/apache/iotdb/consensus/IConsensus.java index f7c195eefccc..60982cc49bdd 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/IConsensus.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/IConsensus.java @@ -63,4 +63,6 @@ public interface IConsensus { boolean isLeader(ConsensusGroupId groupId); Peer getLeader(ConsensusGroupId groupId); + + List getAllConsensusGroupIds(); } diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/common/Peer.java b/consensus/src/main/java/org/apache/iotdb/consensus/common/Peer.java index 99379156fbaf..ad962c2525aa 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/common/Peer.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/common/Peer.java @@ -21,7 +21,10 @@ import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.commons.consensus.ConsensusGroupId; +import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils; +import java.io.DataOutputStream; +import java.nio.ByteBuffer; import java.util.Objects; // TODO Use a mature IDL framework such as Protobuf to manage this structure @@ -43,6 +46,19 @@ public TEndPoint getEndpoint() { return endpoint; } + public void serialize(DataOutputStream stream) { + ThriftCommonsSerDeUtils.serializeTConsensusGroupId( + groupId.convertToTConsensusGroupId(), stream); + ThriftCommonsSerDeUtils.serializeTEndPoint(endpoint, stream); + } + + public static Peer deserialize(ByteBuffer buffer) { + return new Peer( + ConsensusGroupId.Factory.createFromTConsensusGroupId( + ThriftCommonsSerDeUtils.deserializeTConsensusGroupId(buffer)), + ThriftCommonsSerDeUtils.deserializeTEndPoint(buffer)); + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/common/request/ByteBufferConsensusRequest.java b/consensus/src/main/java/org/apache/iotdb/consensus/common/request/ByteBufferConsensusRequest.java index 19ec78908a76..0310a4e3d300 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/common/request/ByteBufferConsensusRequest.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/common/request/ByteBufferConsensusRequest.java @@ -36,9 +36,7 @@ public ByteBufferConsensusRequest(ByteBuffer byteBuffer) { } @Override - public void serializeRequest(ByteBuffer buffer) {} - - public ByteBuffer getContent() { + public ByteBuffer serializeToByteBuffer() { return byteBuffer; } } diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IConsensusRequest.java b/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IConsensusRequest.java index 0a5aacfdf26c..daa2a7d7d003 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IConsensusRequest.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IConsensusRequest.java @@ -22,6 +22,17 @@ import java.nio.ByteBuffer; public interface IConsensusRequest { - - void serializeRequest(ByteBuffer buffer); + /** + * Serialize all the data to a ByteBuffer. + * + *

In a specific implementation, ByteBuf or PublicBAOS can be used to reduce the number of + * memory copies. + * + *

To improve efficiency, a specific implementation could return a DirectByteBuffer to reduce + * the memory copy required to send an RPC. + * + *

Note: The implementation needs to ensure that the data in the returned Bytebuffer cannot be + * changed or an error may occur + */ + ByteBuffer serializeToByteBuffer(); } diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IndexedConsensusRequest.java b/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IndexedConsensusRequest.java new file mode 100644 index 000000000000..3578fd565640 --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/common/request/IndexedConsensusRequest.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.common.request; + +import java.nio.ByteBuffer; +import java.util.Objects; + +/** only used for multi-leader consensus. */ +public class IndexedConsensusRequest implements IConsensusRequest { + + /** we do not need to serialize these two fields as they are useless in other nodes. */ + private final long searchIndex; + + private final long safelyDeletedSearchIndex; + + private final IConsensusRequest request; + + public IndexedConsensusRequest( + long searchIndex, long safelyDeletedSearchIndex, IConsensusRequest request) { + this.searchIndex = searchIndex; + this.safelyDeletedSearchIndex = safelyDeletedSearchIndex; + this.request = request; + } + + @Override + public ByteBuffer serializeToByteBuffer() { + return request.serializeToByteBuffer(); + } + + public IConsensusRequest getRequest() { + return request; + } + + public long getSearchIndex() { + return searchIndex; + } + + public long getSafelyDeletedSearchIndex() { + return safelyDeletedSearchIndex; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + IndexedConsensusRequest that = (IndexedConsensusRequest) o; + return searchIndex == that.searchIndex + && safelyDeletedSearchIndex == that.safelyDeletedSearchIndex + && Objects.equals(request, that.request); + } + + @Override + public int hashCode() { + return Objects.hash(searchIndex, safelyDeletedSearchIndex, request); + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/common/response/ConsensusReadResponse.java b/consensus/src/main/java/org/apache/iotdb/consensus/common/response/ConsensusReadResponse.java index a8bec60d3f97..7c3a45bd90b6 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/common/response/ConsensusReadResponse.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/common/response/ConsensusReadResponse.java @@ -35,9 +35,13 @@ public DataSet getDataset() { return dataset; } + public boolean isSuccess() { + return exception == null; + } + @Override public String toString() { - return "ConsensusReadResponse{" + "dataset=" + dataset + "} 
" + super.toString(); + return "ConsensusReadResponse{" + "dataset=" + dataset + ", exception=" + exception + "}"; } public static ConsensusReadResponse.Builder newBuilder() { diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/common/response/ConsensusResponse.java b/consensus/src/main/java/org/apache/iotdb/consensus/common/response/ConsensusResponse.java index 355b0d035a49..2a14ada29db3 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/common/response/ConsensusResponse.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/common/response/ConsensusResponse.java @@ -22,7 +22,7 @@ import org.apache.iotdb.consensus.exception.ConsensusException; public abstract class ConsensusResponse { - private final ConsensusException exception; + protected final ConsensusException exception; public ConsensusResponse(ConsensusException exception) { this.exception = exception; diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/config/ConsensusConfig.java b/consensus/src/main/java/org/apache/iotdb/consensus/config/ConsensusConfig.java new file mode 100644 index 000000000000..be3b4bf34258 --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/config/ConsensusConfig.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.config; + +import org.apache.iotdb.common.rpc.thrift.TEndPoint; + +public class ConsensusConfig { + + private final TEndPoint thisNode; + private final String storageDir; + private final RatisConfig ratisConfig; + private final MultiLeaderConfig multiLeaderConfig; + + private ConsensusConfig( + TEndPoint thisNode, + String storageDir, + RatisConfig ratisConfig, + MultiLeaderConfig multiLeaderConfig) { + this.thisNode = thisNode; + this.storageDir = storageDir; + this.ratisConfig = ratisConfig; + this.multiLeaderConfig = multiLeaderConfig; + } + + public TEndPoint getThisNode() { + return thisNode; + } + + public String getStorageDir() { + return storageDir; + } + + public RatisConfig getRatisConfig() { + return ratisConfig; + } + + public MultiLeaderConfig getMultiLeaderConfig() { + return multiLeaderConfig; + } + + public static ConsensusConfig.Builder newBuilder() { + return new ConsensusConfig.Builder(); + } + + public static class Builder { + + private TEndPoint thisNode; + private String storageDir; + private RatisConfig ratisConfig; + private MultiLeaderConfig multiLeaderConfig; + + public ConsensusConfig build() { + return new ConsensusConfig( + thisNode, + storageDir, + ratisConfig != null ? ratisConfig : RatisConfig.newBuilder().build(), + multiLeaderConfig != null ? 
multiLeaderConfig : MultiLeaderConfig.newBuilder().build()); + } + + public Builder setThisNode(TEndPoint thisNode) { + this.thisNode = thisNode; + return this; + } + + public Builder setStorageDir(String storageDir) { + this.storageDir = storageDir; + return this; + } + + public Builder setRatisConfig(RatisConfig ratisConfig) { + this.ratisConfig = ratisConfig; + return this; + } + + public Builder setMultiLeaderConfig(MultiLeaderConfig multiLeaderConfig) { + this.multiLeaderConfig = multiLeaderConfig; + return this; + } + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/config/MultiLeaderConfig.java b/consensus/src/main/java/org/apache/iotdb/consensus/config/MultiLeaderConfig.java new file mode 100644 index 000000000000..119bd17b0a67 --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/config/MultiLeaderConfig.java @@ -0,0 +1,257 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.config; + +import java.util.concurrent.TimeUnit; + +public class MultiLeaderConfig { + + private final RPC rpc; + private final Replication replication; + + private MultiLeaderConfig(RPC rpc, Replication replication) { + this.rpc = rpc; + this.replication = replication; + } + + public RPC getRpc() { + return rpc; + } + + public Replication getReplication() { + return replication; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public static class Builder { + + private RPC rpc; + private Replication replication; + + public MultiLeaderConfig build() { + return new MultiLeaderConfig( + rpc != null ? rpc : new RPC.Builder().build(), + replication != null ? 
replication : new Replication.Builder().build()); + } + + public Builder setRpc(RPC rpc) { + this.rpc = rpc; + return this; + } + + public Builder setReplication(Replication replication) { + this.replication = replication; + return this; + } + } + + public static class RPC { + private final int rpcMaxConcurrentClientNum; + private final int thriftServerAwaitTimeForStopService; + private final boolean isRpcThriftCompressionEnabled; + private final int selectorNumOfClientManager; + private final int connectionTimeoutInMs; + + private RPC( + int rpcMaxConcurrentClientNum, + int thriftServerAwaitTimeForStopService, + boolean isRpcThriftCompressionEnabled, + int selectorNumOfClientManager, + int connectionTimeoutInMs) { + this.rpcMaxConcurrentClientNum = rpcMaxConcurrentClientNum; + this.thriftServerAwaitTimeForStopService = thriftServerAwaitTimeForStopService; + this.isRpcThriftCompressionEnabled = isRpcThriftCompressionEnabled; + this.selectorNumOfClientManager = selectorNumOfClientManager; + this.connectionTimeoutInMs = connectionTimeoutInMs; + } + + public int getRpcMaxConcurrentClientNum() { + return rpcMaxConcurrentClientNum; + } + + public int getThriftServerAwaitTimeForStopService() { + return thriftServerAwaitTimeForStopService; + } + + public boolean isRpcThriftCompressionEnabled() { + return isRpcThriftCompressionEnabled; + } + + public int getSelectorNumOfClientManager() { + return selectorNumOfClientManager; + } + + public int getConnectionTimeoutInMs() { + return connectionTimeoutInMs; + } + + public static RPC.Builder newBuilder() { + return new RPC.Builder(); + } + + public static class Builder { + private int rpcMaxConcurrentClientNum = 65535; + private int thriftServerAwaitTimeForStopService = 60; + private boolean isRpcThriftCompressionEnabled = false; + private int selectorNumOfClientManager = 1; + private int connectionTimeoutInMs = (int) TimeUnit.SECONDS.toMillis(20); + + public RPC.Builder setRpcMaxConcurrentClientNum(int rpcMaxConcurrentClientNum) { + this.rpcMaxConcurrentClientNum = rpcMaxConcurrentClientNum; + return this; + } + + public RPC.Builder setThriftServerAwaitTimeForStopService( + int thriftServerAwaitTimeForStopService) { + this.thriftServerAwaitTimeForStopService = thriftServerAwaitTimeForStopService; + return this; + } + + public RPC.Builder setRpcThriftCompressionEnabled(boolean rpcThriftCompressionEnabled) { + isRpcThriftCompressionEnabled = rpcThriftCompressionEnabled; + return this; + } + + public RPC.Builder setSelectorNumOfClientManager(int selectorNumOfClientManager) { + this.selectorNumOfClientManager = selectorNumOfClientManager; + return this; + } + + public RPC.Builder setConnectionTimeoutInMs(int connectionTimeoutInMs) { + this.connectionTimeoutInMs = connectionTimeoutInMs; + return this; + } + + public RPC build() { + return new RPC( + rpcMaxConcurrentClientNum, + thriftServerAwaitTimeForStopService, + isRpcThriftCompressionEnabled, + selectorNumOfClientManager, + connectionTimeoutInMs); + } + } + } + + public static class Replication { + private final int maxPendingRequestNumPerNode; + private final int maxRequestPerBatch; + private final int maxPendingBatch; + private final int maxWaitingTimeForAccumulatingBatchInMs; + private final long basicRetryWaitTimeMs; + private final long maxRetryWaitTimeMs; + + private Replication( + int maxPendingRequestNumPerNode, + int maxRequestPerBatch, + int maxPendingBatch, + int maxWaitingTimeForAccumulatingBatchInMs, + long basicRetryWaitTimeMs, + long maxRetryWaitTimeMs) { + 
this.maxPendingRequestNumPerNode = maxPendingRequestNumPerNode; + this.maxRequestPerBatch = maxRequestPerBatch; + this.maxPendingBatch = maxPendingBatch; + this.maxWaitingTimeForAccumulatingBatchInMs = maxWaitingTimeForAccumulatingBatchInMs; + this.basicRetryWaitTimeMs = basicRetryWaitTimeMs; + this.maxRetryWaitTimeMs = maxRetryWaitTimeMs; + } + + public int getMaxPendingRequestNumPerNode() { + return maxPendingRequestNumPerNode; + } + + public int getMaxRequestPerBatch() { + return maxRequestPerBatch; + } + + public int getMaxPendingBatch() { + return maxPendingBatch; + } + + public int getMaxWaitingTimeForAccumulatingBatchInMs() { + return maxWaitingTimeForAccumulatingBatchInMs; + } + + public long getBasicRetryWaitTimeMs() { + return basicRetryWaitTimeMs; + } + + public long getMaxRetryWaitTimeMs() { + return maxRetryWaitTimeMs; + } + + public static Replication.Builder newBuilder() { + return new Replication.Builder(); + } + + public static class Builder { + private int maxPendingRequestNumPerNode = 200; + private int maxRequestPerBatch = 40; + private int maxPendingBatch = 6; + private int maxWaitingTimeForAccumulatingBatchInMs = 500; + private long basicRetryWaitTimeMs = TimeUnit.MILLISECONDS.toMillis(100); + private long maxRetryWaitTimeMs = TimeUnit.SECONDS.toMillis(20); + + public Replication.Builder setMaxPendingRequestNumPerNode(int maxPendingRequestNumPerNode) { + this.maxPendingRequestNumPerNode = maxPendingRequestNumPerNode; + return this; + } + + public Replication.Builder setMaxRequestPerBatch(int maxRequestPerBatch) { + this.maxRequestPerBatch = maxRequestPerBatch; + return this; + } + + public Replication.Builder setMaxPendingBatch(int maxPendingBatch) { + this.maxPendingBatch = maxPendingBatch; + return this; + } + + public Replication.Builder setMaxWaitingTimeForAccumulatingBatchInMs( + int maxWaitingTimeForAccumulatingBatchInMs) { + this.maxWaitingTimeForAccumulatingBatchInMs = maxWaitingTimeForAccumulatingBatchInMs; + return this; + } + + public Replication.Builder setBasicRetryWaitTimeMs(long basicRetryWaitTimeMs) { + this.basicRetryWaitTimeMs = basicRetryWaitTimeMs; + return this; + } + + public Replication.Builder setMaxRetryWaitTimeMs(long maxRetryWaitTimeMs) { + this.maxRetryWaitTimeMs = maxRetryWaitTimeMs; + return this; + } + + public Replication build() { + return new Replication( + maxPendingRequestNumPerNode, + maxRequestPerBatch, + maxPendingBatch, + maxWaitingTimeForAccumulatingBatchInMs, + basicRetryWaitTimeMs, + maxRetryWaitTimeMs); + } + } + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/config/RatisConfig.java b/consensus/src/main/java/org/apache/iotdb/consensus/config/RatisConfig.java new file mode 100644 index 000000000000..5a1574177c1d --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/config/RatisConfig.java @@ -0,0 +1,695 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.config; + +import org.apache.ratis.grpc.GrpcConfigKeys.Server; +import org.apache.ratis.server.RaftServerConfigKeys; +import org.apache.ratis.util.SizeInBytes; +import org.apache.ratis.util.TimeDuration; + +import java.util.concurrent.TimeUnit; + +public class RatisConfig { + + private final Rpc rpc; + private final LeaderElection leaderElection; + private final Snapshot snapshot; + private final ThreadPool threadPool; + private final Log log; + private final Grpc grpc; + + private RatisConfig( + Rpc rpc, + LeaderElection leaderElection, + Snapshot snapshot, + ThreadPool threadPool, + Log log, + Grpc grpc) { + this.rpc = rpc; + this.leaderElection = leaderElection; + this.snapshot = snapshot; + this.threadPool = threadPool; + this.log = log; + this.grpc = grpc; + } + + public Rpc getRpc() { + return rpc; + } + + public LeaderElection getLeaderElection() { + return leaderElection; + } + + public Snapshot getSnapshot() { + return snapshot; + } + + public ThreadPool getThreadPool() { + return threadPool; + } + + public Log getLog() { + return log; + } + + public Grpc getGrpc() { + return grpc; + } + + public static Builder newBuilder() { + return new Builder(); + } + + public static class Builder { + private Rpc rpc; + private LeaderElection leaderElection; + private Snapshot snapshot; + private ThreadPool threadPool; + private Log log; + private Grpc grpc; + + public RatisConfig build() { + return new RatisConfig( + rpc != null ? rpc : Rpc.newBuilder().build(), + leaderElection != null ? leaderElection : LeaderElection.newBuilder().build(), + snapshot != null ? snapshot : Snapshot.newBuilder().build(), + threadPool != null ? threadPool : ThreadPool.newBuilder().build(), + log != null ? log : Log.newBuilder().build(), + grpc != null ? 
grpc : Grpc.newBuilder().build()); + } + + public Builder setRpc(Rpc rpc) { + this.rpc = rpc; + return this; + } + + public Builder setLeaderElection(LeaderElection leaderElection) { + this.leaderElection = leaderElection; + return this; + } + + public Builder setSnapshot(Snapshot snapshot) { + this.snapshot = snapshot; + return this; + } + + public Builder setThreadPool(ThreadPool threadPool) { + this.threadPool = threadPool; + return this; + } + + public Builder setLog(Log log) { + this.log = log; + return this; + } + + public Builder setGrpc(Grpc grpc) { + this.grpc = grpc; + return this; + } + } + + /** server rpc timeout related */ + public static class Rpc { + private final TimeDuration timeoutMin; + private final TimeDuration timeoutMax; + private final TimeDuration requestTimeout; + private final TimeDuration sleepTime; + private final TimeDuration slownessTimeout; + + private Rpc( + TimeDuration timeoutMin, + TimeDuration timeoutMax, + TimeDuration requestTimeout, + TimeDuration sleepTime, + TimeDuration slownessTimeout) { + this.timeoutMin = timeoutMin; + this.timeoutMax = timeoutMax; + this.requestTimeout = requestTimeout; + this.sleepTime = sleepTime; + this.slownessTimeout = slownessTimeout; + } + + public TimeDuration getTimeoutMin() { + return timeoutMin; + } + + public TimeDuration getTimeoutMax() { + return timeoutMax; + } + + public TimeDuration getRequestTimeout() { + return requestTimeout; + } + + public TimeDuration getSleepTime() { + return sleepTime; + } + + public TimeDuration getSlownessTimeout() { + return slownessTimeout; + } + + public static Rpc.Builder newBuilder() { + return new Rpc.Builder(); + } + + public static class Builder { + private TimeDuration timeoutMin = TimeDuration.valueOf(2, TimeUnit.SECONDS); + private TimeDuration timeoutMax = TimeDuration.valueOf(4, TimeUnit.SECONDS); + private TimeDuration requestTimeout = TimeDuration.valueOf(20, TimeUnit.SECONDS); + private TimeDuration sleepTime = TimeDuration.valueOf(1, TimeUnit.SECONDS); + private TimeDuration slownessTimeout = TimeDuration.valueOf(10, TimeUnit.MINUTES); + + public Rpc build() { + return new Rpc(timeoutMin, timeoutMax, requestTimeout, sleepTime, slownessTimeout); + } + + public Rpc.Builder setTimeoutMin(TimeDuration timeoutMin) { + this.timeoutMin = timeoutMin; + return this; + } + + public Rpc.Builder setTimeoutMax(TimeDuration timeoutMax) { + this.timeoutMax = timeoutMax; + return this; + } + + public Rpc.Builder setRequestTimeout(TimeDuration requestTimeout) { + this.requestTimeout = requestTimeout; + return this; + } + + public Rpc.Builder setSleepTime(TimeDuration sleepTime) { + this.sleepTime = sleepTime; + return this; + } + + public Rpc.Builder setSlownessTimeout(TimeDuration slownessTimeout) { + this.slownessTimeout = slownessTimeout; + return this; + } + } + } + + public static class LeaderElection { + private final TimeDuration leaderStepDownWaitTimeKey; + private final boolean preVote; + + private LeaderElection(TimeDuration leaderStepDownWaitTimeKey, boolean preVote) { + this.leaderStepDownWaitTimeKey = leaderStepDownWaitTimeKey; + this.preVote = preVote; + } + + public TimeDuration getLeaderStepDownWaitTimeKey() { + return leaderStepDownWaitTimeKey; + } + + public boolean isPreVote() { + return preVote; + } + + public static LeaderElection.Builder newBuilder() { + return new LeaderElection.Builder(); + } + + public static class Builder { + private TimeDuration leaderStepDownWaitTimeKey = TimeDuration.valueOf(30, TimeUnit.SECONDS); + private boolean preVote = 
RaftServerConfigKeys.LeaderElection.PRE_VOTE_DEFAULT; + + public LeaderElection build() { + return new LeaderElection(leaderStepDownWaitTimeKey, preVote); + } + + public LeaderElection.Builder setLeaderStepDownWaitTimeKey( + TimeDuration leaderStepDownWaitTimeKey) { + this.leaderStepDownWaitTimeKey = leaderStepDownWaitTimeKey; + return this; + } + + public LeaderElection.Builder setPreVote(boolean preVote) { + this.preVote = preVote; + return this; + } + } + } + + public static class Snapshot { + private final boolean autoTriggerEnabled; + private final long creationGap; + private final long autoTriggerThreshold; + private final int retentionFileNum; + + private Snapshot( + boolean autoTriggerEnabled, + long creationGap, + long autoTriggerThreshold, + int retentionFileNum) { + this.autoTriggerEnabled = autoTriggerEnabled; + this.creationGap = creationGap; + this.autoTriggerThreshold = autoTriggerThreshold; + this.retentionFileNum = retentionFileNum; + } + + public boolean isAutoTriggerEnabled() { + return autoTriggerEnabled; + } + + public long getCreationGap() { + return creationGap; + } + + public long getAutoTriggerThreshold() { + return autoTriggerThreshold; + } + + public int getRetentionFileNum() { + return retentionFileNum; + } + + public static Snapshot.Builder newBuilder() { + return new Snapshot.Builder(); + } + + public static class Builder { + private boolean autoTriggerEnabled = true; + private long creationGap = RaftServerConfigKeys.Snapshot.CREATION_GAP_DEFAULT; + private long autoTriggerThreshold = + RaftServerConfigKeys.Snapshot.AUTO_TRIGGER_THRESHOLD_DEFAULT; + private int retentionFileNum = RaftServerConfigKeys.Snapshot.RETENTION_FILE_NUM_DEFAULT; + + public Snapshot build() { + return new Snapshot( + autoTriggerEnabled, creationGap, autoTriggerThreshold, retentionFileNum); + } + + public Snapshot.Builder setAutoTriggerEnabled(boolean autoTriggerEnabled) { + this.autoTriggerEnabled = autoTriggerEnabled; + return this; + } + + public Snapshot.Builder setCreationGap(long creationGap) { + this.creationGap = creationGap; + return this; + } + + public Snapshot.Builder setAutoTriggerThreshold(long autoTriggerThreshold) { + this.autoTriggerThreshold = autoTriggerThreshold; + return this; + } + + public Snapshot.Builder setRetentionFileNum(int retentionFileNum) { + this.retentionFileNum = retentionFileNum; + return this; + } + } + } + + public static class ThreadPool { + private final boolean proxyCached; + private final int proxySize; + private final boolean serverCached; + private final int serverSize; + private final boolean clientCached; + private final int clientSize; + + private ThreadPool( + boolean proxyCached, + int proxySize, + boolean serverCached, + int serverSize, + boolean clientCached, + int clientSize) { + this.proxyCached = proxyCached; + this.proxySize = proxySize; + this.serverCached = serverCached; + this.serverSize = serverSize; + this.clientCached = clientCached; + this.clientSize = clientSize; + } + + public boolean isProxyCached() { + return proxyCached; + } + + public int getProxySize() { + return proxySize; + } + + public boolean isServerCached() { + return serverCached; + } + + public int getServerSize() { + return serverSize; + } + + public boolean isClientCached() { + return clientCached; + } + + public int getClientSize() { + return clientSize; + } + + public static ThreadPool.Builder newBuilder() { + return new ThreadPool.Builder(); + } + + public static class Builder { + private boolean proxyCached = 
RaftServerConfigKeys.ThreadPool.PROXY_CACHED_DEFAULT; + private int proxySize = RaftServerConfigKeys.ThreadPool.PROXY_SIZE_DEFAULT; + private boolean serverCached = RaftServerConfigKeys.ThreadPool.SERVER_CACHED_DEFAULT; + private int serverSize = RaftServerConfigKeys.ThreadPool.SERVER_SIZE_DEFAULT; + private boolean clientCached = RaftServerConfigKeys.ThreadPool.CLIENT_CACHED_DEFAULT; + private int clientSize = RaftServerConfigKeys.ThreadPool.CLIENT_SIZE_DEFAULT; + + public ThreadPool build() { + return new ThreadPool( + proxyCached, proxySize, serverCached, serverSize, clientCached, clientSize); + } + + public ThreadPool.Builder setProxyCached(boolean proxyCached) { + this.proxyCached = proxyCached; + return this; + } + + public ThreadPool.Builder setProxySize(int proxySize) { + this.proxySize = proxySize; + return this; + } + + public ThreadPool.Builder setServerCached(boolean serverCached) { + this.serverCached = serverCached; + return this; + } + + public ThreadPool.Builder setServerSize(int serverSize) { + this.serverSize = serverSize; + return this; + } + + public ThreadPool.Builder setClientCached(boolean clientCached) { + this.clientCached = clientCached; + return this; + } + + public ThreadPool.Builder setClientSize(int clientSize) { + this.clientSize = clientSize; + return this; + } + } + } + + public static class Log { + + private final boolean useMemory; + private final int queueElementLimit; + private final SizeInBytes queueByteLimit; + private final int purgeGap; + private final boolean purgeUptoSnapshotIndex; + private final SizeInBytes segmentSizeMax; + private final int segmentCacheNumMax; + private final SizeInBytes segmentCacheSizeMax; + private final SizeInBytes preallocatedSize; + private final SizeInBytes writeBufferSize; + private final int forceSyncNum; + private final boolean unsafeFlushEnabled; + + private Log( + boolean useMemory, + int queueElementLimit, + SizeInBytes queueByteLimit, + int purgeGap, + boolean purgeUptoSnapshotIndex, + SizeInBytes segmentSizeMax, + int segmentCacheNumMax, + SizeInBytes segmentCacheSizeMax, + SizeInBytes preallocatedSize, + SizeInBytes writeBufferSize, + int forceSyncNum, + boolean unsafeFlushEnabled) { + this.useMemory = useMemory; + this.queueElementLimit = queueElementLimit; + this.queueByteLimit = queueByteLimit; + this.purgeGap = purgeGap; + this.purgeUptoSnapshotIndex = purgeUptoSnapshotIndex; + this.segmentSizeMax = segmentSizeMax; + this.segmentCacheNumMax = segmentCacheNumMax; + this.segmentCacheSizeMax = segmentCacheSizeMax; + this.preallocatedSize = preallocatedSize; + this.writeBufferSize = writeBufferSize; + this.forceSyncNum = forceSyncNum; + this.unsafeFlushEnabled = unsafeFlushEnabled; + } + + public boolean isUseMemory() { + return useMemory; + } + + public int getQueueElementLimit() { + return queueElementLimit; + } + + public SizeInBytes getQueueByteLimit() { + return queueByteLimit; + } + + public int getPurgeGap() { + return purgeGap; + } + + public boolean isPurgeUptoSnapshotIndex() { + return purgeUptoSnapshotIndex; + } + + public SizeInBytes getSegmentSizeMax() { + return segmentSizeMax; + } + + public int getSegmentCacheNumMax() { + return segmentCacheNumMax; + } + + public SizeInBytes getSegmentCacheSizeMax() { + return segmentCacheSizeMax; + } + + public SizeInBytes getPreallocatedSize() { + return preallocatedSize; + } + + public SizeInBytes getWriteBufferSize() { + return writeBufferSize; + } + + public int getForceSyncNum() { + return forceSyncNum; + } + + public boolean isUnsafeFlushEnabled() { + 
return unsafeFlushEnabled; + } + + public static Log.Builder newBuilder() { + return new Log.Builder(); + } + + public static class Builder { + private boolean useMemory = false; + private int queueElementLimit = 4096; + private SizeInBytes queueByteLimit = SizeInBytes.valueOf("64MB"); + private int purgeGap = 1024; + private boolean purgeUptoSnapshotIndex = false; + private SizeInBytes segmentSizeMax = SizeInBytes.valueOf("24MB"); + private int segmentCacheNumMax = 2; + private SizeInBytes segmentCacheSizeMax = SizeInBytes.valueOf("200MB"); + private SizeInBytes preallocatedSize = SizeInBytes.valueOf("4MB"); + private SizeInBytes writeBufferSize = SizeInBytes.valueOf("64KB"); + private int forceSyncNum = 128; + private boolean unsafeFlushEnabled = false; + + public Log build() { + return new Log( + useMemory, + queueElementLimit, + queueByteLimit, + purgeGap, + purgeUptoSnapshotIndex, + segmentSizeMax, + segmentCacheNumMax, + segmentCacheSizeMax, + preallocatedSize, + writeBufferSize, + forceSyncNum, + unsafeFlushEnabled); + } + + public Log.Builder setUseMemory(boolean useMemory) { + this.useMemory = useMemory; + return this; + } + + public Log.Builder setQueueElementLimit(int queueElementLimit) { + this.queueElementLimit = queueElementLimit; + return this; + } + + public Log.Builder setQueueByteLimit(SizeInBytes queueByteLimit) { + this.queueByteLimit = queueByteLimit; + return this; + } + + public Log.Builder setPurgeGap(int purgeGap) { + this.purgeGap = purgeGap; + return this; + } + + public Log.Builder setPurgeUptoSnapshotIndex(boolean purgeUptoSnapshotIndex) { + this.purgeUptoSnapshotIndex = purgeUptoSnapshotIndex; + return this; + } + + public Log.Builder setSegmentSizeMax(SizeInBytes segmentSizeMax) { + this.segmentSizeMax = segmentSizeMax; + return this; + } + + public Log.Builder setSegmentCacheNumMax(int segmentCacheNumMax) { + this.segmentCacheNumMax = segmentCacheNumMax; + return this; + } + + public Log.Builder setSegmentCacheSizeMax(SizeInBytes segmentCacheSizeMax) { + this.segmentCacheSizeMax = segmentCacheSizeMax; + return this; + } + + public Log.Builder setPreallocatedSize(SizeInBytes preallocatedSize) { + this.preallocatedSize = preallocatedSize; + return this; + } + + public Log.Builder setWriteBufferSize(SizeInBytes writeBufferSize) { + this.writeBufferSize = writeBufferSize; + return this; + } + + public Log.Builder setForceSyncNum(int forceSyncNum) { + this.forceSyncNum = forceSyncNum; + return this; + } + + public Log.Builder setUnsafeFlushEnabled(boolean unsafeFlushEnabled) { + this.unsafeFlushEnabled = unsafeFlushEnabled; + return this; + } + } + } + + public static class Grpc { + private final SizeInBytes messageSizeMax; + private final SizeInBytes flowControlWindow; + private final boolean asyncRequestThreadPoolCached; + private final int asyncRequestThreadPoolSize; + private final int leaderOutstandingAppendsMax; + + private Grpc( + SizeInBytes messageSizeMax, + SizeInBytes flowControlWindow, + boolean asyncRequestThreadPoolCached, + int asyncRequestThreadPoolSize, + int leaderOutstandingAppendsMax) { + this.messageSizeMax = messageSizeMax; + this.flowControlWindow = flowControlWindow; + this.asyncRequestThreadPoolCached = asyncRequestThreadPoolCached; + this.asyncRequestThreadPoolSize = asyncRequestThreadPoolSize; + this.leaderOutstandingAppendsMax = leaderOutstandingAppendsMax; + } + + public SizeInBytes getMessageSizeMax() { + return messageSizeMax; + } + + public SizeInBytes getFlowControlWindow() { + return flowControlWindow; + } + + public boolean 
isAsyncRequestThreadPoolCached() { + return asyncRequestThreadPoolCached; + } + + public int getAsyncRequestThreadPoolSize() { + return asyncRequestThreadPoolSize; + } + + public int getLeaderOutstandingAppendsMax() { + return leaderOutstandingAppendsMax; + } + + public static Grpc.Builder newBuilder() { + return new Grpc.Builder(); + } + + public static class Builder { + private SizeInBytes messageSizeMax = SizeInBytes.valueOf("512MB"); + private SizeInBytes flowControlWindow = SizeInBytes.valueOf("4MB"); + private boolean asyncRequestThreadPoolCached = + Server.ASYNC_REQUEST_THREAD_POOL_CACHED_DEFAULT; + private int asyncRequestThreadPoolSize = Server.ASYNC_REQUEST_THREAD_POOL_SIZE_DEFAULT; + private int leaderOutstandingAppendsMax = Server.LEADER_OUTSTANDING_APPENDS_MAX_DEFAULT; + + public Grpc build() { + return new Grpc( + messageSizeMax, + flowControlWindow, + asyncRequestThreadPoolCached, + asyncRequestThreadPoolSize, + leaderOutstandingAppendsMax); + } + + public Grpc.Builder setMessageSizeMax(SizeInBytes messageSizeMax) { + this.messageSizeMax = messageSizeMax; + return this; + } + + public Grpc.Builder setFlowControlWindow(SizeInBytes flowControlWindow) { + this.flowControlWindow = flowControlWindow; + return this; + } + + public Grpc.Builder setAsyncRequestThreadPoolCached(boolean asyncRequestThreadPoolCached) { + this.asyncRequestThreadPoolCached = asyncRequestThreadPoolCached; + return this; + } + + public Grpc.Builder setAsyncRequestThreadPoolSize(int asyncRequestThreadPoolSize) { + this.asyncRequestThreadPoolSize = asyncRequestThreadPoolSize; + return this; + } + + public Grpc.Builder setLeaderOutstandingAppendsMax(int leaderOutstandingAppendsMax) { + this.leaderOutstandingAppendsMax = leaderOutstandingAppendsMax; + return this; + } + } + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerEndpointException.java b/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerEndpointException.java index 7591345acc0c..969ed3e38e0f 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerEndpointException.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerEndpointException.java @@ -20,13 +20,16 @@ package org.apache.iotdb.consensus.exception; import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.consensus.common.Peer; + +import java.util.List; public class IllegalPeerEndpointException extends ConsensusException { - public IllegalPeerEndpointException(TEndPoint currentNode, TEndPoint newNode) { + public IllegalPeerEndpointException(TEndPoint currentNode, List peers) { super( String.format( - "Illegal creation for node %s in node %s in StandAloneConsensus Mode", - newNode, currentNode)); + "Illegal addConsensusGroup because currentNode %s is not in consensusGroup %s", + currentNode, peers)); } } diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerNumException.java b/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerNumException.java index d2026a551516..c08ef050576b 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerNumException.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/exception/IllegalPeerNumException.java @@ -22,8 +22,6 @@ public class IllegalPeerNumException extends ConsensusException { public IllegalPeerNumException(int size) { - super( - String.format( - "Illegal Peer num %d, only support one peer in StandAloneConsensus Mode", size)); + 
super(String.format("Illegal peer num %d when adding consensus group", size)); } } diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/MultiLeaderConsensus.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/MultiLeaderConsensus.java new file mode 100644 index 000000000000..eecfa473c607 --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/MultiLeaderConsensus.java @@ -0,0 +1,266 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader; + +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.commons.client.IClientManager; +import org.apache.iotdb.commons.consensus.ConsensusGroupId; +import org.apache.iotdb.commons.exception.StartupException; +import org.apache.iotdb.commons.service.RegisterManager; +import org.apache.iotdb.commons.utils.FileUtils; +import org.apache.iotdb.consensus.IConsensus; +import org.apache.iotdb.consensus.IStateMachine; +import org.apache.iotdb.consensus.IStateMachine.Registry; +import org.apache.iotdb.consensus.common.Peer; +import org.apache.iotdb.consensus.common.request.IConsensusRequest; +import org.apache.iotdb.consensus.common.response.ConsensusGenericResponse; +import org.apache.iotdb.consensus.common.response.ConsensusReadResponse; +import org.apache.iotdb.consensus.common.response.ConsensusWriteResponse; +import org.apache.iotdb.consensus.config.ConsensusConfig; +import org.apache.iotdb.consensus.config.MultiLeaderConfig; +import org.apache.iotdb.consensus.exception.ConsensusGroupAlreadyExistException; +import org.apache.iotdb.consensus.exception.ConsensusGroupNotExistException; +import org.apache.iotdb.consensus.exception.IllegalPeerEndpointException; +import org.apache.iotdb.consensus.exception.IllegalPeerNumException; +import org.apache.iotdb.consensus.multileader.client.AsyncMultiLeaderServiceClient; +import org.apache.iotdb.consensus.multileader.client.MultiLeaderConsensusClientPool.AsyncMultiLeaderServiceClientPoolFactory; +import org.apache.iotdb.consensus.multileader.service.MultiLeaderRPCService; +import org.apache.iotdb.consensus.multileader.service.MultiLeaderRPCServiceProcessor; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; + +public class MultiLeaderConsensus implements IConsensus { + + private final Logger logger = LoggerFactory.getLogger(MultiLeaderConsensus.class); + + private final TEndPoint thisNode; + private 
final File storageDir; + private final IStateMachine.Registry registry; + private final Map stateMachineMap = + new ConcurrentHashMap<>(); + private final MultiLeaderRPCService service; + private final RegisterManager registerManager = new RegisterManager(); + private final MultiLeaderConfig config; + private final IClientManager clientManager; + + public MultiLeaderConsensus(ConsensusConfig config, Registry registry) { + this.thisNode = config.getThisNode(); + this.storageDir = new File(config.getStorageDir()); + this.config = config.getMultiLeaderConfig(); + this.registry = registry; + this.service = new MultiLeaderRPCService(thisNode, config.getMultiLeaderConfig()); + this.clientManager = + new IClientManager.Factory() + .createClientManager( + new AsyncMultiLeaderServiceClientPoolFactory(config.getMultiLeaderConfig())); + } + + @Override + public void start() throws IOException { + initAndRecover(); + service.initSyncedServiceImpl(new MultiLeaderRPCServiceProcessor(this)); + try { + registerManager.register(service); + } catch (StartupException e) { + throw new IOException(e); + } + } + + private void initAndRecover() throws IOException { + if (!storageDir.exists()) { + if (!storageDir.mkdirs()) { + logger.warn("Unable to create consensus dir at {}", storageDir); + } + } else { + try (DirectoryStream stream = Files.newDirectoryStream(storageDir.toPath())) { + for (Path path : stream) { + String[] items = path.getFileName().toString().split("_"); + ConsensusGroupId consensusGroupId = + ConsensusGroupId.Factory.create( + Integer.parseInt(items[0]), Integer.parseInt(items[1])); + MultiLeaderServerImpl consensus = + new MultiLeaderServerImpl( + path.toString(), + new Peer(consensusGroupId, thisNode), + new ArrayList<>(), + registry.apply(consensusGroupId), + clientManager, + config); + stateMachineMap.put(consensusGroupId, consensus); + consensus.start(); + } + } + } + } + + @Override + public void stop() { + clientManager.close(); + stateMachineMap.values().parallelStream().forEach(MultiLeaderServerImpl::stop); + registerManager.deregisterAll(); + } + + @Override + public ConsensusWriteResponse write(ConsensusGroupId groupId, IConsensusRequest request) { + MultiLeaderServerImpl impl = stateMachineMap.get(groupId); + if (impl == null) { + return ConsensusWriteResponse.newBuilder() + .setException(new ConsensusGroupNotExistException(groupId)) + .build(); + } + return ConsensusWriteResponse.newBuilder().setStatus(impl.write(request)).build(); + } + + @Override + public ConsensusReadResponse read(ConsensusGroupId groupId, IConsensusRequest request) { + MultiLeaderServerImpl impl = stateMachineMap.get(groupId); + if (impl == null) { + return ConsensusReadResponse.newBuilder() + .setException(new ConsensusGroupNotExistException(groupId)) + .build(); + } + return ConsensusReadResponse.newBuilder().setDataSet(impl.read(request)).build(); + } + + @Override + public ConsensusGenericResponse addConsensusGroup(ConsensusGroupId groupId, List peers) { + int consensusGroupSize = peers.size(); + if (consensusGroupSize == 0) { + return ConsensusGenericResponse.newBuilder() + .setException(new IllegalPeerNumException(consensusGroupSize)) + .build(); + } + if (!peers.contains(new Peer(groupId, thisNode))) { + return ConsensusGenericResponse.newBuilder() + .setException(new IllegalPeerEndpointException(thisNode, peers)) + .build(); + } + AtomicBoolean exist = new AtomicBoolean(true); + stateMachineMap.computeIfAbsent( + groupId, + k -> { + exist.set(false); + String path = buildPeerDir(groupId); + File file 
= new File(path); + if (!file.mkdirs()) { + logger.warn("Unable to create consensus dir for group {} at {}", groupId, path); + } + MultiLeaderServerImpl impl = + new MultiLeaderServerImpl( + path, + new Peer(groupId, thisNode), + peers, + registry.apply(groupId), + clientManager, + config); + impl.start(); + return impl; + }); + if (exist.get()) { + return ConsensusGenericResponse.newBuilder() + .setException(new ConsensusGroupAlreadyExistException(groupId)) + .build(); + } + return ConsensusGenericResponse.newBuilder().setSuccess(true).build(); + } + + @Override + public ConsensusGenericResponse removeConsensusGroup(ConsensusGroupId groupId) { + AtomicBoolean exist = new AtomicBoolean(false); + stateMachineMap.computeIfPresent( + groupId, + (k, v) -> { + exist.set(true); + v.stop(); + FileUtils.deleteDirectory(new File(buildPeerDir(groupId))); + return null; + }); + + if (!exist.get()) { + return ConsensusGenericResponse.newBuilder() + .setException(new ConsensusGroupNotExistException(groupId)) + .build(); + } + return ConsensusGenericResponse.newBuilder().setSuccess(true).build(); + } + + @Override + public ConsensusGenericResponse addPeer(ConsensusGroupId groupId, Peer peer) { + return ConsensusGenericResponse.newBuilder().setSuccess(false).build(); + } + + @Override + public ConsensusGenericResponse removePeer(ConsensusGroupId groupId, Peer peer) { + return ConsensusGenericResponse.newBuilder().setSuccess(false).build(); + } + + @Override + public ConsensusGenericResponse changePeer(ConsensusGroupId groupId, List newPeers) { + return ConsensusGenericResponse.newBuilder().setSuccess(false).build(); + } + + @Override + public ConsensusGenericResponse transferLeader(ConsensusGroupId groupId, Peer newLeader) { + return ConsensusGenericResponse.newBuilder().setSuccess(false).build(); + } + + @Override + public ConsensusGenericResponse triggerSnapshot(ConsensusGroupId groupId) { + return ConsensusGenericResponse.newBuilder().setSuccess(false).build(); + } + + @Override + public boolean isLeader(ConsensusGroupId groupId) { + return true; + } + + @Override + public Peer getLeader(ConsensusGroupId groupId) { + if (!stateMachineMap.containsKey(groupId)) { + return null; + } + return new Peer(groupId, thisNode); + } + + @Override + public List getAllConsensusGroupIds() { + return new ArrayList<>(stateMachineMap.keySet()); + } + + public MultiLeaderServerImpl getImpl(ConsensusGroupId groupId) { + return stateMachineMap.get(groupId); + } + + private String buildPeerDir(ConsensusGroupId groupId) { + return storageDir + File.separator + groupId.getType().getValue() + "_" + groupId.getId(); + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/MultiLeaderServerImpl.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/MultiLeaderServerImpl.java new file mode 100644 index 000000000000..272c244a1e6e --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/MultiLeaderServerImpl.java @@ -0,0 +1,195 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader; + +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.client.IClientManager; +import org.apache.iotdb.consensus.IStateMachine; +import org.apache.iotdb.consensus.common.DataSet; +import org.apache.iotdb.consensus.common.Peer; +import org.apache.iotdb.consensus.common.request.ByteBufferConsensusRequest; +import org.apache.iotdb.consensus.common.request.IConsensusRequest; +import org.apache.iotdb.consensus.common.request.IndexedConsensusRequest; +import org.apache.iotdb.consensus.config.MultiLeaderConfig; +import org.apache.iotdb.consensus.multileader.client.AsyncMultiLeaderServiceClient; +import org.apache.iotdb.consensus.multileader.logdispatcher.IndexController; +import org.apache.iotdb.consensus.multileader.logdispatcher.LogDispatcher; +import org.apache.iotdb.consensus.multileader.wal.ConsensusReqReader; +import org.apache.iotdb.consensus.ratis.Utils; +import org.apache.iotdb.tsfile.utils.PublicBAOS; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataOutputStream; +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.List; + +public class MultiLeaderServerImpl { + + private static final String CONFIGURATION_FILE_NAME = "configuration.dat"; + + private final Logger logger = LoggerFactory.getLogger(MultiLeaderServerImpl.class); + + private final Peer thisNode; + private final IStateMachine stateMachine; + private final String storageDir; + private final List configuration; + private final IndexController controller; + private final LogDispatcher logDispatcher; + private final MultiLeaderConfig config; + + public MultiLeaderServerImpl( + String storageDir, + Peer thisNode, + List configuration, + IStateMachine stateMachine, + IClientManager clientManager, + MultiLeaderConfig config) { + this.storageDir = storageDir; + this.thisNode = thisNode; + this.stateMachine = stateMachine; + this.controller = + new IndexController(storageDir, Utils.fromTEndPointToString(thisNode.getEndpoint()), true); + this.configuration = configuration; + if (configuration.isEmpty()) { + recoverConfiguration(); + } else { + persistConfiguration(); + } + this.config = config; + this.logDispatcher = new LogDispatcher(this, clientManager); + } + + public IStateMachine getStateMachine() { + return stateMachine; + } + + public void start() { + stateMachine.start(); + logDispatcher.start(); + } + + public void stop() { + logDispatcher.stop(); + stateMachine.stop(); + } + + /** + * records the index of the log and writes locally, and then asynchronous replication is performed + */ + public TSStatus write(IConsensusRequest request) { + synchronized (stateMachine) { + IndexedConsensusRequest indexedConsensusRequest = + buildIndexedConsensusRequestForLocalRequest(request); + TSStatus result = stateMachine.write(indexedConsensusRequest); + logDispatcher.offer(indexedConsensusRequest); + return result; + } + } + + public DataSet 
read(IConsensusRequest request) { + return stateMachine.read(request); + } + + public boolean takeSnapshot(File snapshotDir) { + return stateMachine.takeSnapshot(snapshotDir); + } + + public void loadSnapshot(File latestSnapshotRootDir) { + stateMachine.loadSnapshot(latestSnapshotRootDir); + } + + public void persistConfiguration() { + try (PublicBAOS publicBAOS = new PublicBAOS(); + DataOutputStream outputStream = new DataOutputStream(publicBAOS)) { + outputStream.writeInt(configuration.size()); + for (Peer peer : configuration) { + peer.serialize(outputStream); + } + Files.write( + Paths.get(new File(storageDir, CONFIGURATION_FILE_NAME).getAbsolutePath()), + publicBAOS.getBuf()); + } catch (IOException e) { + logger.error("Unexpected error occurs when persisting configuration", e); + } + } + + public void recoverConfiguration() { + ByteBuffer buffer; + try { + buffer = + ByteBuffer.wrap( + Files.readAllBytes( + Paths.get(new File(storageDir, CONFIGURATION_FILE_NAME).getAbsolutePath()))); + int size = buffer.getInt(); + for (int i = 0; i < size; i++) { + configuration.add(Peer.deserialize(buffer)); + } + } catch (IOException e) { + logger.error("Unexpected error occurs when recovering configuration", e); + } + } + + public IndexedConsensusRequest buildIndexedConsensusRequestForLocalRequest( + IConsensusRequest request) { + return new IndexedConsensusRequest( + controller.incrementAndGet(), getCurrentSafelyDeletedSearchIndex(), request); + } + + public IndexedConsensusRequest buildIndexedConsensusRequestForRemoteRequest( + ByteBufferConsensusRequest request) { + return new IndexedConsensusRequest( + ConsensusReqReader.DEFAULT_SEARCH_INDEX, getCurrentSafelyDeletedSearchIndex(), request); + } + + /** + * In the case of multiple copies, the minimum synchronization index is selected. In the case of + * single copies, the current index is selected + */ + public long getCurrentSafelyDeletedSearchIndex() { + return logDispatcher.getMinSyncIndex().orElseGet(controller::getCurrentIndex); + } + + public String getStorageDir() { + return storageDir; + } + + public Peer getThisNode() { + return thisNode; + } + + public List getConfiguration() { + return configuration; + } + + public IndexController getController() { + return controller; + } + + public MultiLeaderConfig getConfig() { + return config; + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/client/AsyncMultiLeaderServiceClient.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/client/AsyncMultiLeaderServiceClient.java new file mode 100644 index 000000000000..98e2e82b18a0 --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/client/AsyncMultiLeaderServiceClient.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader.client; + +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.commons.client.AsyncBaseClientFactory; +import org.apache.iotdb.commons.client.ClientFactoryProperty; +import org.apache.iotdb.commons.client.ClientManager; +import org.apache.iotdb.consensus.multileader.thrift.MultiLeaderConsensusIService; +import org.apache.iotdb.rpc.TNonblockingSocketWrapper; + +import org.apache.commons.pool2.PooledObject; +import org.apache.commons.pool2.impl.DefaultPooledObject; +import org.apache.thrift.async.TAsyncClientManager; +import org.apache.thrift.protocol.TProtocolFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +public class AsyncMultiLeaderServiceClient extends MultiLeaderConsensusIService.AsyncClient { + + private static final Logger logger = LoggerFactory.getLogger(AsyncMultiLeaderServiceClient.class); + + private final TEndPoint endpoint; + private final ClientManager clientManager; + + public AsyncMultiLeaderServiceClient( + TProtocolFactory protocolFactory, + int connectionTimeout, + TEndPoint endpoint, + TAsyncClientManager tClientManager, + ClientManager clientManager) + throws IOException { + super( + protocolFactory, + tClientManager, + TNonblockingSocketWrapper.wrap(endpoint.getIp(), endpoint.getPort(), connectionTimeout)); + this.endpoint = endpoint; + this.clientManager = clientManager; + } + + public void close() { + ___transport.close(); + ___currentMethod = null; + } + + /** + * return self if clientManager is not null, the method doesn't need to call by user, it will + * trigger once client transport complete. + */ + private void returnSelf() { + if (clientManager != null) { + clientManager.returnClient(endpoint, this); + } + } + + /** + * This method will be automatically called by the thrift selector thread, and we'll just simulate + * the behavior in our test + */ + @Override + public void onComplete() { + super.onComplete(); + returnSelf(); + } + + /** + * This method will be automatically called by the thrift selector thread, and we'll just simulate + * the behavior in our test + */ + @Override + public void onError(Exception e) { + super.onError(e); + returnSelf(); + } + + public boolean isReady() { + try { + checkReady(); + return true; + } catch (Exception e) { + logger.info("Unexpected exception occurs in {} :", this, e); + return false; + } + } + + @Override + public String toString() { + return String.format("AsyncConfigNodeIServiceClient{%s}", endpoint); + } + + public static class Factory + extends AsyncBaseClientFactory { + + public Factory( + ClientManager clientManager, + ClientFactoryProperty clientFactoryProperty) { + super(clientManager, clientFactoryProperty); + } + + @Override + public void destroyObject( + TEndPoint endPoint, PooledObject pooledObject) { + pooledObject.getObject().close(); + } + + @Override + public PooledObject makeObject(TEndPoint endPoint) + throws Exception { + TAsyncClientManager tManager = tManagers[clientCnt.incrementAndGet() % tManagers.length]; + tManager = tManager == null ? 
new TAsyncClientManager() : tManager; + return new DefaultPooledObject<>( + new AsyncMultiLeaderServiceClient( + clientFactoryProperty.getProtocolFactory(), + clientFactoryProperty.getConnectionTimeoutMs(), + endPoint, + tManager, + clientManager)); + } + + @Override + public boolean validateObject( + TEndPoint endPoint, PooledObject pooledObject) { + return pooledObject.getObject() != null && pooledObject.getObject().isReady(); + } + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/client/DispatchLogHandler.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/client/DispatchLogHandler.java new file mode 100644 index 000000000000..14a2b4ad583e --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/client/DispatchLogHandler.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader.client; + +import org.apache.iotdb.consensus.multileader.logdispatcher.LogDispatcher.LogDispatcherThread; +import org.apache.iotdb.consensus.multileader.logdispatcher.PendingBatch; +import org.apache.iotdb.consensus.multileader.thrift.TSyncLogRes; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.apache.thrift.async.AsyncMethodCallback; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.CompletableFuture; + +public class DispatchLogHandler implements AsyncMethodCallback { + + private final Logger logger = LoggerFactory.getLogger(DispatchLogHandler.class); + + private final LogDispatcherThread thread; + private final PendingBatch batch; + private int retryCount; + + public DispatchLogHandler(LogDispatcherThread thread, PendingBatch batch) { + this.thread = thread; + this.batch = batch; + } + + @Override + public void onComplete(TSyncLogRes response) { + if (response.getStatus().size() == 1 + && response.getStatus().get(0).getCode() + == TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()) { + logger.warn( + "Can not send {} to peer {} for {} times because {}", + batch, + thread.getPeer(), + ++retryCount, + response.getStatus().get(0).getMessage()); + sleepCorrespondingTimeAndRetryAsynchronous(); + } else { + thread.getSyncStatus().removeBatch(batch); + } + } + + @Override + public void onError(Exception exception) { + logger.warn( + "Can not send {} to peer for {} times {} because {}", + batch, + thread.getPeer(), + ++retryCount, + exception); + sleepCorrespondingTimeAndRetryAsynchronous(); + } + + private void sleepCorrespondingTimeAndRetryAsynchronous() { + // TODO handle forever retry + CompletableFuture.runAsync( + () -> { + try { + long defaultSleepTime = + (long) + (thread.getConfig().getReplication().getBasicRetryWaitTimeMs() + * Math.pow(2, retryCount)); + 
Thread.sleep( + Math.min( + defaultSleepTime, thread.getConfig().getReplication().getMaxRetryWaitTimeMs())); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.warn("Unexpected interruption during retry pending batch"); + } + if (thread.isStopped()) { + logger.debug( + "LogDispatcherThread {} has been stopped, we will ignore the retry PendingBatch {} after {} times", + thread.getPeer(), + batch, + retryCount); + } else { + thread.sendBatchAsync(batch, this); + } + }); + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/client/MultiLeaderConsensusClientPool.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/client/MultiLeaderConsensusClientPool.java new file mode 100644 index 000000000000..e3687edf8f2d --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/client/MultiLeaderConsensusClientPool.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader.client; + +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.commons.client.ClientFactoryProperty; +import org.apache.iotdb.commons.client.ClientManager; +import org.apache.iotdb.commons.client.ClientPoolProperty; +import org.apache.iotdb.commons.client.IClientPoolFactory; +import org.apache.iotdb.consensus.config.MultiLeaderConfig; + +import org.apache.commons.pool2.KeyedObjectPool; +import org.apache.commons.pool2.impl.GenericKeyedObjectPool; + +public class MultiLeaderConsensusClientPool { + + private MultiLeaderConsensusClientPool() {} + + public static class AsyncMultiLeaderServiceClientPoolFactory + implements IClientPoolFactory { + + private final MultiLeaderConfig config; + + public AsyncMultiLeaderServiceClientPoolFactory(MultiLeaderConfig config) { + this.config = config; + } + + @Override + public KeyedObjectPool createClientPool( + ClientManager manager) { + return new GenericKeyedObjectPool<>( + new AsyncMultiLeaderServiceClient.Factory( + manager, + new ClientFactoryProperty.Builder() + .setConnectionTimeoutMs(config.getRpc().getConnectionTimeoutInMs()) + .setRpcThriftCompressionEnabled(config.getRpc().isRpcThriftCompressionEnabled()) + .setSelectorNumOfAsyncClientManager( + config.getRpc().getSelectorNumOfClientManager()) + .build()), + new ClientPoolProperty.Builder().build().getConfig()); + } + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/logdispatcher/IndexController.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/logdispatcher/IndexController.java new file mode 100644 index 000000000000..6a7396076d5b --- /dev/null +++ 
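A minimal sketch (not part of the patch) of the retry back-off used by DispatchLogHandler above: the wait before the next asynchronous retry grows exponentially with the retry count and is capped by the configured maximum. The 100 ms and 20 000 ms values below are illustrative stand-ins for the replication config's basicRetryWaitTimeMs and maxRetryWaitTimeMs, not the shipped defaults.

// Sketch only: mirrors the computation in sleepCorrespondingTimeAndRetryAsynchronous().
public final class BackoffSketch {
  static long nextWaitMs(long basicRetryWaitTimeMs, long maxRetryWaitTimeMs, int retryCount) {
    // exponential back-off: basic * 2^retryCount, capped at the configured maximum
    long exponential = (long) (basicRetryWaitTimeMs * Math.pow(2, retryCount));
    return Math.min(exponential, maxRetryWaitTimeMs);
  }

  public static void main(String[] args) {
    for (int retry = 1; retry <= 6; retry++) {
      System.out.println("retry " + retry + " waits " + nextWaitMs(100, 20_000, retry) + " ms");
    }
  }
}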
b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/logdispatcher/IndexController.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader.logdispatcher; + +import org.apache.iotdb.commons.utils.TestOnly; + +import org.apache.commons.io.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.concurrent.ThreadSafe; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; + +/** An index controller class to balance the performance degradation of frequent disk I/O. */ +@ThreadSafe +public class IndexController { + + private final Logger logger = LoggerFactory.getLogger(IndexController.class); + + public static final int FLUSH_INTERVAL = 500; + + private volatile long lastFlushedIndex; + private volatile long currentIndex; + + private final String storageDir; + private final String prefix; + // Indicates whether currentIndex needs to be incremented by FLUSH_INTERVAL interval after restart + private final boolean incrementIntervalAfterRestart; + + public IndexController(String storageDir, String prefix, boolean incrementIntervalAfterRestart) { + this.storageDir = storageDir; + this.prefix = prefix + '-'; + this.incrementIntervalAfterRestart = incrementIntervalAfterRestart; + restore(); + } + + public synchronized long incrementAndGet() { + currentIndex++; + checkPersist(); + return currentIndex; + } + + public synchronized long updateAndGet(long index) { + long newCurrentIndex = Math.max(currentIndex, index); + logger.debug( + "update index from currentIndex {} to {} for file prefix {} in {}", + currentIndex, + newCurrentIndex, + prefix, + storageDir); + currentIndex = newCurrentIndex; + checkPersist(); + return currentIndex; + } + + public long getCurrentIndex() { + return currentIndex; + } + + @TestOnly + public long getLastFlushedIndex() { + return lastFlushedIndex; + } + + private void checkPersist() { + if (currentIndex - lastFlushedIndex >= FLUSH_INTERVAL) { + persist(); + } + } + + private void persist() { + long flushIndex = currentIndex - currentIndex % FLUSH_INTERVAL; + File oldFile = new File(storageDir, prefix + lastFlushedIndex); + File newFile = new File(storageDir, prefix + flushIndex); + try { + if (oldFile.exists()) { + FileUtils.moveFile(oldFile, newFile); + } + logger.info( + "Version file updated, previous: {}, current: {}", + oldFile.getAbsolutePath(), + newFile.getAbsolutePath()); + lastFlushedIndex = flushIndex; + } catch (IOException e) { + logger.error("Error occurred when flushing next version", e); + } + } + + private void restore() { + File directory = new File(storageDir); + File[] versionFiles = directory.listFiles((dir, name) -> name.startsWith(prefix)); + File 
versionFile; + if (versionFiles != null && versionFiles.length > 0) { + long maxVersion = 0; + int maxVersionIndex = 0; + for (int i = 0; i < versionFiles.length; i++) { + long fileVersion = Long.parseLong(versionFiles[i].getName().split("-")[1]); + if (fileVersion > maxVersion) { + maxVersion = fileVersion; + maxVersionIndex = i; + } + } + lastFlushedIndex = maxVersion; + for (int i = 0; i < versionFiles.length; i++) { + if (i != maxVersionIndex) { + try { + Files.delete(versionFiles[i].toPath()); + } catch (IOException e) { + logger.error( + "Delete outdated version file {} failed", versionFiles[i].getAbsolutePath(), e); + } + } + } + if (incrementIntervalAfterRestart) { + // prevent overlapping in case of failure + currentIndex = lastFlushedIndex + FLUSH_INTERVAL; + persist(); + } else { + currentIndex = lastFlushedIndex; + } + } else { + versionFile = new File(directory, prefix + "0"); + try { + Files.createFile(versionFile.toPath()); + } catch (IOException e) { + logger.error("Error occurred when creating new file {}", versionFile.getAbsolutePath(), e); + } + } + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/logdispatcher/LogDispatcher.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/logdispatcher/LogDispatcher.java new file mode 100644 index 000000000000..1d651a0ba548 --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/logdispatcher/LogDispatcher.java @@ -0,0 +1,302 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
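A minimal sketch (not part of the patch) of the restart rule IndexController enforces above: the index is only flushed to disk every FLUSH_INTERVAL increments, so when incrementIntervalAfterRestart is set the recovered in-memory index jumps a full interval past the last flushed value, guaranteeing it never re-issues an index that was handed out but not yet persisted before a crash. The inputs below are example values.

// Sketch only: the recovery arithmetic from IndexController.restore().
public final class IndexRestoreSketch {
  static final long FLUSH_INTERVAL = 500; // same constant as IndexController.FLUSH_INTERVAL

  static long indexAfterRestart(long lastFlushedIndex, boolean incrementIntervalAfterRestart) {
    // skipping a whole interval avoids overlap with indices issued shortly before the crash
    return incrementIntervalAfterRestart ? lastFlushedIndex + FLUSH_INTERVAL : lastFlushedIndex;
  }

  public static void main(String[] args) {
    System.out.println(indexAfterRestart(1500, true));  // 2000
    System.out.println(indexAfterRestart(1500, false)); // 1500
  }
}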
+ */ + +package org.apache.iotdb.consensus.multileader.logdispatcher; + +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.commons.client.IClientManager; +import org.apache.iotdb.commons.concurrent.IoTDBThreadPoolFactory; +import org.apache.iotdb.consensus.common.Peer; +import org.apache.iotdb.consensus.common.request.IConsensusRequest; +import org.apache.iotdb.consensus.common.request.IndexedConsensusRequest; +import org.apache.iotdb.consensus.config.MultiLeaderConfig; +import org.apache.iotdb.consensus.multileader.MultiLeaderServerImpl; +import org.apache.iotdb.consensus.multileader.client.AsyncMultiLeaderServiceClient; +import org.apache.iotdb.consensus.multileader.client.DispatchLogHandler; +import org.apache.iotdb.consensus.multileader.thrift.TLogBatch; +import org.apache.iotdb.consensus.multileader.thrift.TSyncLogReq; +import org.apache.iotdb.consensus.multileader.wal.ConsensusReqReader; +import org.apache.iotdb.consensus.multileader.wal.GetConsensusReqReaderPlan; +import org.apache.iotdb.consensus.ratis.Utils; + +import org.apache.thrift.TException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Objects; +import java.util.OptionalLong; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +/** Manage all asynchronous replication threads and corresponding async clients */ +public class LogDispatcher { + + private final Logger logger = LoggerFactory.getLogger(LogDispatcher.class); + + private final MultiLeaderServerImpl impl; + private final List threads; + private final IClientManager clientManager; + private ExecutorService executorService; + + public LogDispatcher( + MultiLeaderServerImpl impl, + IClientManager clientManager) { + this.impl = impl; + this.clientManager = clientManager; + this.threads = + impl.getConfiguration().stream() + .filter(x -> !Objects.equals(x, impl.getThisNode())) + .map(x -> new LogDispatcherThread(x, impl.getConfig())) + .collect(Collectors.toList()); + if (!threads.isEmpty()) { + this.executorService = + IoTDBThreadPoolFactory.newFixedThreadPool( + threads.size(), "LogDispatcher-" + impl.getThisNode().getGroupId()); + } + } + + public void start() { + if (!threads.isEmpty()) { + threads.forEach(executorService::submit); + } + } + + public void stop() { + if (!threads.isEmpty()) { + threads.forEach(LogDispatcherThread::stop); + executorService.shutdownNow(); + int timeout = 10; + try { + if (!executorService.awaitTermination(timeout, TimeUnit.SECONDS)) { + logger.error("Unable to shutdown LogDispatcher service after {} seconds", timeout); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.error("Unexpected Interruption when closing LogDispatcher service "); + } + } + } + + public OptionalLong getMinSyncIndex() { + return threads.stream().mapToLong(LogDispatcherThread::getCurrentSyncIndex).min(); + } + + public void offer(IndexedConsensusRequest request) { + threads.forEach( + thread -> { + logger.debug( + "{}: Push a log to the queue, where the queue length is {}", + impl.getThisNode().getGroupId(), + thread.getPendingRequest().size()); + if (!thread.getPendingRequest().offer(request)) { + logger.debug( + "{}: Log queue of {} is full, ignore the log to this 
node", + impl.getThisNode().getGroupId(), + thread.getPeer()); + } + }); + } + + public class LogDispatcherThread implements Runnable { + + private final MultiLeaderConfig config; + private final Peer peer; + private final IndexController controller; + // A sliding window class that manages asynchronously pendingBatches + private final SyncStatus syncStatus; + // A queue used to receive asynchronous replication requests + private final BlockingQueue pendingRequest; + // A container used to cache requests, whose size changes dynamically + private final List bufferedRequest = new LinkedList<>(); + // A reader management class that gets requests from the DataRegion + private final ConsensusReqReader reader = + (ConsensusReqReader) impl.getStateMachine().read(new GetConsensusReqReaderPlan()); + private volatile boolean stopped = false; + + public LogDispatcherThread(Peer peer, MultiLeaderConfig config) { + this.peer = peer; + this.config = config; + this.pendingRequest = + new ArrayBlockingQueue<>(config.getReplication().getMaxPendingRequestNumPerNode()); + this.controller = + new IndexController( + impl.getStorageDir(), Utils.fromTEndPointToString(peer.getEndpoint()), false); + this.syncStatus = new SyncStatus(controller, config); + } + + public IndexController getController() { + return controller; + } + + public long getCurrentSyncIndex() { + return controller.getCurrentIndex(); + } + + public Peer getPeer() { + return peer; + } + + public MultiLeaderConfig getConfig() { + return config; + } + + public BlockingQueue getPendingRequest() { + return pendingRequest; + } + + public void stop() { + stopped = true; + } + + public boolean isStopped() { + return stopped; + } + + @Override + public void run() { + logger.info("{}: Dispatcher for {} starts", impl.getThisNode(), peer); + try { + PendingBatch batch; + while (!Thread.interrupted() && !stopped) { + while ((batch = getBatch()).isEmpty()) { + // we may block here if there is no requests in the queue + bufferedRequest.add(pendingRequest.take()); + // If write pressure is low, we simply sleep a little to reduce the number of RPC + if (pendingRequest.size() <= config.getReplication().getMaxRequestPerBatch()) { + Thread.sleep(config.getReplication().getMaxWaitingTimeForAccumulatingBatchInMs()); + } + } + // we may block here if the synchronization pipeline is full + syncStatus.addNextBatch(batch); + // sends batch asynchronously and migrates the retry logic into the callback handler + sendBatchAsync(batch, new DispatchLogHandler(this, batch)); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (Exception e) { + logger.error("Unexpected error in logDispatcher for peer {}", peer, e); + } + logger.info("{}: Dispatcher for {} exits", impl.getThisNode(), peer); + } + + public PendingBatch getBatch() { + PendingBatch batch; + List logBatches = new ArrayList<>(); + long startIndex = syncStatus.getNextSendingIndex(); + long maxIndex = impl.getController().getCurrentIndex() + 1; + long endIndex; + if (bufferedRequest.size() <= config.getReplication().getMaxRequestPerBatch()) { + // Use drainTo instead of poll to reduce lock overhead + pendingRequest.drainTo( + bufferedRequest, + config.getReplication().getMaxRequestPerBatch() - bufferedRequest.size()); + } + if (bufferedRequest.isEmpty()) { + // only execute this after a restart + endIndex = constructBatchFromWAL(startIndex, maxIndex, logBatches); + batch = new PendingBatch(startIndex, endIndex, logBatches); + logger.debug("{} : accumulated a {} from wal", 
impl.getThisNode().getGroupId(), batch); + } else { + Iterator iterator = bufferedRequest.iterator(); + IndexedConsensusRequest prev = iterator.next(); + // Prevents gap between logs. For example, some requests are not written into the queue when + // the queue is full. In this case, requests need to be loaded from the WAL + endIndex = constructBatchFromWAL(startIndex, prev.getSearchIndex(), logBatches); + if (logBatches.size() == config.getReplication().getMaxRequestPerBatch()) { + batch = new PendingBatch(startIndex, endIndex, logBatches); + logger.debug("{} : accumulated a {} from wal", impl.getThisNode().getGroupId(), batch); + return batch; + } + constructBatchIndexedFromConsensusRequest(prev, logBatches); + endIndex = prev.getSearchIndex(); + iterator.remove(); + while (iterator.hasNext() + && logBatches.size() <= config.getReplication().getMaxRequestPerBatch()) { + IndexedConsensusRequest current = iterator.next(); + // Prevents gap between logs. For example, some logs are not written into the queue when + // the queue is full. In this case, requests need to be loaded from the WAL + if (current.getSearchIndex() != prev.getSearchIndex() + 1) { + endIndex = + constructBatchFromWAL(prev.getSearchIndex(), current.getSearchIndex(), logBatches); + if (logBatches.size() == config.getReplication().getMaxRequestPerBatch()) { + batch = new PendingBatch(startIndex, endIndex, logBatches); + logger.debug( + "{} : accumulated a {} from queue and wal", + impl.getThisNode().getGroupId(), + batch); + return batch; + } + } + constructBatchIndexedFromConsensusRequest(current, logBatches); + endIndex = current.getSearchIndex(); + prev = current; + // We might not be able to remove all the elements in the bufferedRequest in the + // current function, but that's fine, we'll continue processing these elements in the + // bufferedRequest the next time we go into the function, they're never lost + iterator.remove(); + } + batch = new PendingBatch(startIndex, endIndex, logBatches); + logger.debug( + "{} : accumulated a {} from queue and wal", impl.getThisNode().getGroupId(), batch); + } + return batch; + } + + public void sendBatchAsync(PendingBatch batch, DispatchLogHandler handler) { + try { + AsyncMultiLeaderServiceClient client = clientManager.borrowClient(peer.getEndpoint()); + TSyncLogReq req = + new TSyncLogReq(peer.getGroupId().convertToTConsensusGroupId(), batch.getBatches()); + client.syncLog(req, handler); + } catch (IOException | TException e) { + logger.error("Can not sync logs to peer {} because", peer, e); + } + } + + public SyncStatus getSyncStatus() { + return syncStatus; + } + + private long constructBatchFromWAL( + long currentIndex, long maxIndex, List logBatches) { + while (currentIndex < maxIndex + && logBatches.size() < config.getReplication().getMaxRequestPerBatch()) { + // TODO iterator + IConsensusRequest data = reader.getReq(currentIndex++); + if (data != null) { + logBatches.add(new TLogBatch(data.serializeToByteBuffer())); + } + } + return currentIndex - 1; + } + + private void constructBatchIndexedFromConsensusRequest( + IndexedConsensusRequest request, List logBatches) { + logBatches.add(new TLogBatch(request.serializeToByteBuffer())); + } + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/logdispatcher/PendingBatch.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/logdispatcher/PendingBatch.java new file mode 100644 index 000000000000..b385f73c974c --- /dev/null +++ 
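A minimal sketch (not part of the patch) of the gap check that getBatch() above relies on: when the search index of the next buffered request does not directly follow the previous one, the missing entries never reached the in-memory queue and must be re-read from the WAL before the batch continues. The indices below are example values.

// Sketch only: detecting a gap between two consecutive IndexedConsensusRequests.
public final class BatchGapSketch {
  static boolean needsWalBackfill(long prevSearchIndex, long currentSearchIndex) {
    // a gap means some requests were dropped on offer (e.g. the pending queue was full)
    return currentSearchIndex != prevSearchIndex + 1;
  }

  public static void main(String[] args) {
    System.out.println(needsWalBackfill(41, 42)); // false: contiguous, take it from the queue
    System.out.println(needsWalBackfill(41, 45)); // true: indices 42..44 must come from the WAL
  }
}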
b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/logdispatcher/PendingBatch.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader.logdispatcher; + +import org.apache.iotdb.consensus.multileader.thrift.TLogBatch; + +import java.util.List; + +public class PendingBatch { + + private final long startIndex; + private final long endIndex; + private final List batches; + // indicates whether this batch has been successfully synchronized to another node + private boolean synced; + + public PendingBatch(long startIndex, long endIndex, List batches) { + this.startIndex = startIndex; + this.endIndex = endIndex; + this.batches = batches; + this.synced = false; + } + + public long getStartIndex() { + return startIndex; + } + + public long getEndIndex() { + return endIndex; + } + + public List getBatches() { + return batches; + } + + public boolean isSynced() { + return synced; + } + + public void setSynced(boolean synced) { + this.synced = synced; + } + + public boolean isEmpty() { + return batches.isEmpty(); + } + + @Override + public String toString() { + return "PendingBatch{" + + "startIndex=" + + startIndex + + ", endIndex=" + + endIndex + + ", size=" + + batches.size() + + '}'; + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/logdispatcher/SyncStatus.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/logdispatcher/SyncStatus.java new file mode 100644 index 000000000000..e9901d931a00 --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/logdispatcher/SyncStatus.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.consensus.multileader.logdispatcher; + +import org.apache.iotdb.commons.utils.TestOnly; +import org.apache.iotdb.consensus.config.MultiLeaderConfig; + +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; + +public class SyncStatus { + + private final MultiLeaderConfig config; + private final IndexController controller; + private final List pendingBatches = new LinkedList<>(); + + public SyncStatus(IndexController controller, MultiLeaderConfig config) { + this.controller = controller; + this.config = config; + } + + /** we may block here if the synchronization pipeline is full */ + public void addNextBatch(PendingBatch batch) throws InterruptedException { + synchronized (this) { + while (pendingBatches.size() >= config.getReplication().getMaxPendingBatch()) { + wait(); + } + pendingBatches.add(batch); + } + } + + /** + * We only set a flag if this batch is not the first one. Notice, We need to confirm that the + * batch in the parameter is actually in pendingBatches, rather than a reference to a different + * object with equal data, so we do not inherit method equals for PendingBatch + */ + public void removeBatch(PendingBatch batch) { + synchronized (this) { + batch.setSynced(true); + if (pendingBatches.size() > 0 && pendingBatches.get(0).equals(batch)) { + Iterator iterator = pendingBatches.iterator(); + PendingBatch current = iterator.next(); + while (current.isSynced()) { + controller.updateAndGet(current.getEndIndex()); + iterator.remove(); + if (iterator.hasNext()) { + current = iterator.next(); + } else { + break; + } + } + // wake up logDispatcherThread that might be blocked + notifyAll(); + } + } + } + + /** Gets the first index that is not currently synchronized */ + public long getNextSendingIndex() { + // we do not use ReentrantReadWriteLock because there will be only one thread reading this field + synchronized (this) { + return 1 + + (pendingBatches.isEmpty() + ? controller.getCurrentIndex() + : pendingBatches.get(pendingBatches.size() - 1).getEndIndex()); + } + } + + @TestOnly + public List getPendingBatches() { + return pendingBatches; + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/service/MultiLeaderRPCService.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/service/MultiLeaderRPCService.java new file mode 100644 index 000000000000..64dadf2aeaaf --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/service/MultiLeaderRPCService.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
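A minimal sketch (not part of the patch) of how SyncStatus above chooses the next index to send: one past the end index of the last batch already in the pending window, or one past the index already confirmed for that peer when the window is empty. The arguments below stand in for IndexController.getCurrentIndex() and the last PendingBatch's endIndex.

// Sketch only: the rule from SyncStatus.getNextSendingIndex().
public final class NextSendingIndexSketch {
  static long nextSendingIndex(boolean windowEmpty, long confirmedIndex, long lastPendingEndIndex) {
    return 1 + (windowEmpty ? confirmedIndex : lastPendingEndIndex);
  }

  public static void main(String[] args) {
    System.out.println(nextSendingIndex(true, 120, 0));    // 121: nothing in flight
    System.out.println(nextSendingIndex(false, 120, 180)); // 181: continue after in-flight batches
  }
}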
+ */ + +package org.apache.iotdb.consensus.multileader.service; + +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.commons.concurrent.ThreadName; +import org.apache.iotdb.commons.conf.IoTDBConstant; +import org.apache.iotdb.commons.exception.runtime.RPCServiceException; +import org.apache.iotdb.commons.service.ServiceType; +import org.apache.iotdb.commons.service.ThriftService; +import org.apache.iotdb.commons.service.ThriftServiceThread; +import org.apache.iotdb.consensus.config.MultiLeaderConfig; +import org.apache.iotdb.consensus.multileader.thrift.MultiLeaderConsensusIService; + +import java.lang.reflect.InvocationTargetException; + +public class MultiLeaderRPCService extends ThriftService implements MultiLeaderRPCServiceMBean { + + private final TEndPoint thisNode; + private final MultiLeaderConfig config; + private MultiLeaderRPCServiceProcessor multiLeaderRPCServiceProcessor; + + public MultiLeaderRPCService(TEndPoint thisNode, MultiLeaderConfig config) { + this.thisNode = thisNode; + this.config = config; + } + + @Override + public ServiceType getID() { + return ServiceType.MULTI_LEADER_CONSENSUS_SERVICE; + } + + @Override + public void initSyncedServiceImpl(Object multiLeaderRPCServiceProcessor) { + this.multiLeaderRPCServiceProcessor = + (MultiLeaderRPCServiceProcessor) multiLeaderRPCServiceProcessor; + super.mbeanName = + String.format( + "%s:%s=%s", this.getClass().getPackage(), IoTDBConstant.JMX_TYPE, getID().getJmxName()); + super.initSyncedServiceImpl(this.multiLeaderRPCServiceProcessor); + } + + @Override + public void initTProcessor() + throws ClassNotFoundException, IllegalAccessException, InstantiationException, + NoSuchMethodException, InvocationTargetException { + processor = new MultiLeaderConsensusIService.Processor<>(multiLeaderRPCServiceProcessor); + } + + @Override + public void initThriftServiceThread() + throws IllegalAccessException, InstantiationException, ClassNotFoundException { + try { + thriftServiceThread = + new ThriftServiceThread( + processor, + getID().getName(), + ThreadName.MULTI_LEADER_CONSENSUS_RPC_CLIENT.getName(), + getBindIP(), + getBindPort(), + config.getRpc().getRpcMaxConcurrentClientNum(), + config.getRpc().getThriftServerAwaitTimeForStopService(), + new MultiLeaderRPCServiceHandler(multiLeaderRPCServiceProcessor), + config.getRpc().isRpcThriftCompressionEnabled()); + } catch (RPCServiceException e) { + throw new IllegalAccessException(e.getMessage()); + } + thriftServiceThread.setName(ThreadName.MULTI_LEADER_CONSENSUS_RPC_SERVER.getName()); + } + + @Override + public String getBindIP() { + return thisNode.getIp(); + } + + @Override + public int getBindPort() { + return thisNode.getPort(); + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/service/MultiLeaderRPCServiceHandler.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/service/MultiLeaderRPCServiceHandler.java new file mode 100644 index 000000000000..889ac7d517d2 --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/service/MultiLeaderRPCServiceHandler.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader.service; + +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.server.ServerContext; +import org.apache.thrift.server.TServerEventHandler; +import org.apache.thrift.transport.TTransport; + +public class MultiLeaderRPCServiceHandler implements TServerEventHandler { + + private final MultiLeaderRPCServiceProcessor processor; + + public MultiLeaderRPCServiceHandler(MultiLeaderRPCServiceProcessor processor) { + this.processor = processor; + } + + @Override + public void preServe() {} + + @Override + public ServerContext createContext(TProtocol input, TProtocol output) { + return null; + } + + @Override + public void deleteContext(ServerContext serverContext, TProtocol input, TProtocol output) { + processor.handleClientExit(); + } + + @Override + public void processContext( + ServerContext serverContext, TTransport inputTransport, TTransport outputTransport) {} +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/service/MultiLeaderRPCServiceMBean.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/service/MultiLeaderRPCServiceMBean.java new file mode 100644 index 000000000000..9e354ac204a4 --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/service/MultiLeaderRPCServiceMBean.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader.service; + +public interface MultiLeaderRPCServiceMBean {} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/service/MultiLeaderRPCServiceProcessor.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/service/MultiLeaderRPCServiceProcessor.java new file mode 100644 index 000000000000..d829f403d0ad --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/service/MultiLeaderRPCServiceProcessor.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader.service; + +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.consensus.ConsensusGroupId; +import org.apache.iotdb.consensus.common.request.ByteBufferConsensusRequest; +import org.apache.iotdb.consensus.multileader.MultiLeaderConsensus; +import org.apache.iotdb.consensus.multileader.MultiLeaderServerImpl; +import org.apache.iotdb.consensus.multileader.thrift.MultiLeaderConsensusIService; +import org.apache.iotdb.consensus.multileader.thrift.TLogBatch; +import org.apache.iotdb.consensus.multileader.thrift.TSyncLogReq; +import org.apache.iotdb.consensus.multileader.thrift.TSyncLogRes; +import org.apache.iotdb.rpc.TSStatusCode; + +import org.apache.thrift.TException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class MultiLeaderRPCServiceProcessor implements MultiLeaderConsensusIService.Iface { + + private final Logger logger = LoggerFactory.getLogger(MultiLeaderRPCServiceProcessor.class); + + private final MultiLeaderConsensus consensus; + + public MultiLeaderRPCServiceProcessor(MultiLeaderConsensus consensus) { + this.consensus = consensus; + } + + @Override + public TSyncLogRes syncLog(TSyncLogReq req) throws TException { + ConsensusGroupId groupId = + ConsensusGroupId.Factory.createFromTConsensusGroupId(req.getConsensusGroupId()); + MultiLeaderServerImpl impl = consensus.getImpl(groupId); + if (impl == null) { + String message = + String.format( + "Unexpected consensusGroupId %s for TSyncLogReq which size is %s", + groupId, req.getBatches().size()); + logger.error(message); + TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()); + status.setMessage(message); + return new TSyncLogRes(Collections.singletonList(status)); + } + List statuses = new ArrayList<>(); + // We use synchronized to ensure atomicity of executing multiple logs + synchronized (impl.getStateMachine()) { + for (TLogBatch batch : req.getBatches()) { + statuses.add( + impl.getStateMachine() + .write( + impl.buildIndexedConsensusRequestForRemoteRequest( + new ByteBufferConsensusRequest(batch.data)))); + } + } + logger.debug("Execute TSyncLogReq for {} with result {}", req.consensusGroupId, statuses); + return new TSyncLogRes(statuses); + } + + public void handleClientExit() {} +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/wal/ConsensusReqReader.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/wal/ConsensusReqReader.java new file mode 100644 index 000000000000..5880e5de2dc2 --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/wal/ConsensusReqReader.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader.wal; + +import org.apache.iotdb.consensus.common.request.IConsensusRequest; + +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** This interface provides search interface for consensus requests via index. */ +public interface ConsensusReqReader { + + long DEFAULT_SEARCH_INDEX = -1; + + /** + * Gets the consensus request at the specified position. + * + * @param index index of the consensus request to return + * @return the consensus request at the specified position, null if the request doesn't exist + */ + IConsensusRequest getReq(long index); + + /** + * Gets the consensus requests from the specified start position. + * + * @param startIndex index of the start consensus request + * @param num number of consensus requests to return, the number of actual returned consensus + * requests may less than this value + * @return the consensus requests from the specified start position + */ + List getReqs(long startIndex, int num); + + /** + * Gets the consensus requests iterator from the specified start position. + * + * @param startIndex index of the start consensus request + * @return the consensus requests iterator from the specified start position. + */ + ReqIterator getReqIterator(long startIndex); + + /** This iterator provides blocking and non-blocking interfaces to read consensus request. */ + interface ReqIterator { + /** Like {@link Iterator#hasNext()} */ + boolean hasNext(); + + /** + * Like {@link Iterator#next()} + * + * @throws java.util.NoSuchElementException if the iteration has no more elements, wait a moment + * or call {@link this#waitForNextReady} for more elements + */ + IConsensusRequest next(); + + /** + * Wait for the next element in the iteration ready, blocked until next element is available. + */ + void waitForNextReady() throws InterruptedException; + + /** + * Wait for the next element in the iteration ready, blocked until next element is available or + * a specified amount of time has elapsed. + */ + void waitForNextReady(long time, TimeUnit unit) throws InterruptedException, TimeoutException; + + /** + * Skips to target position of next element in the iteration
+ * Notice: The correctness of forward skipping should be guaranteed by the caller + * + * @param targetIndex target position of next element in the iteration + */ + void skipTo(long targetIndex); + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/multileader/wal/GetConsensusReqReaderPlan.java b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/wal/GetConsensusReqReaderPlan.java new file mode 100644 index 000000000000..303f2eeb8f4d --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/multileader/wal/GetConsensusReqReaderPlan.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader.wal; + +import org.apache.iotdb.consensus.common.request.IConsensusRequest; + +import java.nio.ByteBuffer; + +public class GetConsensusReqReaderPlan implements IConsensusRequest { + + @Override + public ByteBuffer serializeToByteBuffer() { + // do not need any data, we will use instanceOf + return ByteBuffer.wrap(new byte[0]); + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ApplicationStateMachineProxy.java b/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ApplicationStateMachineProxy.java index 3617a36b0c98..6588d5dbb412 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ApplicationStateMachineProxy.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/ratis/ApplicationStateMachineProxy.java @@ -37,6 +37,7 @@ import org.apache.ratis.statemachine.StateMachineStorage; import org.apache.ratis.statemachine.TransactionContext; import org.apache.ratis.statemachine.impl.BaseStateMachine; +import org.apache.ratis.util.FileUtils; import org.apache.ratis.util.LifeCycle; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -85,8 +86,10 @@ public void reinitialize() { @Override public void pause() { - getLifeCycle().transition(LifeCycle.State.PAUSING); - getLifeCycle().transition(LifeCycle.State.PAUSED); + if (getLifeCycleState() == LifeCycle.State.RUNNING) { + getLifeCycle().transition(LifeCycle.State.PAUSING); + getLifeCycle().transition(LifeCycle.State.PAUSED); + } } @Override @@ -113,8 +116,14 @@ public CompletableFuture applyTransaction(TransactionContext trx) { log.getStateMachineLogEntry().getLogData().asReadOnlyByteBuffer()); } - TSStatus result = applicationStateMachine.write(applicationRequest); - Message ret = new ResponseMessage(result); + Message ret; + try { + TSStatus result = applicationStateMachine.write(applicationRequest); + ret = new ResponseMessage(result); + } catch (Exception rte) { + logger.error("application statemachine throws a runtime exception: ", rte); + ret = Message.valueOf("internal error. 
statemachine throws a runtime exception: " + rte); + } return CompletableFuture.completedFuture(ret); } @@ -141,13 +150,30 @@ public long takeSnapshot() throws IOException { // require the application statemachine to take the latest snapshot String metadata = Utils.getMetadataFromTermIndex(lastApplied); File snapshotDir = snapshotStorage.getSnapshotDir(metadata); + + // delete snapshotDir fully in case of last takeSnapshot() crashed + FileUtils.deleteFully(snapshotDir); + snapshotDir.mkdir(); if (!snapshotDir.isDirectory()) { logger.error("Unable to create snapshotDir at {}", snapshotDir); return RaftLog.INVALID_LOG_INDEX; } - boolean success = applicationStateMachine.takeSnapshot(snapshotDir); - if (!success) { + + boolean applicationTakeSnapshotSuccess = applicationStateMachine.takeSnapshot(snapshotDir); + boolean addTermIndexMetafileSuccess = + snapshotStorage.addTermIndexMetaFile(snapshotDir, metadata); + + if (!applicationTakeSnapshotSuccess || !addTermIndexMetafileSuccess) { + // this takeSnapshot failed, clean up files and directories + // statemachine is supposed to clear snapshotDir on failure + boolean isEmpty = snapshotDir.delete(); + if (!isEmpty) { + logger.warn( + "StateMachine take snapshot failed but leave unexpected remaining files at " + + snapshotDir.getAbsolutePath()); + FileUtils.deleteFully(snapshotDir); + } return RaftLog.INVALID_LOG_INDEX; } return lastApplied.getIndex(); diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/ratis/FileInfoWithDelayedMd5Computing.java b/consensus/src/main/java/org/apache/iotdb/consensus/ratis/FileInfoWithDelayedMd5Computing.java new file mode 100644 index 000000000000..ef584f416ca6 --- /dev/null +++ b/consensus/src/main/java/org/apache/iotdb/consensus/ratis/FileInfoWithDelayedMd5Computing.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.ratis; + +import org.apache.ratis.io.MD5Hash; +import org.apache.ratis.server.storage.FileInfo; +import org.apache.ratis.util.MD5FileUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Path; + +/** + * When Leader sends snapshots to a follower, it will send the file itself together with md5 digest. + * When snapshot file is large, this computation incurs significant overhead. This class implements + * a strategy that delay the computing when the md5 info is first fetched. + * + *
<p>
TODO A better strategy is to calculate the md5 along when sending the InstallSnapshot request + * with file chunks. + */ +public class FileInfoWithDelayedMd5Computing extends FileInfo { + + private static final Logger logger = + LoggerFactory.getLogger(FileInfoWithDelayedMd5Computing.class); + private volatile MD5Hash digest; + + public FileInfoWithDelayedMd5Computing(Path path, MD5Hash fileDigest) { + super(path, fileDigest); + digest = null; + } + + public FileInfoWithDelayedMd5Computing(Path path) { + this(path, null); + } + + // return null iff sync md5 computing failed + @Override + public MD5Hash getFileDigest() { + if (digest == null) { + synchronized (this) { + if (digest == null) { + try { + if (MD5FileUtil.getDigestFileForFile(getPath().toFile()).exists()) { + digest = MD5FileUtil.readStoredMd5ForFile(getPath().toFile()); + } + digest = MD5FileUtil.computeMd5ForFile(getPath().toFile()); + MD5FileUtil.saveMD5File(getPath().toFile(), digest); + } catch (IOException ioException) { + logger.error("compute file digest for {} failed due to {}", getPath(), ioException); + return null; + } + } + } + } + return digest; + } +} diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisClient.java b/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisClient.java index 8e27109533bb..1e6ea9e00001 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisClient.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisClient.java @@ -28,10 +28,15 @@ import org.apache.ratis.client.RaftClientRpc; import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.protocol.RaftGroup; +import org.apache.ratis.protocol.exceptions.RaftException; +import org.apache.ratis.retry.ExponentialBackoffRetry; +import org.apache.ratis.retry.RetryPolicy; +import org.apache.ratis.util.TimeDuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.util.concurrent.TimeUnit; public class RatisClient { private final Logger logger = LoggerFactory.getLogger(RatisClient.class); @@ -94,6 +99,7 @@ public PooledObject makeObject(RaftGroup group) throws Exception { RaftClient.newBuilder() .setProperties(raftProperties) .setRaftGroup(group) + .setRetryPolicy(new RatisRetryPolicy()) .setClientRpc(clientRpc) .build(), clientManager)); @@ -104,4 +110,44 @@ public boolean validateObject(RaftGroup key, PooledObject pooledObj return true; } } + + /** + * RatisRetryPolicy is similar to ExceptionDependentRetry 1. By default, use + * ExponentialBackoffRetry to handle request failure 2. If unexpected IOException is caught, + * immediately fail the request and let application choose retry action. + * + *

potential IOException can be categorized into expected and unexpected 1. expected, instance + * of RaftException, like LeaderNotReady / GroupMisMatch etc. 2. unexpected, IOException which is + * not an instance of RaftException + */ + private static class RatisRetryPolicy implements RetryPolicy { + + private static final Logger logger = LoggerFactory.getLogger(RatisClient.class); + private static final int maxAttempts = 10; + RetryPolicy defaultPolicy; + + public RatisRetryPolicy() { + defaultPolicy = + ExponentialBackoffRetry.newBuilder() + .setBaseSleepTime(TimeDuration.valueOf(100, TimeUnit.MILLISECONDS)) + .setMaxSleepTime(TimeDuration.valueOf(10, TimeUnit.SECONDS)) + .setMaxAttempts(maxAttempts) + .build(); + } + + @Override + public Action handleAttemptFailure(Event event) { + + if (event.getCause() != null) { + if (event.getCause() instanceof IOException + && !(event.getCause() instanceof RaftException)) { + // unexpected. may be caused by statemachine. + logger.debug("raft client request failed and caught exception: ", event.getCause()); + return NO_RETRY_ACTION; + } + } + + return defaultPolicy.handleAttemptFailure(event); + } + } } diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisConsensus.java b/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisConsensus.java index 8d644bbc81fa..e5dec3fdc0b3 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisConsensus.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RatisConsensus.java @@ -36,6 +36,7 @@ import org.apache.iotdb.consensus.common.response.ConsensusGenericResponse; import org.apache.iotdb.consensus.common.response.ConsensusReadResponse; import org.apache.iotdb.consensus.common.response.ConsensusWriteResponse; +import org.apache.iotdb.consensus.config.ConsensusConfig; import org.apache.iotdb.consensus.exception.ConsensusException; import org.apache.iotdb.consensus.exception.ConsensusGroupNotExistException; import org.apache.iotdb.consensus.exception.PeerAlreadyInConsensusGroupException; @@ -44,7 +45,6 @@ import org.apache.commons.pool2.KeyedObjectPool; import org.apache.commons.pool2.impl.GenericKeyedObjectPool; -import org.apache.ratis.client.RaftClient; import org.apache.ratis.client.RaftClientRpc; import org.apache.ratis.conf.Parameters; import org.apache.ratis.conf.RaftProperties; @@ -57,10 +57,12 @@ import org.apache.ratis.protocol.RaftGroup; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftPeer; +import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.protocol.SnapshotManagementRequest; import org.apache.ratis.protocol.exceptions.NotLeaderException; +import org.apache.ratis.server.DivisionInfo; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.RaftServerConfigKeys; -import org.apache.ratis.util.NetUtils; import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -72,6 +74,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -103,21 +106,21 @@ class RatisConsensus implements IConsensus { private static final int DEFAULT_PRIORITY = 0; private static final int LEADER_PRIORITY = 1; - /** - * @param ratisStorageDir different groups of RatisConsensus Peer all share ratisStorageDir as - * root dir - */ - public RatisConsensus(TEndPoint endpoint, File ratisStorageDir, 
IStateMachine.Registry registry) + // TODO make it configurable + private static final int DEFAULT_WAIT_LEADER_READY_TIMEOUT = (int) TimeUnit.SECONDS.toMillis(20); + + public RatisConsensus(ConsensusConfig config, IStateMachine.Registry registry) throws IOException { - String address = Utils.IPAddress(endpoint); - myself = Utils.fromTEndPointAndPriorityToRaftPeer(endpoint, DEFAULT_PRIORITY); + myself = Utils.fromTEndPointAndPriorityToRaftPeer(config.getThisNode(), DEFAULT_PRIORITY); - RaftServerConfigKeys.setStorageDir(properties, Collections.singletonList(ratisStorageDir)); - RaftServerConfigKeys.Snapshot.setAutoTriggerEnabled(properties, true); + System.setProperty( + "org.apache.ratis.thirdparty.io.netty.allocator.useCacheForAllThreads", "false"); + RaftServerConfigKeys.setStorageDir( + properties, Collections.singletonList(new File(config.getStorageDir()))); + GrpcConfigKeys.Server.setPort(properties, config.getThisNode().getPort()); + + Utils.initRatisConfig(properties, config.getRatisConfig()); - // set the port which server listen to in RaftProperty object - final int port = NetUtils.createSocketAddr(address).getPort(); - GrpcConfigKeys.Server.setPort(properties, port); clientRpc = new GrpcFactory(new Parameters()).newRaftClientRpc(ClientId.randomId(), properties); server = @@ -149,13 +152,13 @@ public void stop() throws IOException { */ @Override public ConsensusWriteResponse write( - ConsensusGroupId groupId, IConsensusRequest IConsensusRequest) { + ConsensusGroupId consensusGroupId, IConsensusRequest IConsensusRequest) { // pre-condition: group exists and myself server serves this group - RaftGroupId raftGroupId = Utils.fromConsensusGroupIdToRaftGroupId(groupId); + RaftGroupId raftGroupId = Utils.fromConsensusGroupIdToRaftGroupId(consensusGroupId); RaftGroup raftGroup = getGroupInfo(raftGroupId); if (raftGroup == null || !raftGroup.getPeers().contains(myself)) { - return failedWrite(new ConsensusGroupNotExistException(groupId)); + return failedWrite(new ConsensusGroupNotExistException(consensusGroupId)); } // serialize request into Message @@ -163,22 +166,24 @@ public ConsensusWriteResponse write( // 1. 
first try the local server RaftClientRequest clientRequest = - buildRawRequest(groupId, message, RaftClientRequest.writeRequestType()); + buildRawRequest(raftGroupId, message, RaftClientRequest.writeRequestType()); RaftClientReply localServerReply; RaftPeer suggestedLeader = null; - try { - localServerReply = server.submitClientRequest(clientRequest); - if (localServerReply.isSuccess()) { - ResponseMessage responseMessage = (ResponseMessage) localServerReply.getMessage(); - TSStatus writeStatus = (TSStatus) responseMessage.getContentHolder(); - return ConsensusWriteResponse.newBuilder().setStatus(writeStatus).build(); + if (isLeader(consensusGroupId) && waitUntilLeaderReady(raftGroupId)) { + try { + localServerReply = server.submitClientRequest(clientRequest); + if (localServerReply.isSuccess()) { + ResponseMessage responseMessage = (ResponseMessage) localServerReply.getMessage(); + TSStatus writeStatus = (TSStatus) responseMessage.getContentHolder(); + return ConsensusWriteResponse.newBuilder().setStatus(writeStatus).build(); + } + NotLeaderException ex = localServerReply.getNotLeaderException(); + if (ex != null) { // local server is not leader + suggestedLeader = ex.getSuggestedLeader(); + } + } catch (IOException e) { + return failedWrite(new RatisRequestFailedException(e)); } - NotLeaderException ex = localServerReply.getNotLeaderException(); - if (ex != null) { // local server is not leader - suggestedLeader = ex.getSuggestedLeader(); - } - } catch (IOException e) { - return failedWrite(new RatisRequestFailedException(e)); } // 2. try raft client @@ -209,11 +214,12 @@ public ConsensusWriteResponse write( /** Read directly from LOCAL COPY notice: May read stale data (not linearizable) */ @Override - public ConsensusReadResponse read(ConsensusGroupId groupId, IConsensusRequest IConsensusRequest) { - - RaftGroup group = getGroupInfo(Utils.fromConsensusGroupIdToRaftGroupId(groupId)); + public ConsensusReadResponse read( + ConsensusGroupId consensusGroupId, IConsensusRequest IConsensusRequest) { + RaftGroupId groupId = Utils.fromConsensusGroupIdToRaftGroupId(consensusGroupId); + RaftGroup group = getGroupInfo(groupId); if (group == null || !group.getPeers().contains(myself)) { - return failedRead(new ConsensusGroupNotExistException(groupId)); + return failedRead(new ConsensusGroupNotExistException(consensusGroupId)); } RaftClientReply reply; @@ -297,7 +303,7 @@ public ConsensusGenericResponse removeConsensusGroup(ConsensusGroupId groupId) { client .getRaftClient() .getGroupManagementApi(myself.getId()) - .remove(raftGroupId, false, false); + .remove(raftGroupId, true, false); if (!reply.isSuccess()) { return failed(new RatisRequestFailedException(reply.getException())); } @@ -404,14 +410,19 @@ public ConsensusGenericResponse changePeer(ConsensusGroupId groupId, List } /** - * transferLeader in Ratis implementation is not guaranteed to transfer leadership to the - * designated peer Thus, it may produce undetermined results. Caller should not count on this API. + * NOTICE: transferLeader *does not guarantee* the leader be transferred to newLeader. + * transferLeader is implemented by 1. modify peer priority 2. ask current leader to step down + * + *

1. call setConfiguration to upgrade newLeader's priority to 1 and degrade all follower peers + * to 0. By default, Ratis gives every Raft peer the same priority 0. Ratis does not allow a peer + * with priority <= currentLeader.priority to become the leader, so we have to upgrade the new + * leader's priority to 1. + * + *

2. call transferLeadership to force current leader to step down and raise a new round of + * election. In this election, the newLeader peer with priority 1 is guaranteed to be elected. */ @Override public ConsensusGenericResponse transferLeader(ConsensusGroupId groupId, Peer newLeader) { - // By default, Ratis gives every Raft Peer same priority 0 - // Ratis does not allow a peer.priority <= currentLeader.priority to becomes the leader - // So we have to enhance to leader's priority // first fetch the newest information @@ -446,11 +457,12 @@ public ConsensusGenericResponse transferLeader(ConsensusGroupId groupId, Peer ne return failed(new RatisRequestFailedException(configChangeReply.getException())); } // TODO tuning for timeoutMs - reply = client.getRaftClient().admin().transferLeadership(newRaftLeader.getId(), 5000); + // when newLeaderPeerId == null, ratis forces current leader to step down and raise new + // election + reply = client.getRaftClient().admin().transferLeadership(null, 5000); if (!reply.isSuccess()) { return failed(new RatisRequestFailedException(reply.getException())); } - } catch (IOException e) { return failed(new RatisRequestFailedException(e)); } finally { @@ -476,27 +488,91 @@ public boolean isLeader(ConsensusGroupId groupId) { return isLeader; } - @Override - public Peer getLeader(ConsensusGroupId groupId) { - if (isLeader(groupId)) { - return new Peer(groupId, Utils.formRaftPeerIdToTEndPoint(myself.getId())); + private boolean waitUntilLeaderReady(RaftGroupId groupId) { + DivisionInfo divisionInfo; + try { + divisionInfo = server.getDivision(groupId).getInfo(); + } catch (IOException e) { + // if the query fails, simply return not leader + logger.info("isLeaderReady checking failed with exception: ", e); + return false; } + long startTime = System.currentTimeMillis(); + try { + while (divisionInfo.isLeader() && !divisionInfo.isLeaderReady()) { + Thread.sleep(10); + long consumedTime = System.currentTimeMillis() - startTime; + if (consumedTime >= DEFAULT_WAIT_LEADER_READY_TIMEOUT) { + logger.warn("{}: leader is still not ready after {}ms", groupId, consumedTime); + return false; + } + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.warn("Unexpected interruption", e); + return false; + } + return divisionInfo.isLeader(); + } + /** + * returns the known leader to the given group. NOTICE: if the local peer isn't a member of given + * group, getLeader will return null. + * + * @return null if local peer isn't in group, otherwise group leader. 
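Because getLeader deliberately returns null both when the local peer does not serve the group and when no leader is currently known, callers should treat null as "ask again later" rather than as a hard error. A minimal caller sketch under that assumption (the helper name and the returned strings are illustrative, not part of this patch):

    import org.apache.iotdb.commons.consensus.ConsensusGroupId;
    import org.apache.iotdb.consensus.IConsensus;
    import org.apache.iotdb.consensus.common.Peer;

    final class LeaderLookupSketch {
      // Hypothetical helper showing how a caller might interpret getLeader()'s null result.
      static String describeLeader(IConsensus consensus, ConsensusGroupId groupId) {
        Peer leader = consensus.getLeader(groupId);
        if (leader == null) {
          // Either this node is not in the group, or leadership is still being established:
          // back off briefly and retry instead of failing the request outright.
          return "leader unknown";
        }
        return consensus.isLeader(groupId)
            ? "this node is the leader"
            : "leader at " + leader.getEndpoint();
      }
    }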
+ */ + @Override + public Peer getLeader(ConsensusGroupId groupId) { RaftGroupId raftGroupId = Utils.fromConsensusGroupIdToRaftGroupId(groupId); - RaftClient client; + RaftPeerId leaderId; + try { - client = server.getDivision(raftGroupId).getRaftClient(); + leaderId = server.getDivision(raftGroupId).getInfo().getLeaderId(); } catch (IOException e) { - logger.warn("cannot find raft client for group " + groupId); + logger.warn("fetch division info for group " + groupId + " failed due to: ", e); return null; } - TEndPoint leaderEndpoint = Utils.formRaftPeerIdToTEndPoint(client.getLeaderId()); + if (leaderId == null) { + return null; + } + TEndPoint leaderEndpoint = Utils.formRaftPeerIdToTEndPoint(leaderId); return new Peer(groupId, leaderEndpoint); } + @Override + public List getAllConsensusGroupIds() { + List ids = new ArrayList<>(); + server + .getGroupIds() + .forEach( + groupId -> { + ids.add(Utils.fromRaftGroupIdToConsensusGroupId(groupId)); + }); + return ids; + } + @Override public ConsensusGenericResponse triggerSnapshot(ConsensusGroupId groupId) { - return ConsensusGenericResponse.newBuilder().setSuccess(false).build(); + RaftGroupId raftGroupId = Utils.fromConsensusGroupIdToRaftGroupId(groupId); + RaftGroup groupInfo = getGroupInfo(raftGroupId); + + if (groupInfo == null || !groupInfo.getPeers().contains(myself)) { + return failed(new ConsensusGroupNotExistException(groupId)); + } + + // TODO tuning snapshot create timeout + SnapshotManagementRequest request = + SnapshotManagementRequest.newCreate( + localFakeId, myself.getId(), raftGroupId, localFakeCallId.incrementAndGet(), 30000); + + RaftClientReply reply; + try { + reply = server.snapshotManagement(request); + } catch (IOException ioException) { + return failed(new RatisRequestFailedException(ioException)); + } + + return ConsensusGenericResponse.newBuilder().setSuccess(reply.isSuccess()).build(); } private ConsensusGenericResponse failed(ConsensusException e) { @@ -512,12 +588,12 @@ private ConsensusReadResponse failedRead(ConsensusException e) { } private RaftClientRequest buildRawRequest( - ConsensusGroupId groupId, Message message, RaftClientRequest.Type type) { + RaftGroupId groupId, Message message, RaftClientRequest.Type type) { return RaftClientRequest.newBuilder() .setServerId(server.getId()) .setClientId(localFakeId) .setCallId(localFakeCallId.incrementAndGet()) - .setGroupId(Utils.fromConsensusGroupIdToRaftGroupId(groupId)) + .setGroupId(groupId) .setType(type) .setMessage(message) .build(); @@ -534,7 +610,7 @@ private RaftGroup getGroupInfo(RaftGroupId raftGroupId) { lastSeen.put(raftGroupId, raftGroup); } } catch (IOException e) { - logger.debug("get group failed ", e); + logger.debug("get group {} failed ", raftGroupId, e); } return raftGroup; } diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RequestMessage.java b/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RequestMessage.java index 242573e5be5d..2067a54e420d 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RequestMessage.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/ratis/RequestMessage.java @@ -18,19 +18,16 @@ */ package org.apache.iotdb.consensus.ratis; -import org.apache.iotdb.consensus.common.request.ByteBufferConsensusRequest; import org.apache.iotdb.consensus.common.request.IConsensusRequest; import org.apache.ratis.protocol.Message; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; - -import java.nio.ByteBuffer; +import 
org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; public class RequestMessage implements Message { private final IConsensusRequest actualRequest; private volatile ByteString serializedContent; - private static final int DEFAULT_BUFFER_SIZE = 2048 * 10; public RequestMessage(IConsensusRequest request) { this.actualRequest = request; @@ -46,18 +43,8 @@ public ByteString getContent() { if (serializedContent == null) { synchronized (this) { if (serializedContent == null) { - ByteBufferConsensusRequest req; - if (actualRequest instanceof ByteBufferConsensusRequest) { - req = (ByteBufferConsensusRequest) actualRequest; - serializedContent = ByteString.copyFrom(req.getContent()); - req.getContent().flip(); - } else { - // TODO Pooling - ByteBuffer byteBuffer = ByteBuffer.allocate(DEFAULT_BUFFER_SIZE); - actualRequest.serializeRequest(byteBuffer); - byteBuffer.flip(); - serializedContent = ByteString.copyFrom(byteBuffer); - } + serializedContent = + UnsafeByteOperations.unsafeWrap(actualRequest.serializeToByteBuffer()); } } } diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/ratis/SnapshotStorage.java b/consensus/src/main/java/org/apache/iotdb/consensus/ratis/SnapshotStorage.java index 8d6d2224a38b..bf70919ff8a2 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/ratis/SnapshotStorage.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/ratis/SnapshotStorage.java @@ -20,7 +20,6 @@ import org.apache.iotdb.consensus.IStateMachine; -import org.apache.ratis.io.MD5Hash; import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.server.storage.FileInfo; import org.apache.ratis.server.storage.RaftStorage; @@ -29,31 +28,28 @@ import org.apache.ratis.statemachine.StateMachineStorage; import org.apache.ratis.statemachine.impl.FileListSnapshotInfo; import org.apache.ratis.util.FileUtils; -import org.apache.ratis.util.MD5FileUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; import java.nio.file.DirectoryStream; +import java.nio.file.FileVisitResult; +import java.nio.file.FileVisitor; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; -import java.util.Objects; -/** - * TODO: Warning, currently in Ratis 2.2.0, there is a bug in installSnapshot. In subsequent - * installSnapshot, a follower may fail to install while the leader assume it success. This bug will - * be triggered when the snapshot threshold is low. This is fixed in current Ratis Master, and - * hopefully will be introduced in Ratis 2.3.0. 
- */ public class SnapshotStorage implements StateMachineStorage { + private final Logger logger = LoggerFactory.getLogger(SnapshotStorage.class); private final IStateMachine applicationStateMachine; + private final String META_FILE_PREFIX = ".ratis_meta."; private File stateMachineDir; - private final Logger logger = LoggerFactory.getLogger(SnapshotStorage.class); public SnapshotStorage(IStateMachine applicationStateMachine) { this.applicationStateMachine = applicationStateMachine; @@ -68,7 +64,9 @@ private Path[] getSortedSnapshotDirPaths() { ArrayList snapshotPaths = new ArrayList<>(); try (DirectoryStream stream = Files.newDirectoryStream(stateMachineDir.toPath())) { for (Path path : stream) { - snapshotPaths.add(path); + if (path.toFile().isDirectory()) { + snapshotPaths.add(path); + } } } catch (IOException exception) { logger.warn("cannot construct snapshot directory stream ", exception); @@ -87,6 +85,7 @@ private Path[] getSortedSnapshotDirPaths() { } public File findLatestSnapshotDir() { + moveSnapshotFileToSubDirectory(); Path[] snapshots = getSortedSnapshotDirPaths(); if (snapshots == null || snapshots.length == 0) { return null; @@ -94,6 +93,46 @@ public File findLatestSnapshotDir() { return snapshots[snapshots.length - 1].toFile(); } + private List getAllFilesUnder(File rootDir) { + List allFiles = new ArrayList<>(); + try { + Files.walkFileTree( + rootDir.toPath(), + new FileVisitor() { + @Override + public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) + throws IOException { + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) + throws IOException { + if (attrs.isRegularFile()) { + allFiles.add(file); + } + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException { + logger.info("visit file {} failed due to {}", file.toAbsolutePath(), exc); + return FileVisitResult.TERMINATE; + } + + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException exc) + throws IOException { + return FileVisitResult.CONTINUE; + } + }); + } catch (IOException ioException) { + logger.error("IOException occurred during listing snapshot directory: ", ioException); + return Collections.emptyList(); + } + return allFiles; + } + @Override public SnapshotInfo getLatestSnapshot() { File latestSnapshotDir = findLatestSnapshotDir(); @@ -103,15 +142,8 @@ public SnapshotInfo getLatestSnapshot() { TermIndex snapshotTermIndex = Utils.getTermIndexFromDir(latestSnapshotDir); List fileInfos = new ArrayList<>(); - for (File file : Objects.requireNonNull(latestSnapshotDir.listFiles())) { - Path filePath = file.toPath(); - MD5Hash fileHash = null; - try { - fileHash = MD5FileUtil.computeMd5ForFile(file); - } catch (IOException e) { - logger.error("read file info failed for snapshot file ", e); - } - FileInfo fileInfo = new FileInfo(filePath, fileHash); + for (Path file : getAllFilesUnder(latestSnapshotDir)) { + FileInfo fileInfo = new FileInfoWithDelayedMd5Computing(file); fileInfos.add(fileInfo); } @@ -141,4 +173,84 @@ public File getStateMachineDir() { public File getSnapshotDir(String snapshotMetadata) { return new File(stateMachineDir.getAbsolutePath() + File.separator + snapshotMetadata); } + + /** + * Currently, we name the snapshotDir with Term_Index so that we can tell which directory contains + * the latest snapshot files. 
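To make the naming scheme concrete: a snapshot taken at term 3 and log index 39 lives in a directory named 3_39 under the statemachine dir, and the marker file added below is .ratis_meta.3_39 (matching the example path in getMetafilePath). The metadata string round-trips through the Utils helpers later in this patch; a small sketch, with the directory layout only described in comments:

    import org.apache.ratis.server.protocol.TermIndex;

    final class SnapshotNamingSketch {
      public static void main(String[] args) {
        TermIndex lastApplied = TermIndex.valueOf(3, 39);
        // The metadata string is "term_index", here "3_39"; it names the snapshot
        // directory (e.g. .../sm/3_39/) and suffixes the marker file ".ratis_meta.3_39".
        String metadata = lastApplied.getTerm() + "_" + lastApplied.getIndex();
        // Parsing goes the other way: split on '_' to recover the TermIndex.
        String[] items = metadata.split("_");
        TermIndex recovered =
            TermIndex.valueOf(Long.parseLong(items[0]), Long.parseLong(items[1]));
        System.out.println(metadata + " -> " + recovered);
      }
    }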
Unfortunately, when the leader installs a snapshot to a slow follower, + * the current Ratis implementation will flatten the directory and place all the snapshots directly + * under the statemachine dir. Under this scenario, we cannot restore Term_Index from the directory name. + * We decided to add an empty metadata file containing only Term_Index into the snapshotDir. This + * metadata file will be installed along with the application snapshot files, so that the Term_Index + * information is kept during InstallSnapshot. + */ + public boolean addTermIndexMetaFile(File snapshotDir, String termIndexMetadata) { + File snapshotMetaFile = new File(getMetafilePath(snapshotDir, termIndexMetadata)); + try { + return snapshotMetaFile.createNewFile(); + } catch (IOException e) { + logger.warn("cannot create snapshot metafile: ", e); + return false; + } + } + + private String getMetafilePath(File snapshotDir, String termIndexMetadata) { + // e.g. /_sm/3_39/.ratis_meta.3_39 + return snapshotDir.getAbsolutePath() + File.separator + META_FILE_PREFIX + termIndexMetadata; + } + + private String getMetafileMatcherRegex() { + // meta file should always end with term_index + return META_FILE_PREFIX + "\\d+_\\d+$"; + } + + /** + * After the leader installs a snapshot to a slow follower, Ratis will put all snapshot files directly + * under statemachineDir. We need to handle this special scenario and rearrange these files into the + * appropriate sub-directory: this function moves all snapshot files directly under /sm to + * /sm/term_index/. + */ + void moveSnapshotFileToSubDirectory() { + File[] potentialMetafile = + stateMachineDir.listFiles((dir, name) -> name.matches(getMetafileMatcherRegex())); + if (potentialMetafile == null || potentialMetafile.length == 0) { + // the statemachine dir contains no direct metafile + return; + } + String metadata = potentialMetafile[0].getName().substring(META_FILE_PREFIX.length()); + + File snapshotDir = getSnapshotDir(metadata); + snapshotDir.mkdir(); + + File[] snapshotFiles = stateMachineDir.listFiles(); + + // move files to snapshotDir; if an error occurs, delete snapshotDir + try { + if (snapshotFiles == null) { + logger.error( + "An unexpected condition triggered.
please check implementation " + + this.getClass().getName()); + FileUtils.deleteFully(snapshotDir); + return; + } + + for (File file : snapshotFiles) { + if (file.equals(snapshotDir)) { + continue; + } + boolean success = file.renameTo(new File(snapshotDir + File.separator + file.getName())); + if (!success) { + logger.warn( + "move snapshot file " + + file.getAbsolutePath() + + " to sub-directory " + + snapshotDir.getAbsolutePath() + + "failed"); + FileUtils.deleteFully(snapshotDir); + break; + } + } + } catch (IOException e) { + logger.warn("delete directory failed: ", e); + } + } } diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/ratis/Utils.java b/consensus/src/main/java/org/apache/iotdb/consensus/ratis/Utils.java index e1773c7f8066..7d3071ba9858 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/ratis/Utils.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/ratis/Utils.java @@ -22,11 +22,16 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.consensus.ConsensusGroupId; import org.apache.iotdb.consensus.common.Peer; +import org.apache.iotdb.consensus.config.RatisConfig; +import org.apache.ratis.client.RaftClientConfigKeys; +import org.apache.ratis.conf.RaftProperties; +import org.apache.ratis.grpc.GrpcConfigKeys; import org.apache.ratis.proto.RaftProtos.RaftPeerProto; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.server.RaftServerConfigKeys; import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.thrift.TException; @@ -42,10 +47,16 @@ public class Utils { private static final int tempBufferSize = 1024; private static final byte PADDING_MAGIC = 0x47; - public static String IPAddress(TEndPoint endpoint) { + private Utils() {} + + public static String HostAddress(TEndPoint endpoint) { return String.format("%s:%d", endpoint.getIp(), endpoint.getPort()); } + public static String fromTEndPointToString(TEndPoint endpoint) { + return String.format("%s_%d", endpoint.getIp(), endpoint.getPort()); + } + /** Encode the ConsensusGroupId into 6 bytes: 2 Bytes for Group Type and 4 Bytes for Group ID */ public static long groupEncode(ConsensusGroupId consensusGroupId) { // use abbreviations to prevent overflow @@ -56,11 +67,11 @@ public static long groupEncode(ConsensusGroupId consensusGroupId) { } public static RaftPeerId fromTEndPointToRaftPeerId(TEndPoint endpoint) { - return RaftPeerId.valueOf(String.format("%s-%d", endpoint.getIp(), endpoint.getPort())); + return RaftPeerId.valueOf(fromTEndPointToString(endpoint)); } public static TEndPoint formRaftPeerIdToTEndPoint(RaftPeerId id) { - String[] items = id.toString().split("-"); + String[] items = id.toString().split("_"); return new TEndPoint(items[0], Integer.parseInt(items[1])); } @@ -73,7 +84,7 @@ public static TEndPoint formRaftPeerProtoToTEndPoint(RaftPeerProto proto) { public static RaftPeer fromTEndPointAndPriorityToRaftPeer(TEndPoint endpoint, int priority) { return RaftPeer.newBuilder() .setId(fromTEndPointToRaftPeerId(endpoint)) - .setAddress(IPAddress(endpoint)) + .setAddress(HostAddress(endpoint)) .setPriority(priority) .build(); } @@ -112,7 +123,7 @@ public static RaftGroupId fromConsensusGroupIdToRaftGroupId(ConsensusGroupId con /** Given raftGroupId, decrypt ConsensusGroupId out of it */ public static ConsensusGroupId fromRaftGroupIdToConsensusGroupId(RaftGroupId 
raftGroupId) { byte[] padded = raftGroupId.toByteString().toByteArray(); - long type = (padded[10] << 8) + padded[11]; + long type = (long) ((padded[10] & 0xff) << 8) + (padded[11] & 0xff); ByteBuffer byteBuffer = ByteBuffer.allocate(Integer.BYTES); byteBuffer.put(padded, 12, 4); byteBuffer.flip(); @@ -141,8 +152,71 @@ public static String getMetadataFromTermIndex(TermIndex termIndex) { } public static TermIndex getTermIndexFromDir(File snapshotDir) { - String ordinal = snapshotDir.getName(); - String[] items = ordinal.split("_"); + return getTermIndexFromMetadataString(snapshotDir.getName()); + } + + public static TermIndex getTermIndexFromMetadataString(String metadata) { + String[] items = metadata.split("_"); return TermIndex.valueOf(Long.parseLong(items[0]), Long.parseLong(items[1])); } + + public static void initRatisConfig(RaftProperties properties, RatisConfig config) { + GrpcConfigKeys.setMessageSizeMax(properties, config.getGrpc().getMessageSizeMax()); + GrpcConfigKeys.setFlowControlWindow(properties, config.getGrpc().getFlowControlWindow()); + GrpcConfigKeys.Server.setAsyncRequestThreadPoolCached( + properties, config.getGrpc().isAsyncRequestThreadPoolCached()); + GrpcConfigKeys.Server.setAsyncRequestThreadPoolSize( + properties, config.getGrpc().getAsyncRequestThreadPoolSize()); + GrpcConfigKeys.Server.setLeaderOutstandingAppendsMax( + properties, config.getGrpc().getLeaderOutstandingAppendsMax()); + + RaftServerConfigKeys.Rpc.setSlownessTimeout(properties, config.getRpc().getSlownessTimeout()); + RaftServerConfigKeys.Rpc.setTimeoutMin(properties, config.getRpc().getTimeoutMin()); + RaftServerConfigKeys.Rpc.setTimeoutMax(properties, config.getRpc().getTimeoutMax()); + RaftServerConfigKeys.Rpc.setSleepTime(properties, config.getRpc().getSleepTime()); + RaftClientConfigKeys.Rpc.setRequestTimeout(properties, config.getRpc().getRequestTimeout()); + + RaftServerConfigKeys.LeaderElection.setLeaderStepDownWaitTime( + properties, config.getLeaderElection().getLeaderStepDownWaitTimeKey()); + RaftServerConfigKeys.LeaderElection.setPreVote( + properties, config.getLeaderElection().isPreVote()); + + RaftServerConfigKeys.Snapshot.setAutoTriggerEnabled( + properties, config.getSnapshot().isAutoTriggerEnabled()); + RaftServerConfigKeys.Snapshot.setAutoTriggerThreshold( + properties, config.getSnapshot().getAutoTriggerThreshold()); + RaftServerConfigKeys.Snapshot.setCreationGap(properties, config.getSnapshot().getCreationGap()); + RaftServerConfigKeys.Snapshot.setRetentionFileNum( + properties, config.getSnapshot().getRetentionFileNum()); + + RaftServerConfigKeys.ThreadPool.setClientCached( + properties, config.getThreadPool().isClientCached()); + RaftServerConfigKeys.ThreadPool.setClientSize( + properties, config.getThreadPool().getClientSize()); + RaftServerConfigKeys.ThreadPool.setProxyCached( + properties, config.getThreadPool().isProxyCached()); + RaftServerConfigKeys.ThreadPool.setProxySize(properties, config.getThreadPool().getProxySize()); + RaftServerConfigKeys.ThreadPool.setServerCached( + properties, config.getThreadPool().isServerCached()); + RaftServerConfigKeys.ThreadPool.setServerSize( + properties, config.getThreadPool().getServerSize()); + + RaftServerConfigKeys.Log.setUseMemory(properties, config.getLog().isUseMemory()); + RaftServerConfigKeys.Log.setQueueElementLimit( + properties, config.getLog().getQueueElementLimit()); + RaftServerConfigKeys.Log.setQueueByteLimit(properties, config.getLog().getQueueByteLimit()); + RaftServerConfigKeys.Log.setPurgeGap(properties, 
config.getLog().getPurgeGap()); + RaftServerConfigKeys.Log.setPurgeUptoSnapshotIndex( + properties, config.getLog().isPurgeUptoSnapshotIndex()); + RaftServerConfigKeys.Log.setSegmentSizeMax(properties, config.getLog().getSegmentSizeMax()); + RaftServerConfigKeys.Log.setSegmentCacheNumMax( + properties, config.getLog().getSegmentCacheNumMax()); + RaftServerConfigKeys.Log.setSegmentCacheSizeMax( + properties, config.getLog().getSegmentCacheSizeMax()); + RaftServerConfigKeys.Log.setPreallocatedSize(properties, config.getLog().getPreallocatedSize()); + RaftServerConfigKeys.Log.setWriteBufferSize(properties, config.getLog().getWriteBufferSize()); + RaftServerConfigKeys.Log.setForceSyncNum(properties, config.getLog().getForceSyncNum()); + RaftServerConfigKeys.Log.setUnsafeFlushEnabled( + properties, config.getLog().isUnsafeFlushEnabled()); + } } diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/standalone/StandAloneConsensus.java b/consensus/src/main/java/org/apache/iotdb/consensus/standalone/StandAloneConsensus.java index 4c2100d1e100..d87e27837ff2 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/standalone/StandAloneConsensus.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/standalone/StandAloneConsensus.java @@ -19,9 +19,9 @@ package org.apache.iotdb.consensus.standalone; -import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType; import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.commons.consensus.ConsensusGroupId; +import org.apache.iotdb.commons.utils.FileUtils; import org.apache.iotdb.consensus.IConsensus; import org.apache.iotdb.consensus.IStateMachine; import org.apache.iotdb.consensus.IStateMachine.Registry; @@ -30,6 +30,7 @@ import org.apache.iotdb.consensus.common.response.ConsensusGenericResponse; import org.apache.iotdb.consensus.common.response.ConsensusReadResponse; import org.apache.iotdb.consensus.common.response.ConsensusWriteResponse; +import org.apache.iotdb.consensus.config.ConsensusConfig; import org.apache.iotdb.consensus.exception.ConsensusGroupAlreadyExistException; import org.apache.iotdb.consensus.exception.ConsensusGroupNotExistException; import org.apache.iotdb.consensus.exception.IllegalPeerEndpointException; @@ -43,9 +44,9 @@ import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; @@ -64,9 +65,9 @@ class StandAloneConsensus implements IConsensus { private final Map stateMachineMap = new ConcurrentHashMap<>(); - public StandAloneConsensus(TEndPoint thisNode, File storageDir, Registry registry) { - this.thisNode = thisNode; - this.storageDir = storageDir; + public StandAloneConsensus(ConsensusConfig config, Registry registry) { + this.thisNode = config.getThisNode(); + this.storageDir = new File(config.getStorageDir()); this.registry = registry; } @@ -86,19 +87,21 @@ private void initAndRecover() throws IOException { String[] items = path.getFileName().toString().split("_"); ConsensusGroupId consensusGroupId = ConsensusGroupId.Factory.create( - TConsensusGroupType.valueOf(items[0]).getValue(), Integer.parseInt(items[1])); - TEndPoint endPoint = new TEndPoint(items[2], Integer.parseInt(items[3])); - stateMachineMap.put( - consensusGroupId, + Integer.parseInt(items[0]), Integer.parseInt(items[1])); + StandAloneServerImpl consensus = new StandAloneServerImpl( - new 
Peer(consensusGroupId, endPoint), registry.apply(consensusGroupId))); + new Peer(consensusGroupId, thisNode), registry.apply(consensusGroupId)); + stateMachineMap.put(consensusGroupId, consensus); + consensus.start(); } } } } @Override - public void stop() throws IOException {} + public void stop() throws IOException { + stateMachineMap.values().parallelStream().forEach(StandAloneServerImpl::stop); + } @Override public ConsensusWriteResponse write(ConsensusGroupId groupId, IConsensusRequest request) { @@ -130,15 +133,15 @@ public ConsensusGenericResponse addConsensusGroup(ConsensusGroupId groupId, List .setException(new IllegalPeerNumException(consensusGroupSize)) .build(); } - if (!Objects.equals(thisNode, peers.get(0).getEndpoint())) { + if (!peers.contains(new Peer(groupId, thisNode))) { return ConsensusGenericResponse.newBuilder() - .setException(new IllegalPeerEndpointException(thisNode, peers.get(0).getEndpoint())) + .setException(new IllegalPeerEndpointException(thisNode, peers)) .build(); } AtomicBoolean exist = new AtomicBoolean(true); stateMachineMap.computeIfAbsent( groupId, - (k) -> { + k -> { exist.set(false); StandAloneServerImpl impl = new StandAloneServerImpl(peers.get(0), registry.apply(groupId)); @@ -166,11 +169,7 @@ public ConsensusGenericResponse removeConsensusGroup(ConsensusGroupId groupId) { (k, v) -> { exist.set(true); v.stop(); - String path = buildPeerDir(groupId); - File file = new File(path); - if (!file.delete()) { - logger.warn("Unable to delete consensus dir for group {} at {}", groupId, path); - } + FileUtils.deleteDirectory(new File(buildPeerDir(groupId))); return null; }); @@ -220,15 +219,12 @@ public Peer getLeader(ConsensusGroupId groupId) { return new Peer(groupId, thisNode); } + @Override + public List getAllConsensusGroupIds() { + return new ArrayList<>(stateMachineMap.keySet()); + } + private String buildPeerDir(ConsensusGroupId groupId) { - return storageDir - + File.separator - + groupId.getType() - + "_" - + groupId.getId() - + "_" - + thisNode.getIp() - + "_" - + thisNode.getPort(); + return storageDir + File.separator + groupId.getType().getValue() + "_" + groupId.getId(); } } diff --git a/consensus/src/main/java/org/apache/iotdb/consensus/standalone/StandAloneServerImpl.java b/consensus/src/main/java/org/apache/iotdb/consensus/standalone/StandAloneServerImpl.java index 309b86371b31..8438a5fbe487 100644 --- a/consensus/src/main/java/org/apache/iotdb/consensus/standalone/StandAloneServerImpl.java +++ b/consensus/src/main/java/org/apache/iotdb/consensus/standalone/StandAloneServerImpl.java @@ -48,6 +48,8 @@ public IStateMachine getStateMachine() { @Override public void start() { stateMachine.start(); + // Notify itself as the leader + stateMachine.event().notifyLeaderChanged(peer.getGroupId(), peer.getEndpoint()); } @Override diff --git a/consensus/src/test/java/org/apache/iotdb/consensus/multileader/MultiLeaderConsensusTest.java b/consensus/src/test/java/org/apache/iotdb/consensus/multileader/MultiLeaderConsensusTest.java new file mode 100644 index 000000000000..320d4d3d272f --- /dev/null +++ b/consensus/src/test/java/org/apache/iotdb/consensus/multileader/MultiLeaderConsensusTest.java @@ -0,0 +1,433 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader; + +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.consensus.ConsensusGroupId; +import org.apache.iotdb.commons.consensus.DataRegionId; +import org.apache.iotdb.consensus.ConsensusFactory; +import org.apache.iotdb.consensus.IStateMachine; +import org.apache.iotdb.consensus.common.ConsensusGroup; +import org.apache.iotdb.consensus.common.DataSet; +import org.apache.iotdb.consensus.common.Peer; +import org.apache.iotdb.consensus.common.request.ByteBufferConsensusRequest; +import org.apache.iotdb.consensus.common.request.IConsensusRequest; +import org.apache.iotdb.consensus.common.request.IndexedConsensusRequest; +import org.apache.iotdb.consensus.config.ConsensusConfig; +import org.apache.iotdb.consensus.multileader.logdispatcher.IndexController; +import org.apache.iotdb.consensus.multileader.wal.ConsensusReqReader; +import org.apache.iotdb.consensus.multileader.wal.GetConsensusReqReaderPlan; +import org.apache.iotdb.tsfile.utils.PublicBAOS; + +import org.apache.ratis.util.FileUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataOutputStream; +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +public class MultiLeaderConsensusTest { + + private final Logger logger = LoggerFactory.getLogger(MultiLeaderConsensusTest.class); + + private final ConsensusGroupId gid = new DataRegionId(1); + + private final List peers = + Arrays.asList( + new Peer(gid, new TEndPoint("127.0.0.1", 6000)), + new Peer(gid, new TEndPoint("127.0.0.1", 6001)), + new Peer(gid, new TEndPoint("127.0.0.1", 6002))); + + private final List peersStorage = + Arrays.asList( + new File("target" + java.io.File.separator + "1"), + new File("target" + java.io.File.separator + "2"), + new File("target" + java.io.File.separator + "3")); + + private final ConsensusGroup group = new ConsensusGroup(gid, peers); + private final List servers = new ArrayList<>(); + private final List stateMachines = new ArrayList<>(); + + @Before + public void setUp() throws Exception { + for (int i = 0; i < 3; i++) { + peersStorage.get(i).mkdirs(); + stateMachines.add(new TestStateMachine()); + } + initServer(); + } + + @After + public void tearDown() throws Exception { + stopServer(); + for (File file : peersStorage) { + FileUtils.deleteFully(file); + } + } + + private void initServer() throws IOException { + for (int i = 0; i < 3; i++) { + int finalI = i; + servers.add( + (MultiLeaderConsensus) + ConsensusFactory.getConsensusImpl( + 
ConsensusFactory.MultiLeaderConsensus, + ConsensusConfig.newBuilder() + .setThisNode(peers.get(i).getEndpoint()) + .setStorageDir(peersStorage.get(i).getAbsolutePath()) + .build(), + groupId -> stateMachines.get(finalI)) + .orElseThrow( + () -> + new IllegalArgumentException( + String.format( + ConsensusFactory.CONSTRUCT_FAILED_MSG, + ConsensusFactory.MultiLeaderConsensus)))); + servers.get(i).start(); + } + } + + private void stopServer() { + servers.parallelStream().forEach(MultiLeaderConsensus::stop); + servers.clear(); + } + + /** + * The three nodes use the requests in the queue to replicate the requests to the other two nodes + */ + @Test + public void ReplicateUsingQueueTest() throws IOException, InterruptedException { + logger.info("Start ReplicateUsingQueueTest"); + servers.get(0).addConsensusGroup(group.getGroupId(), group.getPeers()); + servers.get(1).addConsensusGroup(group.getGroupId(), group.getPeers()); + servers.get(2).addConsensusGroup(group.getGroupId(), group.getPeers()); + + Assert.assertEquals(0, servers.get(0).getImpl(gid).getController().getCurrentIndex()); + Assert.assertEquals(0, servers.get(1).getImpl(gid).getController().getCurrentIndex()); + Assert.assertEquals(0, servers.get(2).getImpl(gid).getController().getCurrentIndex()); + + for (int i = 0; i < IndexController.FLUSH_INTERVAL; i++) { + servers.get(0).write(gid, new TestEntry(i, peers.get(0))); + servers.get(1).write(gid, new TestEntry(i, peers.get(1))); + servers.get(2).write(gid, new TestEntry(i, peers.get(2))); + Assert.assertEquals(i + 1, servers.get(0).getImpl(gid).getController().getCurrentIndex()); + Assert.assertEquals(i + 1, servers.get(1).getImpl(gid).getController().getCurrentIndex()); + Assert.assertEquals(i + 1, servers.get(2).getImpl(gid).getController().getCurrentIndex()); + } + + Assert.assertEquals( + IndexController.FLUSH_INTERVAL, + servers.get(0).getImpl(gid).getController().getLastFlushedIndex()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL, + servers.get(1).getImpl(gid).getController().getLastFlushedIndex()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL, + servers.get(2).getImpl(gid).getController().getLastFlushedIndex()); + + for (int i = 0; i < 3; i++) { + long start = System.currentTimeMillis(); + while (servers.get(i).getImpl(gid).getCurrentSafelyDeletedSearchIndex() + < IndexController.FLUSH_INTERVAL) { + long current = System.currentTimeMillis(); + if ((current - start) > 20 * 1000) { + Assert.fail("Unable to replicate entries"); + } + Thread.sleep(100); + } + } + + Assert.assertEquals( + IndexController.FLUSH_INTERVAL, + servers.get(0).getImpl(gid).getCurrentSafelyDeletedSearchIndex()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL, + servers.get(1).getImpl(gid).getCurrentSafelyDeletedSearchIndex()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL, + servers.get(2).getImpl(gid).getCurrentSafelyDeletedSearchIndex()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL * 3, stateMachines.get(0).getRequestSet().size()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL * 3, stateMachines.get(1).getRequestSet().size()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL * 3, stateMachines.get(2).getRequestSet().size()); + Assert.assertEquals(stateMachines.get(0).getData(), stateMachines.get(1).getData()); + Assert.assertEquals(stateMachines.get(2).getData(), stateMachines.get(1).getData()); + + stopServer(); + initServer(); + + Assert.assertEquals(peers, servers.get(0).getImpl(gid).getConfiguration()); + 
Assert.assertEquals(peers, servers.get(1).getImpl(gid).getConfiguration()); + Assert.assertEquals(peers, servers.get(2).getImpl(gid).getConfiguration()); + + Assert.assertEquals( + IndexController.FLUSH_INTERVAL * 2, + servers.get(0).getImpl(gid).getController().getCurrentIndex()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL * 2, + servers.get(1).getImpl(gid).getController().getCurrentIndex()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL * 2, + servers.get(2).getImpl(gid).getController().getCurrentIndex()); + + for (int i = 0; i < 3; i++) { + long start = System.currentTimeMillis(); + while (servers.get(i).getImpl(gid).getCurrentSafelyDeletedSearchIndex() + < IndexController.FLUSH_INTERVAL) { + long current = System.currentTimeMillis(); + if ((current - start) > 20 * 1000) { + Assert.fail("Unable to recover entries"); + } + Thread.sleep(100); + } + } + + Assert.assertEquals( + IndexController.FLUSH_INTERVAL, + servers.get(1).getImpl(gid).getCurrentSafelyDeletedSearchIndex()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL, + servers.get(1).getImpl(gid).getCurrentSafelyDeletedSearchIndex()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL, + servers.get(2).getImpl(gid).getCurrentSafelyDeletedSearchIndex()); + } + + /** + * First, suspend one node to test that the request replication between the two alive nodes is ok, + * then restart all nodes to lose state in the queue, and test using WAL replication to make all + * nodes finally consistent + */ + @Test + public void ReplicateUsingWALTest() throws IOException, InterruptedException { + logger.info("Start ReplicateUsingWALTest"); + servers.get(0).addConsensusGroup(group.getGroupId(), group.getPeers()); + servers.get(1).addConsensusGroup(group.getGroupId(), group.getPeers()); + + Assert.assertEquals(0, servers.get(0).getImpl(gid).getController().getCurrentIndex()); + Assert.assertEquals(0, servers.get(1).getImpl(gid).getController().getCurrentIndex()); + + for (int i = 0; i < IndexController.FLUSH_INTERVAL; i++) { + servers.get(0).write(gid, new TestEntry(i, peers.get(0))); + servers.get(1).write(gid, new TestEntry(i, peers.get(1))); + Assert.assertEquals(i + 1, servers.get(0).getImpl(gid).getController().getCurrentIndex()); + Assert.assertEquals(i + 1, servers.get(1).getImpl(gid).getController().getCurrentIndex()); + } + + Assert.assertEquals( + IndexController.FLUSH_INTERVAL, + servers.get(0).getImpl(gid).getController().getLastFlushedIndex()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL, + servers.get(1).getImpl(gid).getController().getLastFlushedIndex()); + + Assert.assertEquals(0, servers.get(0).getImpl(gid).getCurrentSafelyDeletedSearchIndex()); + Assert.assertEquals(0, servers.get(1).getImpl(gid).getCurrentSafelyDeletedSearchIndex()); + + stopServer(); + initServer(); + + servers.get(2).addConsensusGroup(group.getGroupId(), group.getPeers()); + + Assert.assertEquals(peers, servers.get(0).getImpl(gid).getConfiguration()); + Assert.assertEquals(peers, servers.get(1).getImpl(gid).getConfiguration()); + Assert.assertEquals(peers, servers.get(2).getImpl(gid).getConfiguration()); + + Assert.assertEquals( + IndexController.FLUSH_INTERVAL * 2, + servers.get(0).getImpl(gid).getController().getCurrentIndex()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL * 2, + servers.get(1).getImpl(gid).getController().getCurrentIndex()); + Assert.assertEquals(0, servers.get(2).getImpl(gid).getController().getCurrentIndex()); + + for (int i = 0; i < 2; i++) { + long start = 
System.currentTimeMillis(); + // should be [IndexController.FLUSH_INTERVAL, IndexController.FLUSH_INTERVAL * 2 - 1] after + // replicating all entries + while (servers.get(i).getImpl(gid).getCurrentSafelyDeletedSearchIndex() + < IndexController.FLUSH_INTERVAL) { + long current = System.currentTimeMillis(); + if ((current - start) > 20 * 1000) { + logger.error("{}", servers.get(i).getImpl(gid).getCurrentSafelyDeletedSearchIndex()); + Assert.fail("Unable to replicate entries"); + } + Thread.sleep(100); + } + } + + Assert.assertEquals( + IndexController.FLUSH_INTERVAL * 2, stateMachines.get(0).getRequestSet().size()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL * 2, stateMachines.get(1).getRequestSet().size()); + Assert.assertEquals( + IndexController.FLUSH_INTERVAL * 2, stateMachines.get(2).getRequestSet().size()); + + Assert.assertEquals(stateMachines.get(0).getData(), stateMachines.get(1).getData()); + Assert.assertEquals(stateMachines.get(2).getData(), stateMachines.get(1).getData()); + } + + private static class TestEntry implements IConsensusRequest { + + private final int num; + private final Peer peer; + + public TestEntry(int num, Peer peer) { + this.num = num; + this.peer = peer; + } + + @Override + public ByteBuffer serializeToByteBuffer() { + try (PublicBAOS publicBAOS = new PublicBAOS(); + DataOutputStream outputStream = new DataOutputStream(publicBAOS)) { + outputStream.writeInt(num); + peer.serialize(outputStream); + return ByteBuffer.wrap(publicBAOS.getBuf(), 0, publicBAOS.size()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + TestEntry testEntry = (TestEntry) o; + return num == testEntry.num && Objects.equals(peer, testEntry.peer); + } + + @Override + public int hashCode() { + return Objects.hash(num, peer); + } + + @Override + public String toString() { + return "TestEntry{" + "num=" + num + ", peer=" + peer + '}'; + } + } + + private static class TestStateMachine implements IStateMachine, IStateMachine.EventApi { + + private final Set requestSet = ConcurrentHashMap.newKeySet(); + + public Set getRequestSet() { + return requestSet; + } + + public Set getData() { + Set data = new HashSet<>(); + requestSet.forEach(x -> data.add((TestEntry) x.getRequest())); + return data; + } + + @Override + public void start() {} + + @Override + public void stop() {} + + @Override + public TSStatus write(IConsensusRequest request) { + synchronized (requestSet) { + IConsensusRequest innerRequest = ((IndexedConsensusRequest) request).getRequest(); + if (innerRequest instanceof ByteBufferConsensusRequest) { + ByteBuffer buffer = innerRequest.serializeToByteBuffer(); + requestSet.add( + new IndexedConsensusRequest( + ((IndexedConsensusRequest) request).getSearchIndex(), + -1, + new TestEntry(buffer.getInt(), Peer.deserialize(buffer)))); + } else { + requestSet.add(((IndexedConsensusRequest) request)); + } + return new TSStatus(); + } + } + + @Override + public synchronized DataSet read(IConsensusRequest request) { + if (request instanceof GetConsensusReqReaderPlan) { + return new FakeConsensusReqReader(requestSet); + } + return null; + } + + @Override + public boolean takeSnapshot(File snapshotDir) { + return false; + } + + @Override + public void loadSnapshot(File latestSnapshotRootDir) {} + } + + public static class FakeConsensusReqReader implements ConsensusReqReader, DataSet { + + private final Set 
requestSet; + + public FakeConsensusReqReader(Set requestSet) { + this.requestSet = requestSet; + } + + @Override + public IConsensusRequest getReq(long index) { + synchronized (requestSet) { + for (IndexedConsensusRequest indexedConsensusRequest : requestSet) { + if (indexedConsensusRequest.getSearchIndex() == index) { + return indexedConsensusRequest; + } + } + return null; + } + } + + @Override + public List getReqs(long startIndex, int num) { + return null; + } + + @Override + public ReqIterator getReqIterator(long startIndex) { + return null; + } + } +} diff --git a/consensus/src/test/java/org/apache/iotdb/consensus/multileader/RecoveryTest.java b/consensus/src/test/java/org/apache/iotdb/consensus/multileader/RecoveryTest.java new file mode 100644 index 000000000000..b9000b733749 --- /dev/null +++ b/consensus/src/test/java/org/apache/iotdb/consensus/multileader/RecoveryTest.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader; + +import org.apache.iotdb.common.rpc.thrift.TEndPoint; +import org.apache.iotdb.commons.consensus.ConsensusGroupId; +import org.apache.iotdb.commons.consensus.SchemaRegionId; +import org.apache.iotdb.consensus.ConsensusFactory; +import org.apache.iotdb.consensus.EmptyStateMachine; +import org.apache.iotdb.consensus.IConsensus; +import org.apache.iotdb.consensus.common.Peer; +import org.apache.iotdb.consensus.common.response.ConsensusGenericResponse; +import org.apache.iotdb.consensus.config.ConsensusConfig; + +import org.apache.ratis.util.FileUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.Collections; + +public class RecoveryTest { + + private final ConsensusGroupId schemaRegionId = new SchemaRegionId(1); + private IConsensus consensusImpl; + + public void constructConsensus() throws IOException { + consensusImpl = + ConsensusFactory.getConsensusImpl( + ConsensusFactory.MultiLeaderConsensus, + ConsensusConfig.newBuilder() + .setThisNode(new TEndPoint("0.0.0.0", 9000)) + .setStorageDir("target" + java.io.File.separator + "recovery") + .build(), + gid -> new EmptyStateMachine()) + .orElseThrow( + () -> + new IllegalArgumentException( + String.format( + ConsensusFactory.CONSTRUCT_FAILED_MSG, + ConsensusFactory.MultiLeaderConsensus))); + consensusImpl.start(); + } + + @Before + public void setUp() throws Exception { + constructConsensus(); + } + + @After + public void tearDown() throws IOException { + consensusImpl.stop(); + FileUtils.deleteFully(new File("./target/recovery")); + } + + @Test + public void recoveryTest() throws Exception { + consensusImpl.addConsensusGroup( + schemaRegionId, + 
Collections.singletonList(new Peer(schemaRegionId, new TEndPoint("0.0.0.0", 9000)))); + + consensusImpl.removeConsensusGroup(schemaRegionId); + + consensusImpl.stop(); + consensusImpl = null; + + constructConsensus(); + + ConsensusGenericResponse response = + consensusImpl.addConsensusGroup( + schemaRegionId, + Collections.singletonList(new Peer(schemaRegionId, new TEndPoint("0.0.0.0", 9000)))); + + Assert.assertTrue(response.isSuccess()); + } +} diff --git a/consensus/src/test/java/org/apache/iotdb/consensus/multileader/logdispatcher/IndexControllerTest.java b/consensus/src/test/java/org/apache/iotdb/consensus/multileader/logdispatcher/IndexControllerTest.java new file mode 100644 index 000000000000..c9bf6051b071 --- /dev/null +++ b/consensus/src/test/java/org/apache/iotdb/consensus/multileader/logdispatcher/IndexControllerTest.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.consensus.multileader.logdispatcher; + +import org.apache.ratis.util.FileUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; + +public class IndexControllerTest { + + private static final File storageDir = new File("target" + java.io.File.separator + "test"); + private static final String prefix = "version"; + + @Before + public void setUp() throws IOException { + FileUtils.createDirectories(storageDir); + } + + @After + public void tearDown() throws IOException { + FileUtils.deleteFully(storageDir); + } + + /** test indexController when incrementIntervalAfterRestart == true */ + @Test + public void testTrueIncrementIntervalAfterRestart() { + IndexController controller = new IndexController(storageDir.getAbsolutePath(), prefix, true); + Assert.assertEquals(0, controller.getCurrentIndex()); + Assert.assertEquals(0, controller.getLastFlushedIndex()); + + for (int i = 0; i < IndexController.FLUSH_INTERVAL - 1; i++) { + controller.incrementAndGet(); + } + Assert.assertEquals(IndexController.FLUSH_INTERVAL - 1, controller.getCurrentIndex()); + Assert.assertEquals(0, controller.getLastFlushedIndex()); + + controller = new IndexController(storageDir.getAbsolutePath(), prefix, true); + Assert.assertEquals(IndexController.FLUSH_INTERVAL, controller.getCurrentIndex()); + Assert.assertEquals(IndexController.FLUSH_INTERVAL, controller.getLastFlushedIndex()); + + for (int i = 0; i < IndexController.FLUSH_INTERVAL + 1; i++) { + controller.incrementAndGet(); + } + Assert.assertEquals(IndexController.FLUSH_INTERVAL * 2 + 1, controller.getCurrentIndex()); + Assert.assertEquals(IndexController.FLUSH_INTERVAL * 2, controller.getLastFlushedIndex()); + + controller = new IndexController(storageDir.getAbsolutePath(), prefix, 
true); + Assert.assertEquals(IndexController.FLUSH_INTERVAL * 3, controller.getCurrentIndex()); + Assert.assertEquals(IndexController.FLUSH_INTERVAL * 3, controller.getLastFlushedIndex()); + } + + /** test indexController when incrementIntervalAfterRestart == false */ + @Test + public void testFalseIncrementIntervalAfterRestart() { + IndexController controller = new IndexController(storageDir.getAbsolutePath(), prefix, false); + Assert.assertEquals(0, controller.getCurrentIndex()); + Assert.assertEquals(0, controller.getLastFlushedIndex()); + + controller.updateAndGet(IndexController.FLUSH_INTERVAL - 1); + + Assert.assertEquals(IndexController.FLUSH_INTERVAL - 1, controller.getCurrentIndex()); + Assert.assertEquals(0, controller.getLastFlushedIndex()); + + controller = new IndexController(storageDir.getAbsolutePath(), prefix, false); + Assert.assertEquals(0, controller.getCurrentIndex()); + Assert.assertEquals(0, controller.getLastFlushedIndex()); + + controller.updateAndGet(IndexController.FLUSH_INTERVAL + 1); + Assert.assertEquals(IndexController.FLUSH_INTERVAL + 1, controller.getCurrentIndex()); + Assert.assertEquals(IndexController.FLUSH_INTERVAL, controller.getLastFlushedIndex()); + + controller = new IndexController(storageDir.getAbsolutePath(), prefix, false); + Assert.assertEquals(IndexController.FLUSH_INTERVAL, controller.getCurrentIndex()); + Assert.assertEquals(IndexController.FLUSH_INTERVAL, controller.getLastFlushedIndex()); + + controller.updateAndGet(IndexController.FLUSH_INTERVAL * 2 - 1); + Assert.assertEquals(IndexController.FLUSH_INTERVAL * 2 - 1, controller.getCurrentIndex()); + Assert.assertEquals(IndexController.FLUSH_INTERVAL, controller.getLastFlushedIndex()); + + controller = new IndexController(storageDir.getAbsolutePath(), prefix, false); + Assert.assertEquals(IndexController.FLUSH_INTERVAL, controller.getCurrentIndex()); + Assert.assertEquals(IndexController.FLUSH_INTERVAL, controller.getLastFlushedIndex()); + + controller.updateAndGet(IndexController.FLUSH_INTERVAL * 2 + 1); + Assert.assertEquals(IndexController.FLUSH_INTERVAL * 2 + 1, controller.getCurrentIndex()); + Assert.assertEquals(IndexController.FLUSH_INTERVAL * 2, controller.getLastFlushedIndex()); + } +} diff --git a/consensus/src/test/java/org/apache/iotdb/consensus/multileader/logdispatcher/SyncStatusTest.java b/consensus/src/test/java/org/apache/iotdb/consensus/multileader/logdispatcher/SyncStatusTest.java new file mode 100644 index 000000000000..cbf015257e1c --- /dev/null +++ b/consensus/src/test/java/org/apache/iotdb/consensus/multileader/logdispatcher/SyncStatusTest.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.consensus.multileader.logdispatcher; + +import org.apache.iotdb.consensus.config.MultiLeaderConfig; + +import org.apache.ratis.util.FileUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; + +public class SyncStatusTest { + + private static final File storageDir = new File("target" + java.io.File.separator + "test"); + private static final String prefix = "version"; + private static final MultiLeaderConfig config = new MultiLeaderConfig.Builder().build(); + + @Before + public void setUp() throws IOException { + FileUtils.createDirectories(storageDir); + } + + @After + public void tearDown() throws IOException { + FileUtils.deleteFully(storageDir); + } + + /** Confirm success from front to back */ + @Test + public void sequenceTest() throws InterruptedException { + IndexController controller = new IndexController(storageDir.getAbsolutePath(), prefix, true); + Assert.assertEquals(0, controller.getCurrentIndex()); + + SyncStatus status = new SyncStatus(controller, config); + List batchList = new ArrayList<>(); + + for (long i = 0; i < config.getReplication().getMaxPendingBatch(); i++) { + PendingBatch batch = new PendingBatch(i, i, Collections.emptyList()); + batchList.add(batch); + status.addNextBatch(batch); + } + + for (int i = 0; i < config.getReplication().getMaxPendingBatch(); i++) { + status.removeBatch(batchList.get(i)); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch() - 1 - i, status.getPendingBatches().size()); + Assert.assertEquals(i, controller.getCurrentIndex()); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch(), status.getNextSendingIndex()); + } + } + + /** Confirm success from back to front */ + @Test + public void reverseTest() throws InterruptedException { + IndexController controller = new IndexController(storageDir.getAbsolutePath(), prefix, true); + Assert.assertEquals(0, controller.getCurrentIndex()); + Assert.assertEquals(0, controller.getLastFlushedIndex()); + + SyncStatus status = new SyncStatus(controller, config); + List batchList = new ArrayList<>(); + + for (long i = 0; i < config.getReplication().getMaxPendingBatch(); i++) { + PendingBatch batch = new PendingBatch(i, i, Collections.emptyList()); + batchList.add(batch); + status.addNextBatch(batch); + } + + for (int i = 0; i < config.getReplication().getMaxPendingBatch() - 1; i++) { + status.removeBatch(batchList.get(config.getReplication().getMaxPendingBatch() - 1 - i)); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch(), status.getPendingBatches().size()); + Assert.assertEquals(0, controller.getCurrentIndex()); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch(), status.getNextSendingIndex()); + } + + status.removeBatch(batchList.get(0)); + Assert.assertEquals(0, status.getPendingBatches().size()); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch() - 1, controller.getCurrentIndex()); + Assert.assertEquals(config.getReplication().getMaxPendingBatch(), status.getNextSendingIndex()); + } + + /** Confirm success first from front to back, then back to front */ + @Test + public void mixedTest() throws InterruptedException { + IndexController controller = new IndexController(storageDir.getAbsolutePath(), 
prefix, true); + Assert.assertEquals(0, controller.getCurrentIndex()); + Assert.assertEquals(0, controller.getLastFlushedIndex()); + + SyncStatus status = new SyncStatus(controller, config); + List batchList = new ArrayList<>(); + + for (long i = 0; i < config.getReplication().getMaxPendingBatch(); i++) { + PendingBatch batch = new PendingBatch(i, i, Collections.emptyList()); + batchList.add(batch); + status.addNextBatch(batch); + } + + for (int i = 0; i < config.getReplication().getMaxPendingBatch() / 2; i++) { + status.removeBatch(batchList.get(i)); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch() - 1 - i, status.getPendingBatches().size()); + Assert.assertEquals(i, controller.getCurrentIndex()); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch(), status.getNextSendingIndex()); + } + + for (int i = config.getReplication().getMaxPendingBatch() / 2 + 1; + i < config.getReplication().getMaxPendingBatch(); + i++) { + status.removeBatch(batchList.get(i)); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch() / 2, status.getPendingBatches().size()); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch(), status.getNextSendingIndex()); + } + + status.removeBatch(batchList.get(config.getReplication().getMaxPendingBatch() / 2)); + Assert.assertEquals(0, status.getPendingBatches().size()); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch() - 1, controller.getCurrentIndex()); + Assert.assertEquals(config.getReplication().getMaxPendingBatch(), status.getNextSendingIndex()); + } + + /** Test Blocking while addNextBatch */ + @Test + public void waitTest() throws InterruptedException, ExecutionException { + IndexController controller = new IndexController(storageDir.getAbsolutePath(), prefix, true); + Assert.assertEquals(0, controller.getCurrentIndex()); + + SyncStatus status = new SyncStatus(controller, config); + List batchList = new ArrayList<>(); + + for (long i = 0; i < config.getReplication().getMaxPendingBatch(); i++) { + PendingBatch batch = new PendingBatch(i, i, Collections.emptyList()); + batchList.add(batch); + status.addNextBatch(batch); + } + + for (int i = 0; i < config.getReplication().getMaxPendingBatch() - 1; i++) { + status.removeBatch(batchList.get(config.getReplication().getMaxPendingBatch() - 1 - i)); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch(), status.getPendingBatches().size()); + Assert.assertEquals(0, controller.getCurrentIndex()); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch(), status.getNextSendingIndex()); + } + + CompletableFuture future = + CompletableFuture.supplyAsync( + () -> { + PendingBatch batch = + new PendingBatch( + config.getReplication().getMaxPendingBatch(), + config.getReplication().getMaxPendingBatch(), + Collections.emptyList()); + batchList.add(batch); + try { + status.addNextBatch(batch); + } catch (InterruptedException e) { + e.printStackTrace(); + return false; + } + return true; + }); + + Thread.sleep(1000); + Assert.assertFalse(future.isDone()); + + status.removeBatch(batchList.get(0)); + Assert.assertTrue(future.get()); + Assert.assertEquals(1, status.getPendingBatches().size()); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch() - 1, controller.getCurrentIndex()); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch() + 1, status.getNextSendingIndex()); + + status.removeBatch(batchList.get(config.getReplication().getMaxPendingBatch())); + Assert.assertEquals(0, 
status.getPendingBatches().size()); + Assert.assertEquals(config.getReplication().getMaxPendingBatch(), controller.getCurrentIndex()); + Assert.assertEquals( + config.getReplication().getMaxPendingBatch() + 1, status.getNextSendingIndex()); + } +} diff --git a/consensus/src/test/java/org/apache/iotdb/consensus/ratis/RatisConsensusTest.java b/consensus/src/test/java/org/apache/iotdb/consensus/ratis/RatisConsensusTest.java index 4eee289586e2..b44d96460dd4 100644 --- a/consensus/src/test/java/org/apache/iotdb/consensus/ratis/RatisConsensusTest.java +++ b/consensus/src/test/java/org/apache/iotdb/consensus/ratis/RatisConsensusTest.java @@ -28,6 +28,8 @@ import org.apache.iotdb.consensus.common.request.ByteBufferConsensusRequest; import org.apache.iotdb.consensus.common.response.ConsensusReadResponse; import org.apache.iotdb.consensus.common.response.ConsensusWriteResponse; +import org.apache.iotdb.consensus.config.ConsensusConfig; +import org.apache.iotdb.consensus.config.RatisConfig; import org.apache.ratis.util.FileUtils; import org.junit.After; @@ -39,7 +41,6 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; @@ -48,33 +49,42 @@ public class RatisConsensusTest { - private static final String RATIS_CLASS_NAME = "org.apache.iotdb.consensus.ratis.RatisConsensus"; - private ConsensusGroupId gid; private List peers; private List peersStorage; private List servers; private List stateMachines; private ConsensusGroup group; - private Peer peer0; - private Peer peer1; - private Peer peer2; CountDownLatch latch; private void makeServers() throws IOException { for (int i = 0; i < 3; i++) { stateMachines.add(new TestUtils.IntegerCounter()); + RatisConfig config = + RatisConfig.newBuilder() + .setLog( + RatisConfig.Log.newBuilder() + .setPurgeUptoSnapshotIndex(true) + .setPurgeGap(10) + .build()) + .setSnapshot(RatisConfig.Snapshot.newBuilder().setAutoTriggerThreshold(100).build()) + .build(); int finalI = i; servers.add( ConsensusFactory.getConsensusImpl( - RATIS_CLASS_NAME, - peers.get(i).getEndpoint(), - peersStorage.get(i), + ConsensusFactory.RatisConsensus, + ConsensusConfig.newBuilder() + .setThisNode(peers.get(i).getEndpoint()) + .setRatisConfig(config) + .setStorageDir(peersStorage.get(i).getAbsolutePath()) + .build(), groupId -> stateMachines.get(finalI)) .orElseThrow( () -> new IllegalArgumentException( - String.format(ConsensusFactory.CONSTRUCT_FAILED_MSG, RATIS_CLASS_NAME)))); + String.format( + ConsensusFactory.CONSTRUCT_FAILED_MSG, + ConsensusFactory.RatisConsensus)))); servers.get(i).start(); } } @@ -83,16 +93,13 @@ private void makeServers() throws IOException { public void setUp() throws IOException { gid = new DataRegionId(1); peers = new ArrayList<>(); - peer0 = new Peer(gid, new TEndPoint("127.0.0.1", 6000)); - peer1 = new Peer(gid, new TEndPoint("127.0.0.1", 6001)); - peer2 = new Peer(gid, new TEndPoint("127.0.0.1", 6002)); - peers.add(peer0); - peers.add(peer1); - peers.add(peer2); + peers.add(new Peer(gid, new TEndPoint("127.0.0.1", 6000))); + peers.add(new Peer(gid, new TEndPoint("127.0.0.1", 6001))); + peers.add(new Peer(gid, new TEndPoint("127.0.0.1", 6002))); peersStorage = new ArrayList<>(); - peersStorage.add(new File("./target/1/")); - peersStorage.add(new File("./target/2/")); - peersStorage.add(new File("./target/3/")); + peersStorage.add(new File("target" + java.io.File.separator + "1")); + 
peersStorage.add(new File("target" + java.io.File.separator + "2")); + peersStorage.add(new File("target" + java.io.File.separator + "3")); for (File dir : peersStorage) { dir.mkdirs(); } @@ -113,70 +120,68 @@ public void tearDown() throws IOException { } @Test - public void basicConsensus() throws Exception { + public void basicConsensus3Copy() throws Exception { + servers.get(0).addConsensusGroup(group.getGroupId(), group.getPeers()); + servers.get(1).addConsensusGroup(group.getGroupId(), group.getPeers()); + servers.get(2).addConsensusGroup(group.getGroupId(), group.getPeers()); + + doConsensus(servers.get(0), group.getGroupId(), 10, 10); + } + + @Test + public void addMemberToGroup() throws Exception { + List original = peers.subList(0, 1); + + servers.get(0).addConsensusGroup(group.getGroupId(), original); + doConsensus(servers.get(0), group.getGroupId(), 10, 10); + + // add 2 members + servers.get(1).addConsensusGroup(group.getGroupId(), peers); + servers.get(0).addPeer(group.getGroupId(), peers.get(1)); + + servers.get(2).addConsensusGroup(group.getGroupId(), peers); + servers.get(0).changePeer(group.getGroupId(), peers); - // 1. Add a new group + Assert.assertEquals(stateMachines.get(0).getConfiguration().size(), 3); + doConsensus(servers.get(0), group.getGroupId(), 10, 20); + } + + @Test + public void removeMemberFromGroup() throws Exception { servers.get(0).addConsensusGroup(group.getGroupId(), group.getPeers()); servers.get(1).addConsensusGroup(group.getGroupId(), group.getPeers()); servers.get(2).addConsensusGroup(group.getGroupId(), group.getPeers()); - // 2. Do Consensus 10 doConsensus(servers.get(0), group.getGroupId(), 10, 10); - // 3. Remove two Peers from Group (peer 0 and peer 2) - // transfer the leader to peer1 - servers.get(0).transferLeader(gid, peer1); - // Assert.assertTrue(servers.get(1).isLeader(gid)); - // first use removePeer to inform the group leader of configuration change - servers.get(1).removePeer(gid, peer0); - servers.get(1).removePeer(gid, peer2); - // then use removeConsensusGroup to clean up removed Consensus-Peer's states - servers.get(0).removeConsensusGroup(gid); + servers.get(0).transferLeader(gid, peers.get(0)); + servers.get(0).removePeer(gid, peers.get(1)); + servers.get(1).removeConsensusGroup(gid); + servers.get(0).removePeer(gid, peers.get(2)); servers.get(2).removeConsensusGroup(gid); - Assert.assertEquals(servers.get(1).getLeader(gid).getEndpoint(), peers.get(1).getEndpoint()); - Assert.assertEquals(stateMachines.get(1).getLeaderEndpoint(), peers.get(1).getEndpoint()); - Assert.assertEquals(stateMachines.get(1).getConfiguration().size(), 1); - Assert.assertEquals(stateMachines.get(1).getConfiguration().get(0), peers.get(1)); - - // 4. try consensus again with one peer - doConsensus(servers.get(1), gid, 10, 20); - - // 5. add two peers back - // first notify these new peers, let them initialize - servers.get(0).addConsensusGroup(gid, peers); - servers.get(2).addConsensusGroup(gid, peers); - // then use addPeer to inform the group leader of configuration change - servers.get(1).addPeer(gid, peer0); - servers.get(1).addPeer(gid, peer2); - Assert.assertEquals(stateMachines.get(1).getConfiguration().size(), 3); - - // 6. try consensus with all 3 peers - doConsensus(servers.get(2), gid, 10, 30); - - // 7. 
again, group contains only peer0 - servers.get(0).transferLeader(group.getGroupId(), peer0); - servers.get(0).changePeer(group.getGroupId(), Collections.singletonList(peer0)); - servers.get(1).removeConsensusGroup(group.getGroupId()); - servers.get(2).removeConsensusGroup(group.getGroupId()); - Assert.assertEquals(stateMachines.get(0).getLeaderEndpoint(), peers.get(0).getEndpoint()); - Assert.assertEquals(stateMachines.get(0).getConfiguration().size(), 1); - Assert.assertEquals(stateMachines.get(0).getConfiguration().get(0), peers.get(0)); - - // 8. try consensus with only peer0 - doConsensus(servers.get(0), gid, 10, 40); - - // 9. shutdown all the servers + + doConsensus(servers.get(0), group.getGroupId(), 10, 20); + } + + @Test + public void crashAndStart() throws Exception { + servers.get(0).addConsensusGroup(group.getGroupId(), group.getPeers()); + servers.get(1).addConsensusGroup(group.getGroupId(), group.getPeers()); + servers.get(2).addConsensusGroup(group.getGroupId(), group.getPeers()); + + // 200 operation will trigger snapshot & purge + doConsensus(servers.get(0), group.getGroupId(), 200, 200); + for (IConsensus consensus : servers) { consensus.stop(); } servers.clear(); - // 10. start again and verify the snapshot makeServers(); servers.get(0).addConsensusGroup(group.getGroupId(), group.getPeers()); servers.get(1).addConsensusGroup(group.getGroupId(), group.getPeers()); servers.get(2).addConsensusGroup(group.getGroupId(), group.getPeers()); - doConsensus(servers.get(0), gid, 10, 50); + doConsensus(servers.get(0), gid, 10, 210); } private void doConsensus(IConsensus consensus, ConsensusGroupId gid, int count, int target) @@ -217,7 +222,7 @@ private void doConsensus(IConsensus consensus, ConsensusGroupId gid, int count, IConsensus leader = null; while (leader == null) { long current = System.currentTimeMillis(); - if ((current - start) > 60 * 1000 * 1000) { + if ((current - start) > 60 * 1000) { break; } for (int i = 0; i < 3; i++) { diff --git a/consensus/src/test/java/org/apache/iotdb/consensus/ratis/SnapshotTest.java b/consensus/src/test/java/org/apache/iotdb/consensus/ratis/SnapshotTest.java index e42f981706cf..45b4d6ee6ba9 100644 --- a/consensus/src/test/java/org/apache/iotdb/consensus/ratis/SnapshotTest.java +++ b/consensus/src/test/java/org/apache/iotdb/consensus/ratis/SnapshotTest.java @@ -75,7 +75,6 @@ public void close() throws IOException {} @Before public void setUp() throws IOException { - FileUtils.deleteFully(testDir); FileUtils.createDirectories(testDir); } @@ -97,6 +96,7 @@ public void testSnapshot() throws Exception { long index = proxy.takeSnapshot(); Assert.assertEquals(index, 616); Assert.assertTrue(new File(snapshotFilename).exists()); + Assert.assertTrue(new File(getSnapshotMetaFilename("421_616")).exists()); // take a snapshot at 616-4217 proxy.notifyTermIndexUpdated(616, 4217); @@ -105,16 +105,25 @@ public void testSnapshot() throws Exception { long indexLatest = proxy.takeSnapshot(); Assert.assertEquals(indexLatest, 4217); Assert.assertTrue(new File(snapshotFilenameLatest).exists()); + Assert.assertTrue(new File(getSnapshotMetaFilename("616_4217")).exists()); // query the latest snapshot SnapshotInfo info = proxy.getLatestSnapshot(); Assert.assertEquals(info.getTerm(), 616); Assert.assertEquals(info.getIndex(), 4217); - Assert.assertTrue(info.getFiles().get(0).getPath().endsWith(snapshotFilenameLatest)); // clean up proxy.getStateMachineStorage().cleanupOldSnapshots(null); Assert.assertFalse(new File(snapshotFilename).exists()); Assert.assertTrue(new 
File(snapshotFilenameLatest).exists()); } + + private String getSnapshotMetaFilename(String termIndexMeta) { + return testDir.getAbsolutePath() + + File.separator + + termIndexMeta + + File.separator + + ".ratis_meta." + + termIndexMeta; + } } diff --git a/consensus/src/test/java/org/apache/iotdb/consensus/ratis/TestUtils.java b/consensus/src/test/java/org/apache/iotdb/consensus/ratis/TestUtils.java index 0e36ea878e74..d383fe902b5d 100644 --- a/consensus/src/test/java/org/apache/iotdb/consensus/ratis/TestUtils.java +++ b/consensus/src/test/java/org/apache/iotdb/consensus/ratis/TestUtils.java @@ -41,7 +41,7 @@ import java.util.concurrent.atomic.AtomicInteger; public class TestUtils { - static class TestDataSet implements DataSet { + public static class TestDataSet implements DataSet { private int number; public void setNumber(int number) { @@ -53,7 +53,7 @@ public int getNumber() { } } - static class TestRequest { + public static class TestRequest implements IConsensusRequest { private final int cmd; public TestRequest(ByteBuffer buffer) { @@ -63,9 +63,16 @@ public TestRequest(ByteBuffer buffer) { public boolean isIncr() { return cmd == 1; } + + @Override + public ByteBuffer serializeToByteBuffer() { + ByteBuffer buffer = ByteBuffer.allocate(4).putInt(cmd); + buffer.flip(); + return buffer; + } } - static class IntegerCounter implements IStateMachine, IStateMachine.EventApi { + public static class IntegerCounter implements IStateMachine, IStateMachine.EventApi { private AtomicInteger integer; private final Logger logger = LoggerFactory.getLogger(IntegerCounter.class); private TEndPoint leaderEndpoint; @@ -80,9 +87,13 @@ public void start() { public void stop() {} @Override - public TSStatus write(IConsensusRequest IConsensusRequest) { - ByteBufferConsensusRequest request = (ByteBufferConsensusRequest) IConsensusRequest; - TestRequest testRequest = new TestRequest(request.getContent()); + public TSStatus write(IConsensusRequest request) { + TestRequest testRequest; + if (request instanceof ByteBufferConsensusRequest) { + testRequest = new TestRequest(request.serializeToByteBuffer()); + } else { + testRequest = (TestRequest) request; + } if (testRequest.isIncr()) { integer.incrementAndGet(); } diff --git a/consensus/src/test/java/org/apache/iotdb/consensus/standalone/RecoveryTest.java b/consensus/src/test/java/org/apache/iotdb/consensus/standalone/RecoveryTest.java index a166893b6b8b..23244da47a69 100644 --- a/consensus/src/test/java/org/apache/iotdb/consensus/standalone/RecoveryTest.java +++ b/consensus/src/test/java/org/apache/iotdb/consensus/standalone/RecoveryTest.java @@ -26,6 +26,7 @@ import org.apache.iotdb.consensus.IConsensus; import org.apache.iotdb.consensus.common.Peer; import org.apache.iotdb.consensus.common.response.ConsensusGenericResponse; +import org.apache.iotdb.consensus.config.ConsensusConfig; import org.apache.iotdb.consensus.exception.ConsensusGroupAlreadyExistException; import org.apache.ratis.util.FileUtils; @@ -41,22 +42,22 @@ public class RecoveryTest { private final ConsensusGroupId schemaRegionId = new SchemaRegionId(1); private IConsensus consensusImpl; - private static final String STANDALONE_CONSENSUS_CLASS_NAME = - "org.apache.iotdb.consensus.standalone.StandAloneConsensus"; public void constructConsensus() throws IOException { consensusImpl = ConsensusFactory.getConsensusImpl( - STANDALONE_CONSENSUS_CLASS_NAME, - new TEndPoint("0.0.0.0", 9000), - new File("./target/recovery"), + ConsensusFactory.StandAloneConsensus, + ConsensusConfig.newBuilder() + 
.setThisNode(new TEndPoint("0.0.0.0", 9000)) + .setStorageDir("target" + java.io.File.separator + "recovery") + .build(), gid -> new EmptyStateMachine()) .orElseThrow( () -> new IllegalArgumentException( String.format( ConsensusFactory.CONSTRUCT_FAILED_MSG, - STANDALONE_CONSENSUS_CLASS_NAME))); + ConsensusFactory.StandAloneConsensus))); consensusImpl.start(); } diff --git a/consensus/src/test/java/org/apache/iotdb/consensus/standalone/StandAloneConsensusTest.java b/consensus/src/test/java/org/apache/iotdb/consensus/standalone/StandAloneConsensusTest.java index 776257e2f830..9e0e13be6da6 100644 --- a/consensus/src/test/java/org/apache/iotdb/consensus/standalone/StandAloneConsensusTest.java +++ b/consensus/src/test/java/org/apache/iotdb/consensus/standalone/StandAloneConsensusTest.java @@ -35,6 +35,7 @@ import org.apache.iotdb.consensus.common.request.IConsensusRequest; import org.apache.iotdb.consensus.common.response.ConsensusGenericResponse; import org.apache.iotdb.consensus.common.response.ConsensusWriteResponse; +import org.apache.iotdb.consensus.config.ConsensusConfig; import org.apache.iotdb.consensus.exception.ConsensusGroupAlreadyExistException; import org.apache.iotdb.consensus.exception.ConsensusGroupNotExistException; import org.apache.iotdb.consensus.exception.IllegalPeerEndpointException; @@ -58,8 +59,6 @@ public class StandAloneConsensusTest { - private static final String STANDALONE_CONSENSUS_CLASS_NAME = - "org.apache.iotdb.consensus.standalone.StandAloneConsensus"; private IConsensus consensusImpl; private final TestEntry entry1 = new TestEntry(0); private final ByteBufferConsensusRequest entry2 = @@ -77,8 +76,10 @@ public TestEntry(int num) { } @Override - public void serializeRequest(ByteBuffer buffer) { - buffer.putInt(num); + public ByteBuffer serializeToByteBuffer() { + ByteBuffer buffer = ByteBuffer.allocate(4).putInt(num); + buffer.flip(); + return buffer; } } @@ -99,7 +100,7 @@ public void stop() {} @Override public TSStatus write(IConsensusRequest request) { if (request instanceof ByteBufferConsensusRequest) { - return new TSStatus(((ByteBufferConsensusRequest) request).getContent().getInt()); + return new TSStatus(request.serializeToByteBuffer().getInt()); } else if (request instanceof TestEntry) { return new TSStatus( direction ? 
((TestEntry) request).num + 1 : ((TestEntry) request).num - 1); @@ -125,9 +126,11 @@ public void loadSnapshot(File latestSnapshotRootDir) {} public void setUp() throws Exception { consensusImpl = ConsensusFactory.getConsensusImpl( - STANDALONE_CONSENSUS_CLASS_NAME, - new TEndPoint("0.0.0.0", 6667), - new File("./target/standalone"), + ConsensusFactory.StandAloneConsensus, + ConsensusConfig.newBuilder() + .setThisNode(new TEndPoint("0.0.0.0", 6667)) + .setStorageDir("target" + java.io.File.separator + "standalone") + .build(), gid -> { switch (gid.getType()) { case SchemaRegion: @@ -142,7 +145,7 @@ public void setUp() throws Exception { new IllegalArgumentException( String.format( ConsensusFactory.CONSTRUCT_FAILED_MSG, - STANDALONE_CONSENSUS_CLASS_NAME))); + ConsensusFactory.StandAloneConsensus))); consensusImpl.start(); } diff --git a/distribution/pom.xml b/distribution/pom.xml index b9b4dad132be..c9bd405b0de2 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -32,28 +32,6 @@ - - org.apache.maven.plugins - maven-antrun-plugin - 3.0.0 - - - package - - - - - - - - - - - run - - - - org.apache.maven.plugins maven-assembly-plugin @@ -75,6 +53,7 @@ src/assembly/grafana-connector.xml src/assembly/client-cpp.xml src/assembly/grafana-plugin.xml + src/assembly/library-udf.xml apache-iotdb-${project.version} @@ -111,6 +90,7 @@ apache-iotdb-${project.version}-grafana-connector-bin.zip apache-iotdb-${project.version}-client-cpp-${os.classifier}-bin.zip apache-iotdb-${project.version}-grafana-plugin-bin.zip + apache-iotdb-${project.version}-library-udf-bin.zip @@ -164,5 +144,11 @@ ${project.version} pom + + org.apache.iotdb + library-udf + ${project.version} + jar + diff --git a/distribution/src/assembly/all.xml b/distribution/src/assembly/all.xml index fb48d282c06d..aa17f03e5d78 100644 --- a/distribution/src/assembly/all.xml +++ b/distribution/src/assembly/all.xml @@ -62,6 +62,10 @@ confignode/conf ${maven.multiModuleProjectDirectory}/confignode/src/assembly/resources/conf + + confignode/conf + ${maven.multiModuleProjectDirectory}/metrics/interface/src/main/assembly/resources/conf + datanode/conf ${maven.multiModuleProjectDirectory}/metrics/interface/src/main/assembly/resources/conf @@ -114,8 +118,8 @@ tools/grafana/application.properties - ${maven.multiModuleProjectDirectory}/server/src/assembly/resources/conf/iotdb-env.sh - datanode/conf/iotdb-env.sh + ${maven.multiModuleProjectDirectory}/server/src/assembly/resources/conf/datanode-env.sh + datanode/conf/datanode-env.sh 0755 diff --git a/distribution/src/assembly/cluster.xml b/distribution/src/assembly/cluster.xml deleted file mode 100644 index 7f5903cd945b..000000000000 --- a/distribution/src/assembly/cluster.xml +++ /dev/null @@ -1,81 +0,0 @@ - - - - cluster-bin - - dir - zip - - apache-iotdb-${project.version}-cluster-bin - - - - *:iotdb-cluster:zip:* - *:iotdb-cli:zip:* - - ${file.separator} - ${artifact.artifactId}.${artifact.extension} - true - - - - - - - - - ${maven.multiModuleProjectDirectory}/cluster/src/assembly/resources/sbin - sbin - 0755 - - - - - - - ${maven.multiModuleProjectDirectory}/server/src/assembly/resources/tools - tools - 0755 - - - - ${maven.multiModuleProjectDirectory}/cli/src/assembly/resources/sbin - sbin - 0755 - - - ${maven.multiModuleProjectDirectory}/cli/src/assembly/resources/tools - tools - 0755 - - - - - ${maven.multiModuleProjectDirectory}/server/src/assembly/resources/conf/iotdb-env.sh - conf/iotdb-env.sh - 0755 - - - - common-files.xml - - diff --git a/distribution/src/assembly/confignode.xml 
b/distribution/src/assembly/confignode.xml index 3c830e28c71e..a3be40130038 100644 --- a/distribution/src/assembly/confignode.xml +++ b/distribution/src/assembly/confignode.xml @@ -37,6 +37,10 @@ + + conf + ${maven.multiModuleProjectDirectory}/metrics/interface/src/main/assembly/resources/conf + ${maven.multiModuleProjectDirectory}/confignode/src/assembly/resources/sbin sbin diff --git a/distribution/src/assembly/library-udf.xml b/distribution/src/assembly/library-udf.xml new file mode 100644 index 000000000000..1501cde10410 --- /dev/null +++ b/distribution/src/assembly/library-udf.xml @@ -0,0 +1,49 @@ + + + + library-udf-bin + + dir + zip + + apache-iotdb-${project.version}-library-udf-bin + + + + org.apache.iotdb:library-udf:jar:${project.version} + + ext/udf + ${artifact.artifactId}.${artifact.extension} + false + + + + + ${maven.multiModuleProjectDirectory}/library-udf/src/assembly/tools + tools + 0755 + + + + common-files.xml + + diff --git a/distribution/src/assembly/server.xml b/distribution/src/assembly/server.xml index adbeb5c90f7a..e774d8971167 100644 --- a/distribution/src/assembly/server.xml +++ b/distribution/src/assembly/server.xml @@ -77,8 +77,8 @@ - ${maven.multiModuleProjectDirectory}/server/src/assembly/resources/conf/iotdb-env.sh - conf/iotdb-env.sh + ${maven.multiModuleProjectDirectory}/server/src/assembly/resources/conf/datanode-env.sh + conf/datanode-env.sh 0755 diff --git a/docker/src/main/DockerCompose/GrafanaPlugin/Dockerfile-0.14.0-iotdb b/docker/src/main/DockerCompose/GrafanaPlugin/Dockerfile-0.14.0-iotdb new file mode 100644 index 000000000000..421b7f24caee --- /dev/null +++ b/docker/src/main/DockerCompose/GrafanaPlugin/Dockerfile-0.14.0-iotdb @@ -0,0 +1,42 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +FROM openjdk:11-jre-slim +RUN apt update \ + # procps is for `free` command + && apt install wget unzip lsof procps -y \ + && wget https://downloads.apache.org/iotdb/0.14.0/apache-iotdb-0.14.0-server-bin.zip \ + # if you are in China, use the following URL + #&& wget https://mirrors.tuna.tsinghua.edu.cn/apache/iotdb/0.14.0/apache-iotdb-0.14.0-server-bin.zip \ + && unzip apache-iotdb-0.14.0-server-bin.zip \ + && rm apache-iotdb-0.14.0-server-bin.zip \ + && mv apache-iotdb-0.14.0-server-bin /iotdb \ + && apt remove wget unzip -y \ + && apt autoremove -y \ + && apt purge --auto-remove -y \ + && apt clean -y +EXPOSE 6667 +EXPOSE 31999 +EXPOSE 5555 +EXPOSE 8181 +EXPOSE 18080 +VOLUME /iotdb/data +VOLUME /iotdb/logs +ENV PATH="/iotdb/sbin/:/iotdb/tools/:${PATH}" +ENTRYPOINT ["/iotdb/sbin/start-server.sh"] diff --git a/docker/src/main/DockerCompose/GrafanaPlugin/docker-compose.yml b/docker/src/main/DockerCompose/GrafanaPlugin/docker-compose.yml new file mode 100644 index 000000000000..26a90474f7e1 --- /dev/null +++ b/docker/src/main/DockerCompose/GrafanaPlugin/docker-compose.yml @@ -0,0 +1,48 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +version: "3.7" + +services: + grafana: + image: grafana/grafana:8.2.5 + ports: + - 3000:3000 + environment: + - GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=iotdb + - GF_INSTALL_PLUGINS=https://mirrors.tuna.tsinghua.edu.cn/apache/iotdb/0.14.0/apache-iotdb-0.14.0-grafana-plugin-bin.zip;apache-iotdb-0.14.0-grafana-plugin-bin + iotdb: + image: apache/iotdb0.14 + build: + context: ./ + dockerfile: Dockerfile-0.14.0-iotdb + volumes: + - ./iotdb/conf/iotdb-rest.properties:/iotdb/conf/iotdb-rest.properties + - ./iotdb/data/:/iotdb/data + - ./iotdb/logs:/iotdb/logs + ports: + - 6667:6667 + - 18080:18080 + - 5555:5555 + - 31999:31999 + - 8181:8181 + alertmanager: + image: prom/alertmanager:v0.21.0 + ports: + - 9093:9093 \ No newline at end of file diff --git a/docker/src/main/DockerCompose/GrafanaPlugin/iotdb/conf/iotdb-rest.properties b/docker/src/main/DockerCompose/GrafanaPlugin/iotdb/conf/iotdb-rest.properties new file mode 100644 index 000000000000..75d0ae1b06cb --- /dev/null +++ b/docker/src/main/DockerCompose/GrafanaPlugin/iotdb/conf/iotdb-rest.properties @@ -0,0 +1,58 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +#################### +### REST Service Configuration +#################### + +# Is the REST service enabled +enable_rest_service=true + +# the binding port of the REST service +# rest_service_port=18080 + +# the default row limit to a REST query response when the rowSize parameter is not given in request +# rest_query_default_row_size_limit=10000 + +# the expiration time of the user login information cache (in seconds) +# cache_expire_in_seconds=28800 + +# maximum number of users can be stored in the user login cache. +# cache_max_num=100 + +# init capacity of users can be stored in the user login cache. +# cache_init_num=10 + +# is SSL enabled +# enable_https=false + +# SSL key store path +# key_store_path= + +# SSL key store password +# key_store_pwd= + +# SSL trust store path +# trust_store_path= + +# SSL trust store password. +# trust_store_pwd= + +# SSL timeout (in seconds) +# idle_timeout_in_seconds=50000 diff --git a/docker/src/main/Dockerfile-cluster b/docker/src/main/Dockerfile-cluster deleted file mode 100644 index abc324e7f644..000000000000 --- a/docker/src/main/Dockerfile-cluster +++ /dev/null @@ -1,44 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -# docker build context is the root path of the repository - -FROM openjdk:11-jre-slim - -ADD distribution/target/apache-iotdb-*-cluster-bin.zip / - -RUN apt update \ - && apt install lsof dos2unix procps unzip -y \ - && unzip /apache-iotdb-*-bin.zip -d / \ - && rm /apache-iotdb-*-bin.zip \ - && mv /apache-iotdb-* /iotdb \ - && apt remove unzip -y \ - && apt autoremove -y \ - && apt purge --auto-remove -y \ - && apt clean -y -RUN dos2unix /iotdb/sbin/start-node.sh -RUN dos2unix /iotdb/sbin/../conf/iotdb-env.sh -EXPOSE 6667 -EXPOSE 31999 -EXPOSE 5555 -EXPOSE 8181 -VOLUME /iotdb/data -VOLUME /iotdb/logs -ENV PATH="/iotdb/sbin/:/iotdb/tools/:${PATH}" -ENTRYPOINT ["/iotdb/sbin/start-node.sh"] diff --git a/docker/src/main/Dockerfile-single b/docker/src/main/Dockerfile-single index 57f230397609..cb1b7c0db149 100644 --- a/docker/src/main/Dockerfile-single +++ b/docker/src/main/Dockerfile-single @@ -33,7 +33,7 @@ RUN apt update \ && apt purge --auto-remove -y \ && apt clean -y RUN dos2unix /iotdb/sbin/start-server.sh -RUN dos2unix /iotdb/sbin/../conf/iotdb-env.sh +RUN dos2unix /iotdb/sbin/../conf/datanode-env.sh EXPOSE 6667 EXPOSE 31999 EXPOSE 5555 diff --git a/docker/src/main/Dockerfile-single-influxdb b/docker/src/main/Dockerfile-single-influxdb index b59bf9358570..a0f77d41e212 100644 --- a/docker/src/main/Dockerfile-single-influxdb +++ b/docker/src/main/Dockerfile-single-influxdb @@ -28,13 +28,13 @@ RUN apt update \ && unzip /apache-iotdb-*-bin.zip -d / \ && rm /apache-iotdb-*-bin.zip \ && mv /apache-iotdb-* /iotdb \ - && sed -i '/^# enable_influxdb_rpc_service=false/a enable_influxdb_rpc_service=true' /iotdb/sbin/../conf/iotdb-engine.properties \ + && sed -i '/^# enable_influxdb_rpc_service=false/a enable_influxdb_rpc_service=true' /iotdb/sbin/../conf/iotdb-datanode.properties \ && apt remove unzip -y \ && apt autoremove -y \ && apt purge --auto-remove -y \ && apt clean -y RUN dos2unix /iotdb/sbin/start-server.sh -RUN dos2unix /iotdb/sbin/../conf/iotdb-env.sh +RUN dos2unix /iotdb/sbin/../conf/datanode-env.sh EXPOSE 6667 EXPOSE 31999 EXPOSE 5555 diff --git a/docker/src/main/Dockerfile-single-tc b/docker/src/main/Dockerfile-single-tc index f31c81b8e8ce..71f785fef90f 100644 --- a/docker/src/main/Dockerfile-single-tc +++ b/docker/src/main/Dockerfile-single-tc @@ -33,7 +33,7 @@ RUN apt update \ && apt purge --auto-remove -y \ && apt clean -y RUN dos2unix /iotdb/sbin/start-server.sh -RUN dos2unix /iotdb/sbin/../conf/iotdb-env.sh +RUN dos2unix /iotdb/sbin/../conf/datanode-env.sh EXPOSE 6667 EXPOSE 31999 EXPOSE 5555 diff --git a/docs/Development/ContributeGuide.md b/docs/Development/ContributeGuide.md index 04872ec23816..5ab5b6340689 100644 --- a/docs/Development/ContributeGuide.md +++ b/docs/Development/ContributeGuide.md @@ -122,20 +122,6 @@ plugin](https://github.com/diffplug/spotless/tree/main/plugin-maven) together wi 8. Before you submit codes, you can use `mvn spotless:check` to check your codes manually, and use `mvn spotless:apply` to format your codes. -**NOTICE (if you are using JDK16+)**: IF you are using JDK16+, you have to create a file called -`jvm.config`, put it under `.mvn/`, before you use `spotless:apply`. 
-The file contains the following content: -``` ---add-exports jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.file=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED ---add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED -``` - -This is [an issue of Spotless](https://github.com/diffplug/spotless/issues/834), -Once the issue is fixed, we can remove this file. - ## Code Sytle We use the [maven-checkstyle-plugin](https://checkstyle.sourceforge.io/config_filefilters.html) to make Java codes obey a consistent ruleset defined in [checkstyle.xml](https://github.com/apache/iotdb/blob/master/checkstyle.xml) under the project root. diff --git a/docs/Development/HowToJoin.md b/docs/Development/HowToJoin.md index e0a1df7cd7f5..3ef4e7f9abcb 100644 --- a/docs/Development/HowToJoin.md +++ b/docs/Development/HowToJoin.md @@ -41,7 +41,7 @@ After creation, please send an email to the mailing list including **self introd ## Subscribe WeChat public account Scan the QR code to subscribe official public account: Apache IoTDB -![IoTDB WeChat public account QR](https://img-blog.csdnimg.cn/907f9d614b2f47e3b0c66a7c53bcbd5d.png#pic_left) +![IoTDB WeChat public account QR](https://user-images.githubusercontent.com/7240743/98633970-73671c00-235d-11eb-9913-f38e570fcfc8.png) ## Long term considerations ### Learn the basic use of IoTDB diff --git a/docs/Development/HowtoContributeCode.md b/docs/Development/HowtoContributeCode.md new file mode 100644 index 000000000000..9e4d23cd4f6b --- /dev/null +++ b/docs/Development/HowtoContributeCode.md @@ -0,0 +1,160 @@ + + +# How to Contribute Code +## Process +Tasks are managed as issues in JIRA. +The full lifecycle of an Issue: Create an issue -> assign an issue -> submit a pr(pull request) -> review a pr -> squash merge a pr -> close an issue. + +## Contributing Conventions + + ### Creating an Issue + There are a few things to keep in mind when creating an issue in [ JIRA ](https://issues.apache.org/JIRA/projects/IOTDB/issues) + 1. Naming: Try to make it clear and easy to understand. Examples include supporting a new aggregate query function (avg) and optimizing the performance of querying raw data . The name of the issue will later be used as the release note. + + 2. Description: Issue of new features and improvements should be clear. Bug reports should cover the environment, load, phenomenon (abnormal log), the affected version(s) , etc. And it's best to include ways to reproduce the bug. + + ### Assigning an Issue + When assigning an issue in JIRA for yourself, it's recommended to add the comment, "I'm doing this", otherwise there might be duplication of effort. +Note: If you can't assign an issue, it is because your account doesn't have the necessary permission. +To address this, please send an email to the dev@iotdb.apache.org mailing list with the title of [application] apply for permission to assign issues to XXX (your JIRA username).。 +### Submitting a PR +#### What you need to submit +Issue type : New Feature + +1.Submit the user manual and the pr for code changes. + +A user manual is mainly for helping users understand how the functions work and how to use them. It is recommended to contain scenario and background, configuration, interface description and examples. The user manual of the official website is placed in the docs/UserGuide folder of apache/iotdb repository. 
To update the user manual directory, including adding, deleting documents and renaming documents, you need to make corresponding changes in the file (path: site/src/main/.vuepress/config.js) in the master branch. + +2. Submit UT (unit test) or IT (integration test). + + +When adding unit tests or integration tests, try to cover as many cases as possible. xxTest (path: iotdb/server/src/test/java/org/apache/iotdb/db/query/aggregation/) and xxIT (path: iotdb/integration/src/test/java/org/apache/iotdb/db/integration/) can be used as references. + +Issue type: Improvement + +1. Submit the code and UT (if a new scenario is introduced). +2. It is better to also submit test results, including quantified improvements and possible negative effects. + +Issue type: Bug + +Submit UT or IT that can reproduce the bug. + +#### Coding Reminders +Branch management + +The IoTDB version naming method is 0.{major version}.{minor version}. For example, for version 0.12.4, 12 is the major version and 4 is the minor version. + +As the current development branch, the master branch corresponds to the next major release version. When each major version is released for the first time, a separate branch will be created for archiving. For example, codes of the 0.12.x versions are placed under the rel/0.12 branch. + +If a bug of a released version is found and fixed, the bugfix PR should be submitted to all branches that are newer than that branch. For example, a PR fixing a version 0.11.x bug should be submitted to the rel/0.11 branch, the rel/0.12 branch and the master branch. + +Code formatting +It is required to use "mvn spotless:apply" to format the code before committing; otherwise, the CI code format check will fail. + +Notes + +1. The default values need to be consistent between the iotdb-datanode.properties file and the IoTDBConfig class. + +2. To modify the configuration parameters, the following files need to be modified: + +a. Configuration file: server/src/assembly/resources/conf/iotdb-datanode.properties + +b. Codes: IoTDBDescriptor, IoTDBConfig + +c. Documentation: docs/UserGuide/Reference/Config-Manual.md + +3. To modify configuration parameters in IT and UT, you need to modify them in the method annotated by @Before and reset them in the method annotated by @After (see the example sketch below, just before "After Submitting a PR"). In this way, you can avoid impact on other tests. The parameters of the compaction module are placed in the CompactionConfigRestorer file. + +#### PR Naming +Format: [To branch] [Jira number] PR name + +Example: [To rel/0.12] [IoTDB-1907] implement customized sync process: sender + +To branch + +It is required when submitting a PR to a non-master branch (such as rel/0.13, in which case the PR name should contain [To rel/0.13]) and not required when submitting to the master branch. + +Jira number + +The name should start with a JIRA number so that the PR can be automatically linked to the corresponding issue, for example [IOTDB-1907] implement customized sync process: sender. +This auto-linking won't happen if the PR is created without any JIRA number or with one that is improper, in which case you need to correct the PR name and manually paste the PR link to the issue page by adding a comment or attaching a link. + +#### PR Description +Usually, the PR name can't reflect all changes, so it is better to add a description of what has been changed and give explanations for any difficult-to-understand part. + +The description of a bug-fixing PR needs to cover the cause of the bug and how to fix it, as well as the added UT/IT test cases and any associated negative effects.
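+The following is a minimal, hypothetical sketch of coding reminder 3 above: the configuration value is remembered in the method annotated by @Before and restored in the method annotated by @After, so the change cannot leak into other tests. `IoTDBDescriptor.getInstance().getConfig()` is the usual entry point to the DataNode configuration; the `getExampleThreshold`/`setExampleThreshold` accessors and the value 1024 are placeholders for whatever parameter your test actually needs.
+```java
+import org.apache.iotdb.db.conf.IoTDBDescriptor;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class ConfigParameterIT {
+
+  // remembered value of the (hypothetical) parameter changed by this test
+  private int previousThreshold;
+
+  @Before
+  public void setUp() {
+    // save the current value, then apply the value this test needs
+    previousThreshold = IoTDBDescriptor.getInstance().getConfig().getExampleThreshold();
+    IoTDBDescriptor.getInstance().getConfig().setExampleThreshold(1024);
+  }
+
+  @After
+  public void tearDown() {
+    // restore the original value so other tests are not affected
+    IoTDBDescriptor.getInstance().getConfig().setExampleThreshold(previousThreshold);
+  }
+
+  @Test
+  public void testWithAdjustedThreshold() {
+    // the actual test body goes here
+  }
+}
+```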
+ +#### After Submitting a PR + +Send an email that describes the PR in detail to the dev@iotdb.apache.org mailing list, then carefully read and respond to all review comments, and make changes after reaching a consensus. + +### Reviewing a PR +Checklist +1. Is the PR named correctly, and does every new feature or bug fix have an issue number? +2. Is the PR description sufficiently clear? +3. Are UT (unit test) or performance test reports submitted? +4. Is the user manual of the new feature submitted? +5. It should not contain code modifications for other issues. Irrelevant modifications should be placed in other PRs. + +How to review a PR + +1. Click Files changed + + +2. Add review comments. First, move your mouse to the left; a plus sign will appear, click it. Second, write the comment. Third, click Start a Review. At this step, all review comments are saved temporarily and others cannot see them. +3. Submit the review. After all the comments are added, click Review Changes and select your opinion. Select "Approve" for changes that can be merged as they are. Select "Request Changes" or "Comment" for those that need to be modified. Select "Comment" for those you are not sure about. Finally, submit the review; the author of the PR can see it only after it is submitted. + + +### Merging a PR +Make sure that all review comments are responded to. + +Obtain approval from at least one committer. + +Choose squash merge. You can choose rebase only when the author has a single commit with a clear commit message. + +Close the corresponding issue in JIRA, and mark the version in which it is fixed or completed. Note that solving or closing an issue requires adding a PR link or description to the issue, so that changes can be tracked via the issue. + +## How to Prepare User Manual and Design Document +The user manual and other documentation on the official website are maintained in the apache/iotdb repository. + +The index items of each page of the official website are maintained in the file "site/src/main/.vuepress/config.js" of the master branch, while the specific content of the user manual is maintained in the branch of the corresponding version (for example, the user manual for 0.12 is in rel/0.12). + +User manual + +It is mainly for helping users understand how the functions work and how to use them. +It is recommended that the user manual contain the scenario and background, configuration, interface description and examples. + +Design document + +It is mainly for helping developers understand how to implement a function, including the organization of code modules and algorithms. +It is recommended that the design document contain the background, design goals, idea, main modules and interface design. + +### Modifying User Manual +Apart from the specific files being modified, the process is the same as contributing code. + +The English user manual is placed in the docs/UserGuide folder. + +To update the user manual directory, including adding, deleting documents and renaming documents, you need to make corresponding changes in the file (path: site/src/main/.vuepress/config.js) in the master branch. + +### Modifying the Top Navigation Bar of the Official Website + +Search for nav in the file (path: site/src/main/.vuepress/config.js), and make corresponding modifications by referencing the existing code, then submit a PR and wait for merging. Documents to be added can be placed in the docs folder.
\ No newline at end of file diff --git a/docs/Download/README.md b/docs/Download/README.md index 7c6ad156fdb7..9dcbede3134c 100644 --- a/docs/Download/README.md +++ b/docs/Download/README.md @@ -85,7 +85,7 @@ Legacy version are available here: [https://archive.apache.org/dist/iotdb/](http ``` # Linux > sudo sysctl -w net.core.somaxconn=65535 - + # FreeBSD or Darwin > sudo sysctl -w kern.ipc.somaxconn=65535 ``` @@ -96,7 +96,16 @@ Legacy version are available here: [https://archive.apache.org/dist/iotdb/](http with what you set in the old version. * stop the old vesion instance, and start the new one. +- **How to upgrade from v.13.x to v0.14.x?** + + - **Version 0.14 has changed the SQL syntax conventions (please refer to the syntax conventions section of the user manual), the incompatibilities are as follows:** + - **Identifiers that are not quoted with backquotes are not allowed to be pure numbers and only allowed to contain letters, Chinese characters, and underscores. If the above occurs in an identifier, use backquotes to enclose the identifier.** + - **Identifiers no longer support quoting with single and double quotes, please use backquotes instead.** + - **When using the path node name in the Session API, it needs to be consistent with that in the SQL statement. If the path node is a pure number 111, it needs to be enclosed in backquotes in the SQL statement and written as \`111\`, then when using the Session API, the corresponding parameter also needs to be written as \`111\`.** + - In order to ensure the stability of UDF-related APIs, in version 0.14, UDF-related APIs are separated into an independent module and no longer depend on the tsfile package. The implemented UDFs need to rewrite the code, replace `TsDataType` with `Type`, and replace `org .apache.iotdb.tsfile.utils.Binary` with `org.apache.iotdb.udf.api.type.Binary`, then redo the packaging and loading process. + - How to upgrade from v.12.x to v0.13.x? + * The data format (i.e., TsFile data) of v0.12.x and v0.13.x are compatible, but the WAL file is incompatible. So, you can follow the steps: * ** Execute `SET STSTEM TO READONLY` command in CLI. ** @@ -108,9 +117,13 @@ Legacy version are available here: [https://archive.apache.org/dist/iotdb/](http other settings if you want. * Stop IoTDB v0.12.x instance, and then start v0.13.x. * **After the steps above, please make sure the `iotdb_version` in `data/system/schema/system.properties` file is `0.13.x`. -If not, please change it to `0.13.x` manually.** + If not, please change it to `0.13.x` manually.** * __NOTICE: V0.13 changes many settings in conf/iotdb-engine.properties, so do not use v0.12's configuration file directly.__ + * **In 0.13, the SQL syntax has been changed. The identifiers not enclosed in backquotes can only contain the following characters, otherwise they need to be enclosed in backquotes.** + * **[0-9 a-z A-Z _ : @ # $ { }] (letters, digits, some special characters)** + * **['\u2E80'..'\u9FFF'] (UNICODE Chinese characters)** + * **In 0.13, if the path node name in the `SELECT` clause consists of pure numbers, it needs to be enclosed in backquotes to distinguish it from the constant in the expression. For example, in the statement "select 123 + \`123\` from root.sg", the former 123 represents a constant, and the latter \`123\` will be spliced with root.sg, indicating the path root.sg.\`123\`.** - How to upgrade from v.11.x or v0.10.x to v0.12.x? * Upgrading from v0.11 or v0.10 to v0.12 is similar as v0.9 to v0.10. 
The upgrade tool will rewrite the data files automatically. @@ -157,10 +170,10 @@ If not, please change it to `0.13.x` manually.** directories point to the folders set in v0.8.x (or the backup folder). You can also modify other settings if you want. * Stop IoTDB v0.8 instance, and start v0.9.x, then the IoTDB will upgrade data file format automatically. - - + +​ # All releases diff --git a/docs/UserGuide/API/Programming-Java-Native-API.md b/docs/UserGuide/API/Programming-Java-Native-API.md index 699c60eb4431..247af53d088e 100644 --- a/docs/UserGuide/API/Programming-Java-Native-API.md +++ b/docs/UserGuide/API/Programming-Java-Native-API.md @@ -45,15 +45,13 @@ In root directory: ``` -## Syntax Description +## Syntax Convention - **IoTDB-SQL interface:** The input SQL parameter needs to conform to the [syntax conventions](../Reference/Syntax-Conventions.md) and be escaped for JAVA strings. For example, you need to add a backslash before the double-quotes. (That is: after JAVA escaping, it is consistent with the SQL statement executed on the command line.) - **Other interfaces:** - - The node names in path or path prefix as parameter: - - The node names which should be escaped by backticks (`) in the SQL statement, and escaping is not required here. - - The node names enclosed in single or double quotes still need to be enclosed in single or double quotes and must be escaped for JAVA strings. - - For the `checkTimeseriesExists` interface, since the IoTDB-SQL interface is called internally, the time-series pathname must be consistent with the SQL syntax conventions and be escaped for JAVA strings. + - The node names in a path or path prefix as parameters: node names which should be escaped by backticks (`) in the SQL statement also need to be escaped here. - Identifiers (such as template names) as parameters: The identifiers which should be escaped by backticks (`) in the SQL statement, and escaping is not required here. +- **Code examples for the syntax conventions can be found at:** `example/session/src/main/java/org/apache/iotdb/SyntaxConventionRelatedExample.java` ## Native APIs diff --git a/docs/UserGuide/API/Programming-MQTT.md b/docs/UserGuide/API/Programming-MQTT.md index a06cb6d3c08e..9e882edeb74f 100644 --- a/docs/UserGuide/API/Programming-MQTT.md +++ b/docs/UserGuide/API/Programming-MQTT.md @@ -35,7 +35,7 @@ The Built-in MQTT Service provide the ability of direct connection to IoTDB thro and then write the data into storage immediately. The MQTT topic corresponds to IoTDB timeseries. The messages payload can be format to events by `PayloadFormatter` which loaded by java SPI, and the default implementation is `JSONPayloadFormatter`. -The default `json` formatter support two json format, and the following is an MQTT message payload example: +The default `json` formatter supports two JSON formats as well as JSON arrays of them. The following is an MQTT message payload example: ```json { @@ -47,18 +47,19 @@ The default `json` formatter support two json format, and the following is an MQ ``` or ```json -{ + { "device":"root.sg.d1", "timestamps":[1586076045524,1586076065526], "measurements":["s1","s2"], "values":[[0.530635,0.530635], [0.530655,0.530695]] - } + } ``` +or a JSON array of the above two formats. ### MQTT Configurations -The IoTDB MQTT service load configurations from `${IOTDB_HOME}/${IOTDB_CONF}/iotdb-engine.properties` by default. +The IoTDB MQTT service loads configurations from `${IOTDB_HOME}/${IOTDB_CONF}/iotdb-datanode.properties` by default.
Configurations are as follows: @@ -156,7 +157,7 @@ public class CustomizedJsonPayloadFormatter implements PayloadFormatter { @Override public String getName() { - // set the value of mqtt_payload_formatter in iotdb-engine.properties as the following string: + // set the value of mqtt_payload_formatter in iotdb-datanode.properties as the following string: return "CustomizedJson"; } } @@ -169,8 +170,8 @@ public class CustomizedJsonPayloadFormatter implements PayloadFormatter { Then, in your server: * Create ${IOTDB_HOME}/ext/mqtt/ folder, and put the jar into this folder. -* Update configuration to enable MQTT service. (`enable_mqtt_service=true` in `conf/iotdb-engine.properties`) -* Set the value of `mqtt_payload_formatter` in `conf/iotdb-engine.properties` as the value of getName() in your implementation +* Update configuration to enable MQTT service. (`enable_mqtt_service=true` in `conf/iotdb-datanode.properties`) +* Set the value of `mqtt_payload_formatter` in `conf/iotdb-datanode.properties` as the value of getName() in your implementation , in this example, the value is `CustomizedJson` * Launch the IoTDB server. * Now IoTDB will use your implementation to parse the MQTT message. diff --git a/docs/UserGuide/API/Programming-Python-Native-API.md b/docs/UserGuide/API/Programming-Python-Native-API.md index c200586bbf93..0f962ebb4457 100644 --- a/docs/UserGuide/API/Programming-Python-Native-API.md +++ b/docs/UserGuide/API/Programming-Python-Native-API.md @@ -251,6 +251,98 @@ session.execute_query_statement(sql) session.execute_non_query_statement(sql) ``` +* Execute statement + +```python +session.execute_statement(sql) +``` + +### Schema Template +#### Create Schema Template +The step for creating a metadata template is as follows +1. Create the template class +2. Adding child Node,InternalNode and MeasurementNode can be chose +3. Execute create schema template function + +```python +template = Template(name=template_name, share_time=True) + +i_node_gps = InternalNode(name="GPS", share_time=False) +i_node_v = InternalNode(name="vehicle", share_time=True) +m_node_x = MeasurementNode("x", TSDataType.FLOAT, TSEncoding.RLE, Compressor.SNAPPY) + +i_node_gps.add_child(m_node_x) +i_node_v.add_child(m_node_x) + +template.add_template(i_node_gps) +template.add_template(i_node_v) +template.add_template(m_node_x) + +session.create_schema_template(template) +``` +#### Modify Schema Template measurements +Modify measurements in a template, the template must be already created. These are functions that add or delete some measurement nodes. 
+* add node in template +```python +session.add_measurements_in_template(template_name, measurements_path, data_types, encodings, compressors, is_aligned) +``` + +* delete node in template +```python +session.delete_node_in_template(template_name, path) +``` + +#### Set Schema Template +```python +session.set_schema_template(template_name, prefix_path) +``` + +#### Uset Schema Template +```python +session.unset_schema_template(template_name, prefix_path) +``` + +#### Show Schema Template +* Show all schema templates +```python +session.show_all_templates() +``` +* Count all measurements in templates +```python +session.count_measurements_in_template(template_name) +``` + +* Judge whether the path is measurement or not in templates, This measurement must be in the template +```python +session.count_measurements_in_template(template_name, path) +``` + +* Judge whether the path is exist or not in templates, This path may not belong to the template +```python +session.is_path_exist_in_template(template_name, path) +``` + +* Show nodes under in schema template +```python +session.show_measurements_in_template(template_name) +``` + +* Show the path prefix where a schema template is set +```python +session.show_paths_template_set_on(template_name) +``` + +* Show the path prefix where a schema template is used (i.e. the time series has been created) +```python +session.show_paths_template_using_on(template_name) +``` + +#### Drop Schema Template +Delete an existing metadata template,dropping an already set template is not supported +```python +session.drop_schema_template("template_python") +``` + ### Pandas Support @@ -299,6 +391,150 @@ class MyTestCase(unittest.TestCase): by default it will load the image `apache/iotdb:latest`, if you want a specific version just pass it like e.g. `IoTDBContainer("apache/iotdb:0.12.0")` to get version `0.12.0` running. +### IoTDB DBAPI + +IoTDB DBAPI implements the Python DB API 2.0 specification (https://peps.python.org/pep-0249/), which defines a common +interface for accessing databases in Python. + +#### Examples ++ Initialization + +The initialized parameters are consistent with the session part (except for the sqlalchemy_mode). +```python +from iotdb.dbapi import connect + +ip = "127.0.0.1" +port_ = "6667" +username_ = "root" +password_ = "root" +conn = connect(ip, port_, username_, password_,fetch_size=1024,zone_id="UTC+8",sqlalchemy_mode=False) +cursor = conn.cursor() +``` ++ simple SQL statement execution +```python +cursor.execute("SELECT ** FROM root") +for row in cursor.fetchall(): + print(row) +``` + ++ execute SQL with parameter + +IoTDB DBAPI supports pyformat style parameters +```python +cursor.execute("SELECT ** FROM root WHERE time < %(time)s",{"time":"2017-11-01T00:08:00.000"}) +for row in cursor.fetchall(): + print(row) +``` + ++ execute SQL with parameter sequences +```python +seq_of_parameters = [ + {"timestamp": 1, "temperature": 1}, + {"timestamp": 2, "temperature": 2}, + {"timestamp": 3, "temperature": 3}, + {"timestamp": 4, "temperature": 4}, + {"timestamp": 5, "temperature": 5}, +] +sql = "insert into root.cursor(timestamp,temperature) values(%(timestamp)s,%(temperature)s)" +cursor.executemany(sql,seq_of_parameters) +``` + ++ close the connection and cursor +```python +cursor.close() +conn.close() +``` + +### IoTDB SQLAlchemy Dialect (Experimental) +The SQLAlchemy dialect of IoTDB is written to adapt to Apache Superset. +This part is still being improved. +Please do not use it in the production environment! 
+#### Mapping of the metadata +The data model used by SQLAlchemy is a relational data model, which describes the relationships between different entities through tables. +While the data model of IoTDB is a hierarchical data model, which organizes the data through a tree structure. +In order to adapt IoTDB to the dialect of SQLAlchemy, the original data model in IoTDB needs to be reorganized. +Converting the data model of IoTDB into the data model of SQLAlchemy. + +The metadata in the IoTDB are: + +1. Storage Group +2. Path +3. Entity +4. Measurement + +The metadata in the SQLAlchemy are: +1. Schema +2. Table +3. Column + +The mapping relationship between them is: + +| The metadata in the SQLAlchemy | The metadata in the IoTDB | +| -------------------- | ---------------------------------------------- | +| Schema | Storage Group | +| Table | Path ( from storage group to entity ) + Entity | +| Column | Measurement | + +The following figure shows the relationship between the two more intuitively: + +![sqlalchemy-to-iotdb](https://github.com/apache/iotdb-bin-resources/blob/main/docs/UserGuide/API/IoTDB-SQLAlchemy/sqlalchemy-to-iotdb.png?raw=true) + +#### Data type mapping +| data type in IoTDB | data type in SQLAlchemy | +|--------------------|-------------------------| +| BOOLEAN | Boolean | +| INT32 | Integer | +| INT64 | BigInteger | +| FLOAT | Float | +| DOUBLE | Float | +| TEXT | Text | +| LONG | BigInteger | + +#### Example + ++ execute statement + +```python +from sqlalchemy import create_engine + +engine = create_engine("iotdb://root:root@127.0.0.1:6667") +connect = engine.connect() +result = connect.execute("SELECT ** FROM root") +for row in result.fetchall(): + print(row) +``` + ++ ORM (now only simple queries are supported) + +```python +from sqlalchemy import create_engine, Column, Float, BigInteger, MetaData +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker + +metadata = MetaData( + schema='root.factory' +) +Base = declarative_base(metadata=metadata) + + +class Device(Base): + __tablename__ = "room2.device1" + Time = Column(BigInteger, primary_key=True) + temperature = Column(Float) + status = Column(Float) + + +engine = create_engine("iotdb://root:root@127.0.0.1:6667") + +DbSession = sessionmaker(bind=engine) +session = DbSession() + +res = session.query(Device.status).filter(Device.temperature > 1) + +for row in res: + print(row) +``` + ## Developers diff --git a/docs/UserGuide/Administration-Management/Administration.md b/docs/UserGuide/Administration-Management/Administration.md index 6411080736db..e46ec44891d7 100644 --- a/docs/UserGuide/Administration-Management/Administration.md +++ b/docs/UserGuide/Administration-Management/Administration.md @@ -50,11 +50,11 @@ According to the [sample data](https://github.com/thulab/iotdb/files/4438687/Oth ### Create User -We use `CREATE USER ` to create users. For example, we can use root user who has all privileges to create two users for ln and sgcc groups, named ln\_write\_user and sgcc\_write\_user, with both passwords being write\_pwd. The SQL statement is: +We use `CREATE USER ` to create users. For example, we can use root user who has all privileges to create two users for ln and sgcc groups, named ln\_write\_user and sgcc\_write\_user, with both passwords being write\_pwd. It is recommended to wrap the username in backtick(`). 
The SQL statement is: ``` -CREATE USER ln_write_user 'write_pwd' -CREATE USER sgcc_write_user 'write_pwd' +CREATE USER `ln_write_user` 'write_pwd' +CREATE USER `sgcc_write_user` 'write_pwd' ``` Then use the following SQL statement to show the user: @@ -64,9 +64,9 @@ LIST USER As can be seen from the result shown below, the two users have been created: ``` -IoTDB> CREATE USER ln_write_user 'write_pwd' +IoTDB> CREATE USER `ln_write_user` 'write_pwd' Msg: The statement is executed successfully. -IoTDB> CREATE USER sgcc_write_user 'write_pwd' +IoTDB> CREATE USER `sgcc_write_user` 'write_pwd' Msg: The statement is executed successfully. IoTDB> LIST USER +---------------+ @@ -96,18 +96,21 @@ Msg: 602: No permissions for this operation INSERT Now, we use root user to grant the two users write privileges to the corresponding storage groups. -We use `GRANT USER PRIVILEGES ON ` to grant user privileges. For example: +We use `GRANT USER PRIVILEGES ON ` to grant user privileges(ps: grant create user does not need path). For example: ``` -GRANT USER ln_write_user PRIVILEGES INSERT_TIMESERIES on root.ln.** -GRANT USER sgcc_write_user PRIVILEGES INSERT_TIMESERIES on root.sgcc.** +GRANT USER `ln_write_user` PRIVILEGES INSERT_TIMESERIES on root.ln.** +GRANT USER `sgcc_write_user` PRIVILEGES INSERT_TIMESERIES on root.sgcc.** +GRANT USER `ln_write_user` PRIVILEGES CREATE_USER ``` The execution result is as follows: ``` -IoTDB> GRANT USER ln_write_user PRIVILEGES INSERT_TIMESERIES on root.ln.** +IoTDB> GRANT USER `ln_write_user` PRIVILEGES INSERT_TIMESERIES on root.ln.** Msg: The statement is executed successfully. -IoTDB> GRANT USER sgcc_write_user PRIVILEGES INSERT_TIMESERIES on root.sgcc.** +IoTDB> GRANT USER `sgcc_write_user` PRIVILEGES INSERT_TIMESERIES on root.sgcc.** +Msg: The statement is executed successfully. +IoTDB> GRANT USER `ln_write_user` PRIVILEGES CREATE_USER Msg: The statement is executed successfully. ``` @@ -119,19 +122,22 @@ Msg: The statement is executed successfully. ### Revoker User Privilege -After granting user privileges, we could use `REVOKE USER PRIVILEGES ON ` to revoke the granted user privileges. For example, use root user to revoke the privilege of ln_write_user and sgcc_write_user: +After granting user privileges, we could use `REVOKE USER PRIVILEGES ON ` to revoke the granted user privileges(ps: revoke create user does not need path). For example, use root user to revoke the privilege of ln_write_user and sgcc_write_user: ``` -REVOKE USER ln_write_user PRIVILEGES INSERT_TIMESERIES on root.ln.** -REVOKE USER sgcc_write_user PRIVILEGES INSERT_TIMESERIES on root.sgcc.** +REVOKE USER `ln_write_user` PRIVILEGES INSERT_TIMESERIES on root.ln.** +REVOKE USER `sgcc_write_user` PRIVILEGES INSERT_TIMESERIES on root.sgcc.** +REVOKE USER `ln_write_user` PRIVILEGES CREATE_USER ``` The execution result is as follows: ``` -REVOKE USER ln_write_user PRIVILEGES INSERT_TIMESERIES on root.ln.** +REVOKE USER `ln_write_user` PRIVILEGES INSERT_TIMESERIES on root.ln.** +Msg: The statement is executed successfully. +REVOKE USER `sgcc_write_user` PRIVILEGES INSERT_TIMESERIES on root.sgcc.** Msg: The statement is executed successfully. -REVOKE USER sgcc_write_user PRIVILEGES INSERT_TIMESERIES on root.sgcc.** +REVOKE USER `ln_write_user` PRIVILEGES CREATE_USER Msg: The statement is executed successfully. 
``` @@ -149,70 +155,98 @@ Here are all related SQL statements: ``` CREATE USER ; -Eg: IoTDB > CREATE USER thulab 'pwd'; +Eg: IoTDB > CREATE USER `thulab` 'pwd'; ``` * Delete User ``` DROP USER ; -Eg: IoTDB > DROP USER xiaoming; +Eg: IoTDB > DROP USER `xiaoming`; ``` * Create Role ``` CREATE ROLE ; -Eg: IoTDB > CREATE ROLE admin; +Eg: IoTDB > CREATE ROLE `admin`; ``` * Delete Role ``` DROP ROLE ; -Eg: IoTDB > DROP ROLE admin; +Eg: IoTDB > DROP ROLE `admin`; ``` * Grant User Privileges ``` GRANT USER PRIVILEGES ON ; -Eg: IoTDB > GRANT USER tempuser PRIVILEGES DELETE_TIMESERIES on root.ln.**; +Eg: IoTDB > GRANT USER `tempuser` PRIVILEGES DELETE_TIMESERIES on root.ln.**; +``` + +- Grant User All Privileges + +``` +GRANT USER PRIVILEGES ALL ON ; +Eg: IoTDB > grant user renyuhua privileges all on root.** ``` * Grant Role Privileges ``` GRANT ROLE PRIVILEGES ON ; -Eg: IoTDB > GRANT ROLE temprole PRIVILEGES DELETE_TIMESERIES ON root.ln.**; +Eg: IoTDB > GRANT ROLE `temprole` PRIVILEGES DELETE_TIMESERIES ON root.ln.**; +``` + +- Grant Role All Privileges + +``` +GRANT ROLE PRIVILEGES ALL ON ; +Eg: IoTDB > GRANT ROLE `temprole` PRIVILEGES ALL ON root.ln.**; ``` * Grant User Role ``` GRANT TO ; -Eg: IoTDB > GRANT temprole TO tempuser; +Eg: IoTDB > GRANT `temprole` TO tempuser; ``` * Revoke User Privileges ``` REVOKE USER PRIVILEGES ON ; -Eg: IoTDB > REVOKE USER tempuser PRIVILEGES DELETE_TIMESERIES on root.ln.**; +Eg: IoTDB > REVOKE USER `tempuser` PRIVILEGES DELETE_TIMESERIES on root.ln.**; +``` + +* Revoke User All Privileges + +``` +REVOKE USER PRIVILEGES ALL ON ; +Eg: IoTDB > REVOKE USER `tempuser` PRIVILEGES ALL on root.ln.**; ``` * Revoke Role Privileges ``` REVOKE ROLE PRIVILEGES ON ; -Eg: IoTDB > REVOKE ROLE temprole PRIVILEGES DELETE_TIMESERIES ON root.ln.**; +Eg: IoTDB > REVOKE ROLE `temprole` PRIVILEGES DELETE_TIMESERIES ON root.ln.**; +``` + +* Revoke All Role Privileges + +``` +REVOKE ROLE PRIVILEGES ALL ON ; +Eg: IoTDB > REVOKE ROLE `temprole` PRIVILEGES ALL ON root.ln.**; ``` * Revoke Role From User ``` REVOKE FROM ; -Eg: IoTDB > REVOKE temprole FROM tempuser; +Eg: IoTDB > REVOKE `temprole` FROM tempuser; ``` * List Users @@ -233,49 +267,49 @@ Eg: IoTDB > LIST ROLE ``` LIST PRIVILEGES USER ON ; -Eg: IoTDB > LIST PRIVILEGES USER sgcc_write_user ON root.sgcc.**; +Eg: IoTDB > LIST PRIVILEGES USER `sgcc_write_user` ON root.sgcc.**; ``` * List Privileges of Roles ``` LIST ROLE PRIVILEGES -Eg: IoTDB > LIST ROLE PRIVILEGES actor; +Eg: IoTDB > LIST ROLE PRIVILEGES `actor`; ``` * List Privileges of Roles(On Specific Path) ``` LIST PRIVILEGES ROLE ON ; -Eg: IoTDB > LIST PRIVILEGES ROLE write_role ON root.sgcc.**; +Eg: IoTDB > LIST PRIVILEGES ROLE `write_role` ON root.sgcc.**; ``` * List Privileges of Users ``` LIST USER PRIVILEGES ; -Eg: IoTDB > LIST USER PRIVILEGES tempuser; +Eg: IoTDB > LIST USER PRIVILEGES `tempuser`; ``` * List Roles of User ``` LIST ALL ROLE OF USER ; -Eg: IoTDB > LIST ALL ROLE OF USER tempuser; +Eg: IoTDB > LIST ALL ROLE OF USER `tempuser`; ``` * List Users of Role ``` LIST ALL USER OF ROLE ; -Eg: IoTDB > LIST ALL USER OF ROLE roleuser; +Eg: IoTDB > LIST ALL USER OF ROLE `roleuser`; ``` * Alter Password ``` ALTER USER SET PASSWORD ; -Eg: IoTDB > ALTER USER tempuser SET PASSWORD 'newpwd'; +Eg: IoTDB > ALTER USER `tempuser` SET PASSWORD 'newpwd'; ``` @@ -295,36 +329,51 @@ At the same time, changes to roles are immediately reflected on all users who ow **List of privileges Included in the System** -|privilege Name|Interpretation| -|:---|:---| -|SET\_STORAGE\_GROUP|set storage 
groups; path dependent| -|DELETE\_STORAGE\_GROUP|delete storage groups; path dependent| -|CREATE\_TIMESERIES|create timeseries; path dependent| -|INSERT\_TIMESERIES|insert data; path dependent| -|READ\_TIMESERIES|query data; path dependent| -|DELETE\_TIMESERIES|delete data or timeseries; path dependent| -|DELETE\_STORAGE\_GROUP|delete storage groups; path dependent| -|CREATE\_USER|create users; path independent| -|DELETE\_USER|delete users; path independent| -|MODIFY\_PASSWORD|modify passwords for all users; path independent; (Those who do not have this privilege can still change their own asswords. )| -|LIST\_USER|list all users; list a user's privileges; list a user's roles; list users of Role with four kinds of operation privileges; path independent| -|GRANT\_USER\_PRIVILEGE|grant user privileges; path independent| -|REVOKE\_USER\_PRIVILEGE|revoke user privileges; path independent| -|GRANT\_USER\_ROLE|grant user roles; path independent| -|REVOKE\_USER\_ROLE|revoke user roles; path independent| -|CREATE\_ROLE|create roles; path independent| -|DELETE\_ROLE|delete roles; path independent| -|LIST\_ROLE|list all roles; list the privileges of a role; list the three kinds of operation privileges of all users owning a role; path independent| -|GRANT\_ROLE\_PRIVILEGE|grant role privileges; path independent| -|REVOKE\_ROLE\_PRIVILEGE|revoke role privileges; path independent| -|CREATE_FUNCTION|register UDFs; path independent| -|DROP_FUNCTION|deregister UDFs; path independent| -|CREATE_TRIGGER|create triggers; path dependent| -|DROP_TRIGGER|drop triggers; path dependent| -|START_TRIGGER|start triggers; path dependent| -|STOP_TRIGGER|stop triggers; path dependent| -|CREATE_CONTINUOUS_QUERY|create continuous queries; path independent| -|DROP_CONTINUOUS_QUERY|drop continuous queries; path independent| +|privilege Name|Interpretation|Example| +|:---|:---|----| +|SET\_STORAGE\_GROUP|set storage groups; path dependent|Eg: `set storage group to root.ln;`| +|DELETE\_STORAGE\_GROUP|delete storage groups; path dependent|Eg: `delete storage group root.ln;`| +|CREATE\_TIMESERIES|create timeseries; path dependent|Eg1: create timeseries
`create timeseries root.ln.wf02.status with datatype=BOOLEAN,encoding=PLAIN;`
Eg2: create aligned timeseries
`create aligned timeseries root.ln.device1(latitude FLOAT encoding=PLAIN compressor=SNAPPY, longitude FLOAT encoding=PLAIN compressor=SNAPPY);`| +|INSERT\_TIMESERIES|insert data; path dependent|Eg1: `insert into root.ln.wf02(timestamp,status) values(1,true);`
Eg2: `insert into root.sg1.d1(time, s1, s2) aligned values(1, 1, 1)`| +|READ\_TIMESERIES|query data; path dependent|Eg1: `show storage group;`
Eg2: `show child paths root.ln;`, `show child nodes root.ln;`
Eg3: `show devices;`
Eg4: `show timeseries root.**;`
Eg5: `show schema templates;`
Eg6: `show all ttl`
Eg7: [Query-Data](../Query-Data/Overview.md) (The query statements under this section all use this permission)
Eg8: CSV format data export
`./export-csv.bat -h 127.0.0.1 -p 6667 -u tempuser -pw root -td ./`
Eg9: Performance Tracing Tool
`tracing select * from root`
Eg10: UDF-Query
`select example(*) from root.sg.d1`
Eg11: Triggers-Query
`show triggers`| +|DELETE\_TIMESERIES|delete data or timeseries; path dependent|Eg1: delete timeseries
`delete timeseries root.ln.wf01.wt01.status`
Eg2: delete data
`delete from root.ln.wf02.wt02.status where time < 10`| +|CREATE\_USER|create users; path independent|Eg: `create user thulab 'passwd';`| +|DELETE\_USER|delete users; path independent|Eg: `drop user xiaoming;`| +|MODIFY\_PASSWORD|modify passwords for all users; path independent; (Those who do not have this privilege can still change their own passwords.)|Eg: `alter user tempuser SET PASSWORD 'newpwd';`| +|LIST\_USER|list all users; list a user's privileges; list a user's roles; list users of Role with four kinds of operation privileges; path independent|Eg1: `list user;`
Eg2: `list privileges user 'admin' on root.sgcc.**;`
Eg3: `list user privileges admin;`
Eg4: `list all user of role 'admin';`| +|GRANT\_USER\_PRIVILEGE|grant user privileges; path independent|Eg: `grant user tempuser privileges DELETE_TIMESERIES on root.ln.**;`| +|REVOKE\_USER\_PRIVILEGE|revoke user privileges; path independent|Eg: `revoke user tempuser privileges DELETE_TIMESERIES on root.ln.**;`| +|GRANT\_USER\_ROLE|grant user roles; path independent|Eg: `grant temprole to tempuser;`| +|REVOKE\_USER\_ROLE|revoke user roles; path independent|Eg: `revoke temprole from tempuser;`| +|CREATE\_ROLE|create roles; path independent|Eg: `create role admin;`| +|DELETE\_ROLE|delete roles; path independent|Eg: `drop role admin;`| +|LIST\_ROLE|list all roles; list the privileges of a role; list the three kinds of operation privileges of all users owning a role; path independent|Eg1: `list role`
Eg2: `list role privileges actor;`
Eg3: `list privileges role write_role ON root.sgcc;`
Eg4: `list all role of user admin;`| +|GRANT\_ROLE\_PRIVILEGE|grant role privileges; path independent|Eg: `grant role temprole privileges DELETE_TIMESERIES ON root.ln.**;`| +|REVOKE\_ROLE\_PRIVILEGE|revoke role privileges; path independent|Eg: `revoke role temprole privileges DELETE_TIMESERIES ON root.ln.**;`| +|CREATE_FUNCTION|register UDFs; path independent|Eg: `create function example AS 'org.apache.iotdb.udf.UDTFExample';`| +|DROP_FUNCTION|deregister UDFs; path independent|Eg: `drop function example`| +|CREATE_TRIGGER|create triggers; path dependent|Eg1: `CREATE TRIGGER <trigger-name> BEFORE INSERT ON <full-path> AS <classname>`
Eg2: `CREATE TRIGGER <trigger-name> AFTER INSERT ON <full-path> AS <classname>`| +|DROP_TRIGGER|drop triggers; path dependent|Eg: `drop trigger 'alert-listener-sg1d1s1'`| +|START_TRIGGER|start triggers; path dependent|Eg: `start trigger 'alert-listener-sg1d1s1'`| +|STOP_TRIGGER|stop triggers; path dependent|Eg: `stop trigger 'alert-listener-sg1d1s1'`| +|CREATE_CONTINUOUS_QUERY|create continuous queries; path independent|Eg: `select s1, s1 into t1, t2 from root.sg.d1`| +|DROP_CONTINUOUS_QUERY|drop continuous queries; path independent|Eg1: `DROP CONTINUOUS QUERY cq3`
Eg2: `DROP CQ cq3`| + +Note that the following SQL statements need to be granted multiple permissions before they can be used: + +- Import data: requires both the `READ_TIMESERIES` and `INSERT_TIMESERIES` permissions. + +``` +Eg: IoTDB > ./import-csv.bat -h 127.0.0.1 -p 6667 -u renyuhua -pw root -f dump0.csv +``` + +- Query Write-back (SELECT INTO) + - `READ_TIMESERIES` permission on the source series in all `select` clauses is required + - `INSERT_TIMESERIES` permission on the target series in all `into` clauses is required + +``` +Eg: IoTDB > select s1, s1 into t1, t2 from root.sg.d1 limit 5 offset 1000 +``` ### Username Restrictions @@ -343,4 +392,191 @@ IoTDB specifies that the character length of a role name should have no less tha A path pattern's result set contains all the elements of its sub pattern's result set. For example, `root.sg.d.*` is a sub pattern of `root.sg.*.*`, while `root.sg.**` is not a sub pattern of -`root.sg.*.*`. When a user is granted privilege on a pattern, the pattern used in his DDL or DML must be a sub pattern of the privilege pattern, which guarantees that the user won't access the timeseries exceed his privilege scope. \ No newline at end of file +`root.sg.*.*`. When a user is granted privilege on a pattern, the pattern used in his DDL or DML must be a sub pattern of the privilege pattern, which guarantees that the user won't access timeseries beyond his privilege scope. + +### Permission cache + +In distributed permission-related operations, when changing permissions other than creating users and roles, the cached information related to the user (role) on all `dataNode`s will be cleared first. If clearing the cache on any `dataNode` fails, the permission change task will fail. + +### Operations restricted by non-root users + +At present, the following SQL statements supported by IoTDB can only be executed by the `root` user, and the corresponding permission cannot be granted to other users.
+ +###### TTL + +- set ttl + +``` +Eg: IoTDB > set ttl to root.ln 3600 +``` + +- unset ttl + +``` +Eg: IoTDB > unset ttl to root.ln +``` + +###### Schema Template + +- Create Schema Template + +``` +Eg: IoTDB > create schema template t1 (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY) +``` + +- Set Schema Template + +``` +Eg: IoTDB > set schema template t1 to root.sg1.d1 +``` + +- Uset Schema Template + +``` +Eg: IoTDB > unset schema template t1 from root.sg1.d1 +``` + +- Drop Schema Template + +``` +Eg: IoTDB > drop schema template t1 +``` + +###### Tag and Attribute Management + +- Rename the tag/attribute key + +```text +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 +``` + +- reset the tag/attribute value + +```text +ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 +``` + +- delete the existing tag/attribute + +```text +ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 +``` + +- add new tags + +```text +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 +``` + +- add new attributes + +```text +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 +``` + +- upsert alias, tags and attributes + +```text +ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag3=v3, tag4=v4) ATTRIBUTES(attr3=v3, attr4=v4) +``` + +###### TsFile Management + +- Load TsFiles + +``` +Eg: IoTDB > load '/Users/Desktop/data/1575028885956-101-0.tsfile' +``` + +- remove a tsfile + +``` +Eg: IoTDB > remove '/Users/Desktop/data/data/root.vehicle/0/0/1575028885956-101-0.tsfile' +``` + +- unload a tsfile and move it to a target directory + +``` +Eg: IoTDB > unload '/Users/Desktop/data/data/root.vehicle/0/0/1575028885956-101-0.tsfile' '/data/data/tmp' +``` + +###### Count + +- Count storage group/Number of nodes/device/timeseries + +``` +Eg: IoTDB > count storage group +Eg: IoTDB > count nodes root.** LEVEL=2 +Eg: IoTDB > count devices root.ln.** +Eg: IoTDB > count timeseries root.** +``` + +###### Delete Time Partition (experimental) + +- Delete Time Partition (experimental) + +``` +Eg: IoTDB > DELETE PARTITION root.ln 0,1,2 +``` + +###### Continuous Query,CQ + +- Continuous Query,CQ + +``` +Eg: IoTDB > CREATE CONTINUOUS QUERY cq1 BEGIN SELECT max_value(temperature) INTO temperature_max FROM root.ln.*.* GROUP BY time(10s) END +``` + +###### Maintenance Command + +- FLUSH + +``` +Eg: IoTDB > flush +``` + +- MERGE + +``` +Eg: IoTDB > MERGE +Eg: IoTDB > FULL MERGE +``` + +- CLEAR CACHE + +```sql +Eg: IoTDB > CLEAR CACHE +``` + +- SET STSTEM TO READONLY / WRITABLE + +``` +Eg: IoTDB > SET STSTEM TO READONLY / WRITABLE +``` + +- SCHEMA SNAPSHOT + +```sql +Eg: IoTDB > CREATE SNAPSHOT FOR SCHEMA +``` + +- Query abort + +``` +Eg: IoTDB > KILL QUERY 1 +``` + +###### Watermark Tool + +- Watermark new users + +``` +Eg: IoTDB > grant watermark_embedding to Alice +``` + +- Watermark Detection + +``` +Eg: IoTDB > revoke watermark_embedding from Alice +``` + diff --git a/docs/UserGuide/Cluster/Cluster-Setup-Example.md b/docs/UserGuide/Cluster/Cluster-Setup-Example.md index 321e00ca9fa2..70e6c4bef93d 100644 --- a/docs/UserGuide/Cluster/Cluster-Setup-Example.md +++ b/docs/UserGuide/Cluster/Cluster-Setup-Example.md @@ -180,7 +180,7 @@ seed_nodes = A_private_Ip:9003,B_private_Ip:9003,C_private_Ip:9003 default_replica_num = 3 internal_meta_port = 9003 internal_data_port = 40010 -internal_ip = A_private_Ip +internal_address = A_private_Ip ``` ***iotdb-engine.properties*** @@ -199,7 +199,7 @@ seed_nodes = 
A_private_Ip:9003,B_private_Ip:9003,C_private_Ip:9003 default_replica_num = 3 internal_meta_port = 9003 internal_data_port = 40010 -internal_ip = B_private_Ip +internal_address = B_private_Ip ``` ***iotdb-engine.properties*** @@ -218,7 +218,7 @@ seed_nodes = A_private_Ip:9003,B_private_Ip:9003,C_private_Ip:9003 default_replica_num = 3 internal_meta_port = 9003 internal_data_port = 40010 -internal_ip = C_private_Ip +internal_address = C_private_Ip ``` ***iotdb-engine.properties*** @@ -272,7 +272,7 @@ Set default\_replica\_num = 3: default value of this configuration item is 3 Set internal\_ip = private ip of node (e.g. 192.168.1.1) ``` -sed -i -e 's/^internal_ip=127.0.0.1$/internal_ip=192.168.1.1/g' conf/iotdb-cluster.properties +sed -i -e 's/^internal_address=127.0.0.1$/internal_address=192.168.1.1/g' conf/iotdb-cluster.properties ``` Set seed\_node = A_private_Ip:9003,B_private_Ip:9003,C_private_Ip:9003 (e.g. private ip of all three nodes are 192.168.1.1,192.168.1.2,192.168.1.3) ``` diff --git a/docs/UserGuide/Cluster/Cluster-Setup.md b/docs/UserGuide/Cluster/Cluster-Setup.md index 67382a1cc929..e32e9b1fbb4d 100644 --- a/docs/UserGuide/Cluster/Cluster-Setup.md +++ b/docs/UserGuide/Cluster/Cluster-Setup.md @@ -174,13 +174,13 @@ In the process of cluster running, users can add new nodes to the cluster or del ``` # Unix/OS X -> sbin/remove-node.sh +> sbin/remove-node.sh # Windows -> sbin\remove-node.bat +> sbin\remove-node.bat ``` -`internal_ip` means the IP address of the node to be deleted `internal_meta_port` means the meta port of the node to be deleted +`internal_address` means the IP address of the node to be deleted `internal_meta_port` means the meta port of the node to be deleted ### Use Cli @@ -208,7 +208,7 @@ To stop the services of all the nodes on a single machine, you need to execute t | Name | internal\_ip | | ----------- | ------------------------------------------------------------ | -| Description | IP address of internal communication between nodes in IOTDB cluster, such as heartbeat, snapshot, raft log, etc. **`internal_ip` is a private ip.** | +| Description | IP address of internal communication between nodes in IOTDB cluster, such as heartbeat, snapshot, raft log, etc. **`internal_address` is a private ip.** | | Type | String | | Default | 127.0.0.1 | | Effective | After restart system, shall NOT change after cluster is up | diff --git a/docs/UserGuide/Data-Concept/Data-Type.md b/docs/UserGuide/Data-Concept/Data-Type.md index 4184be50d825..218933b9a9a5 100644 --- a/docs/UserGuide/Data-Concept/Data-Type.md +++ b/docs/UserGuide/Data-Concept/Data-Type.md @@ -33,7 +33,7 @@ IoTDB supports the following data types: * TEXT (String) -The time series of **FLOAT** and **DOUBLE** type can specify (MAX\_POINT\_NUMBER, see [this page](../Reference/SQL-Reference.md) for more information on how to specify), which is the number of digits after the decimal point of the floating point number, if the encoding method is [RLE](Encoding.md) or [TS\_2DIFF](Encoding.md). If MAX\_POINT\_NUMBER is not specified, the system will use [float\_precision](../Reference/Config-Manual.md) in the configuration file `iotdb-engine.properties`. +The time series of **FLOAT** and **DOUBLE** type can specify (MAX\_POINT\_NUMBER, see [this page](../Reference/SQL-Reference.md) for more information on how to specify), which is the number of digits after the decimal point of the floating point number, if the encoding method is [RLE](Encoding.md) or [TS\_2DIFF](Encoding.md). 
If MAX\_POINT\_NUMBER is not specified, the system will use [float\_precision](../Reference/Config-Manual.md) in the configuration file `iotdb-datanode.properties`. * For Float data value, The data range is (-Integer.MAX_VALUE, Integer.MAX_VALUE), rather than Float.MAX_VALUE, and the max_point_number is 19, caused by the limition of function Math.round(float) in Java. * For Double data value, The data range is (-Long.MAX_VALUE, Long.MAX_VALUE), rather than Double.MAX_VALUE, and the max_point_number is 19, caused by the limition of function Math.round(double) in Java (Long.MAX_VALUE=9.22E18). diff --git a/docs/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md b/docs/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md index 4aca0fdaab21..d1f71ac01221 100644 --- a/docs/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md +++ b/docs/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md @@ -37,7 +37,7 @@ After that, you can get a **conf** directory and a **lib** directory in schema-engine-rocksdb/target/schema-engine-rocksdb. Copy the file in the conf directory to the conf directory of server, and copy the files in the lib directory to the lib directory of server. -Then, open the **iotdb-engine.properties** in the conf directory of server, and set the `schema_engine_mode` to +Then, open the **iotdb-datanode.properties** in the conf directory of server, and set the `schema_engine_mode` to Rocksdb_based. Restart the IoTDB, the system will use `RSchemaRegion` to manage the metadata. ``` diff --git a/docs/UserGuide/Ecosystem Integration/Grafana Plugin.md b/docs/UserGuide/Ecosystem Integration/Grafana Plugin.md index d01747c5e9af..6696a02722bf 100644 --- a/docs/UserGuide/Ecosystem Integration/Grafana Plugin.md +++ b/docs/UserGuide/Ecosystem Integration/Grafana Plugin.md @@ -99,19 +99,22 @@ If compiling successful, you can see that the `distribution/target` directory co #### Install Grafana-Plugin -* Copy the front-end project target folder generated above to Grafana's plugin directory `${Grafana directory}\data\plugins\` - * Windows: the `data\plugins` directory is automatically created - * Linux: artificially create `/var/lib/grafana/plugins` directory - * MacOS: the plugin directory is `/usr/local/var/lib/grafana/plugins` (see more details after installing Grafana using `brew install`) +* Copy the front-end project target folder generated above to Grafana's plugin directory `${Grafana directory}\data\plugins\`。If there is no such directory, you can manually create it or start grafana and it will be created automatically. Of course, you can also modify the location of plugins. For details, please refer to the following instructions for modifying the location of Grafana's plugin directory. + * Modify Grafana configuration file: the file is in(`${Grafana directory}\conf\defaults.ini`), and do the following modifications: ```ini allow_loading_unsigned_plugins = iotdb ``` - +* Modify the location of Grafana's plugin directory: the file is in(`${Grafana directory}\conf\defaults.ini`), and do the following modifications: + + ```ini + plugins = data/plugins + ``` * Start Grafana (restart if the Grafana service is already started) +For more details,please click [here](https://grafana.com/docs/grafana/latest/plugins/installation/) #### Start Grafana @@ -194,11 +197,11 @@ Click the `New Dashboard` icon on the top right, and select `Add an empty panel` -Grafana plugin supports Raw mode and Aggregation mode, and the default mode is Raw mode. 
+Grafana plugin supports SQL: Full Customized mode and SQL: Drop-down List mode, and the default mode is SQL: Full Customized mode. -##### Raw input method +##### SQL: Full Customized input method Enter content in the SELECT, FROM , WHERE and CONTROL input box, where the WHERE and CONTROL input boxes are optional. @@ -238,7 +241,7 @@ Here are some examples of valid CONTROL content: Tip: Statements like `select * from root.xx.**` are not recommended because those statements may cause OOM. -##### Aggregation input method +##### SQL: Drop-down List Select a time series in the TIME-SERIES selection box, select a function in the FUNCTION option, and enter the contents in the SAMPLING INTERVAL、SLIDING STEP、LEVEL、FILL input boxes, where TIME-SERIES is a required item and the rest are non required items. @@ -246,7 +249,7 @@ Select a time series in the TIME-SERIES selection box, select a function in the #### Support for variables and template functions -Both raw and aggregation input methods support the variable and template functions of grafana. In the following example, raw input method is used, and aggregation is similar. +Both SQL: Full Customized and SQL: Drop-down List input methods support the variable and template functions of grafana. In the following example, raw input method is used, and aggregation is similar. After creating a new Panel, click the Settings button in the upper right corner: diff --git a/docs/UserGuide/Ecosystem Integration/Writing Data on HDFS.md b/docs/UserGuide/Ecosystem Integration/Writing Data on HDFS.md index 57f371d79b67..9d0cffb1ca01 100644 --- a/docs/UserGuide/Ecosystem Integration/Writing Data on HDFS.md +++ b/docs/UserGuide/Ecosystem Integration/Writing Data on HDFS.md @@ -41,7 +41,7 @@ Build server and Hadoop module by: `mvn clean package -pl server,hadoop -am -Dma Then, copy the target jar of Hadoop module `hadoop-tsfile-X.X.X-jar-with-dependencies.jar` into server target lib folder `.../server/target/iotdb-server-X.X.X/lib`. -Edit user config in `iotdb-engine.properties`. Related configurations are: +Edit user config in `iotdb-datanode.properties`. Related configurations are: * tsfile\_storage\_fs diff --git a/docs/UserGuide/Integration-Test/Integration-Test-refactoring-tutorial.md b/docs/UserGuide/Integration-Test/Integration-Test-refactoring-tutorial.md index 449ec27ff540..d5311018c858 100644 --- a/docs/UserGuide/Integration-Test/Integration-Test-refactoring-tutorial.md +++ b/docs/UserGuide/Integration-Test/Integration-Test-refactoring-tutorial.md @@ -19,143 +19,257 @@ --> -# Integration Test refactoring tutorial +# Developer Document for Integration Test -- Step 0. Prerequisites +**Integration test** is one of the phases in software testing, when different software modules are put together and tested as a whole. Integration tests are for evaluating whether a system or component meets the target functional requirements. - - **IT Location has been moved**; all Integration Tests have been moved to the integration module. - - **Test case MUST label**; the test classification label is the junit's `category`, which determines the test environment or process in which the test case is tested. - - **Code related to the test environment MAY need to be refactored**; this determines whether the current test environment can correctly test the test case. The corresponding statement needs to be refactored. +## Apache IoTDB Integration Test Criteria +### The Environment of the integration test of Apache IoTDB -- Step 1. 
Labeling test cases - - Add the appropriate `Category` before the test case class or test case method, which can collect any desired test category labels. +There are three kinds of environments for Apache IoTDB integration test, correspondingly **local standalone, Cluster, and remote.** The integration test should be conducted on at least one of them. Details of the three kinds are as follows. - - The `Category` of the following three test categories are all real and effective, +1. Local standalone. It is set up for integration testing of IoTDB, the standalone version. Any change of the configurations of IoTDB would require updating the configuration files before starting the database. +2. Cluster. It is set up for integration testing of IoTDB, the distribution version (pseudo-distribution). Any change of the configurations of IoTDB would require updating the configuration files before starting the database. +3. Remote. It is set up for the integration testing of a remote IoTDB instance, which could be either a standalone instance or a node in a remote cluster. Any change of the configuration is restricted and is not allowed currently. - ```java - @Category({LocalStandaloneTest.class, ClusterTest.class, RemoteTest.class}) - public class IoTDBAliasIT { - ...... - } - - - @Category({LocalStandaloneTest.class, ClusterTest.class}) - public class IoTDBAlignByDeviceIT { - ...... - } - - - @Category({LocalStandaloneTest.class}) - public class IoTDBArithmeticIT { - ...... - } - ``` +Integration test developers need to specify at least one of the environments when writing the tests. Please check the details below. - - You can also add `Category` at the test method level. - ```java - @Category({LocalStandaloneTest.class}) - public class IoTDBExampleIT { - - // This case can ONLY test in environment of local. - @Test - public void theStandaloneCase() { - ...... - } - - // This case can test in environment of local, cluster and remote. - @Test - @Category({ClusterTest.class, RemoteTest.class}) - public void theAllEnvCase() { - ...... - } - } - ``` +### Black-Box Testing - - At present, all test cases must at least add the `Category` of the stand-alone test, namely `LocalStandaloneTest.class`. +Black-box testing is a software testing method that evaluates the functionality of a program without regard to its internal structure or how it works. Developers do not need to understand the internal logic of the application for testing. **Apache IoTDB integration tests are conducted as black-box tests. Any test interacting with the system through JDBC or Session API is considered a black-box test case.** Moreover, the validation of the output should also be implemented through the JDBC or Session API. +### Steps of an integration test -- Step 2. Environmental code refactoring +Generally, there are three steps to finish the integration test, (1) constructing the test class and annotating the environment, (2) housekeeping to prepare for the test and clean up the environment after the test, and (3) implementing the logic of the integration test. To test IoTDB not under the default configuration, the configuration should be changed before the test, which will be introduced in section 4. - - If the test case needs to be tested in the Cluster or Remote environment, the environment-related code MUST be refactored accordingly. If it is only tested in the LocalStandalone environment, modifications are only recommended. 
(Not all test cases can be tested in the Cluster or Remote environment because statements that are limited by some functions, such as local file operations, cannot be refactored.) + - | | LocalStandalone | Cluster | Remote | - | :------------------------- | :-------------: | :---------: | :---------: | - | setUp and tearDown | Recommend | Must | Must | - | getConnection | Recommend | Must | Must | - | change config | Recommend | Must | Not support | - | Local file operation | Won't change | Not support | Not support | - | Local descriptor operation | Won't change | Not support | Not support | - | restart operation | Won't change | Not support | Not support | +#### 1. Integration Test Class (IT Class) and Annotations - +When writing new IT classes, the developers are encouraged to create the new ones in the [integration-test](https://github.com/apache/iotdb/tree/master/integration-test) module. Except for the classes serving the other test cases, the classes containing integration tests to evaluate the functionality of IoTDB should be named "function"+"IT". For example, the test for auto-registration metadata in IoTDB is named “IoTDBAutoCreateSchemaIT”. - - The `setUp` and `tearDown` methods must be refactored in the Cluster and Remote environment +- Category`` Annotation. **When creating new IT classes, the ```@Category``` should be introduced explicitly**, and the test environment should be specified by ```LocalStandaloneIT.class```, ```ClusterIT.class```, and ```RemoteIT.class```, which corresponds to the Local Standalone, Cluster and Remote environment respectively. **In general, ```LocalStandaloneIT.class``` and ```ClusterIT.class``` should both be included**. Only in the case when some functionalities are only supported in the standalone version can we include ```LocalStandaloneIT.class``` solely. +- RunWith Annotation. The ```@RunWith(IoTDBTestRunner.class)``` annotation should be included in every IT class. - ```java - @Category({LocalStandaloneTest.class, ClusterTest.class, RemoteTest.class}) - public class IoTDBAliasIT { - - @BeforeClass - public static void setUp() throws Exception { - // EnvironmentUtils.closeStatMonitor(); // orginal setup code - // EnvironmentUtils.envSetUp(); // orginal setup code - EnvFactory.getEnv().initBeforeClass(); // new initBeforeClass code - - insertData(); - } - - @AfterClass - public static void tearDown() throws Exception { - // EnvironmentUtils.cleanEnv(); // orginal code - EnvFactory.getEnv().cleanAfterClass(); // new cleanAfterClass code + +```java +// Introduce annotations to IoTDBAliasIT.class. The environments include local standalone, cluster and remote. +@RunWith(IoTDBTestRunner.class) +@Category({LocalStandaloneIT.class, ClusterIT.class, RemoteIT.class}) +public class IoTDBAliasIT { + ... +} + +// Introduce annotations to IoTDBAlignByDeviceIT.class. The environments include local standalone and cluster. +@RunWith(IoTDBTestRunner.class) +@Category({LocalStandaloneIT.class, ClusterIT.class}) +public class IoTDBAlignByDeviceIT { + ... +} +``` + +#### 2. Housekeeping to Prepare for the Test and Clean up the Environment after the Test + +Preparations before the test include starting an IoTDB (single or cluster) instance and preparing data for the test. The logic should be implemented in the ```setUp()``` method, and the method should follow the annotation ```@BeforeClass``` or ```@Before```. +The former means that this method is the first method executed for the IT class and is executed only once. 
The latter indicates that ```setUp()``` will be executed before each test method in the IT class. + +- Please start IoTDB instance through the factor class, i.e., ```EnvFactory.getEnv().initBeforeClass()```. +- Data preparation for the test includes registering storage groups, registering time series, and writing time series data as required by the test. It is recommended to implement a separate method within the IT class to prepare the data, such as ```insertData()```. +Please try to take advantage of the ```executeBatch()``` in JDBC or ```insertRecords()``` and ```insertTablets()``` in Session API if multiple statements or operations are to be executed. + +```java +@BeforeClass +public static void setUp() throws Exception { + // start an IoTDB instance + EnvFactory.getEnv().initBeforeClass(); + ... // data preparation +} +``` + +After the test, please clean up the environment by shut down the connections that have not been closed. This logic should be implemented in the ```tearDown()``` method. The ```tearDown()``` method follows the annotation ```@AfterClass``` or ```@After```. The former means that this method is the last method to execute for the IT class and is executed only once. The latter indicates that ```tearDown()``` will be executed after each test method in the IT class. + +- If the IoTDB connection is declared as an instance variable and is not closed after the test, please explicitly close it in the ```tearDown()``` method. +- The cleaning up should be implemented through the factory class, i.e., ```EnvFactory.getEnv().cleanAfterClass()```. + + +```java +@AfterClass +public static void tearDown() throws Exception { + ... // close the connection + // clean up the environment + EnvFactory.getEnv().cleanAfterClass(); +} +``` + +#### 3. Implementing the logic of IT + +IT of Apache IoTDB should be implemented as black-box testing. Please name the method as "functionality"+"Test", e.g., "selectWithAliasTest". The interaction should be implemented through JDBC or Session API. + +1 With JDBC + +When using the JDBC interface, it is recommended that the connection be established in a try statement. Connections established in this way do not need to be closed in the tearDown method explicitly. Connections need to be established through the factory class, i.e., ```EnvFactory.getEnv().getConnection()```. It is not necessary to specify the IP address or port number. The sample code is shown below. + +```java +@Test +public void someFunctionTest(){ + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + ... // execute the statements and test the correctness + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } +} +``` + +Please note that, +- **It is required to use ```executeQuery()``` to query the data from the database and get the ResultSet.** +- **For updating the database without any return value, it is required to use ```execute()``` method to interact with the database.** +The sample code is as follows. 
+ +```java +@Test +public void exampleTest() throws Exception { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + // use execute() to set the storage groups + statement.execute("set storage group to root.sg"); + // use executeQuery() query the storage groups + try (ResultSet resultSet = statement.executeQuery("show storage group")) { + if (resultSet.next()) { + String storageGroupPath = resultSet.getString("storage group"); + Assert.assertEquals("root.sg", storageGroupPath); + } else { + Assert.fail("This ResultSet is empty."); } } - ``` + } +} +``` - +2 With Session API - - The `getConnection` must be refactored in Cluster and Remote environments +Currently, it is not recommended to implement IT with Session API. + +3 Annotations of Environment for the Test Methods + +For test methods, developers can also specify a test environment with the annotation before the method. It is important to note that a case with additional test environment annotations will be tested not only in the specified environment, but also in the environment of the IT class to which the use case belongs. The sample code is as follows. + + +```java +@RunWith(IoTDBTestRunner.class) +@Category({LocalStandaloneIT.class}) +public class IoTDBExampleIT { + + // This case will only be tested in a local stand-alone test environment + @Test + public void theStandaloneCaseTest() { + ... + } + + // The use case will be tested in the local standalone environment, the cluster environment, and the remote test environment. + @Test + @Category({ClusterIT.class, RemoteIT.class}) + public void theAllEnvCaseTest() { + ... + } +} +``` + +#### 4. Change the configurations of IoTDB when testing + +Sometimes, the configurations of IoTDB need to be changed in order to test the functionalities under certain conditions. Because changing the configurations on a remote machine is troublesome, configuration modification is not allowed in the remote environment. However, it is allowed in the local standalone and cluster environment. Changes of the configuration files should be implemented in the ```setUp()``` method, before ```EnvFactory.getEnv().initBeforeClass()```, and should be implemented through ConfigFactory. In ```tearDown()``` , please undo all changes of the configurations and revert to its original default settings by ConfigFactory after the environment cleanup (```EnvFactory.getEnv().cleanAfterTest()```). The example code is as follows. 
+ + +```java +@RunWith(IoTDBTestRunner.class) +@Category({LocalStandaloneIT.class, ClusterIT.class}) +public class IoTDBAlignedSeriesQueryIT { + + protected static boolean enableSeqSpaceCompaction; + protected static boolean enableUnseqSpaceCompaction; + protected static boolean enableCrossSpaceCompaction; + + @BeforeClass + public static void setUp() throws Exception { + // get the default configurations + enableSeqSpaceCompaction = ConfigFactory.getConfig().isEnableSeqSpaceCompaction(); + enableUnseqSpaceCompaction = ConfigFactory.getConfig().isEnableUnseqSpaceCompaction(); + enableCrossSpaceCompaction = ConfigFactory.getConfig().isEnableCrossSpaceCompaction(); + // update configurations + ConfigFactory.getConfig().setEnableSeqSpaceCompaction(false); + ConfigFactory.getConfig().setEnableUnseqSpaceCompaction(false); + ConfigFactory.getConfig().setEnableCrossSpaceCompaction(false); + EnvFactory.getEnv().initBeforeClass(); + AlignedWriteUtil.insertData(); + } + + @AfterClass + public static void tearDown() throws Exception { + EnvFactory.getEnv().cleanAfterClass(); + // revert to the default configurations + ConfigFactory.getConfig().setEnableSeqSpaceCompaction(enableSeqSpaceCompaction); + ConfigFactory.getConfig().setEnableUnseqSpaceCompaction(enableUnseqSpaceCompaction); + ConfigFactory.getConfig().setEnableCrossSpaceCompaction(enableCrossSpaceCompaction); + } +} +``` + +### Commands for starting IT + +1 Execute IT in the cluster environment + +```shell script +mvn clean verify \ + -Dsession.test.skip=true \ + -Diotdb.test.skip=true \ + -Dcluster.test.skip=true \ + -Dtsfile.test.skip=true \ + -Dcommons.test.skip=true \ + -Dconfignode.test.skip=true \ + -Dconsensus.test.skip=true \ + -Djdbc.test.skip=true \ + -Dmetrics.test.skip=true \ + -pl integration-test -am -PClusterIT +``` +2 Execute IT in the local standalone environment + +```shell script +mvn clean verify \ + -Dsession.test.skip=true \ + -Diotdb.test.skip=true \ + -Dcluster.test.skip=true \ + -Dtsfile.test.skip=true \ + -Dcommons.test.skip=true \ + -Dconfignode.test.skip=true \ + -Dconsensus.test.skip=true \ + -pl integration-test -am +``` + +3 Execute IT in the remote environment + +```shell script +mvn clean verify -pl integration-test -am -PRemoteIT \ + -DRemoteIp=127.0.0.1 \ + -DRemotePort=6667 +``` + +## Q&A +### Ways to check the log after the CI failure +1 click *Details* of the corresponding test + + + +2 check and download the error log + + + +You can also click the *summary* at the upper left and then check and download the error log. + + - ```java - private static void insertData() throws ClassNotFoundException { - // Class.forName(Config.JDBC_DRIVER_NAME); // orginal connection code - // try (Connection connection = - // DriverManager.getConnection( - // Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root"); - try (Connection connection = EnvFactory.getEnv().getConnection(); // new code - Statement statement = connection.createStatement()) { - - for (String sql : sqls) { - statement.execute(sql); - } - } catch (Exception e) { - e.printStackTrace(); - } - } - ``` - - - The method of changing the IoTDB configuration must be refactored in the Cluster environment. (As the remote environment cannot change the configuration remotely at present, the test cases that change the configuration will not support testing in the remote environment) - - In the Cluster environment, as the configuration cannot be changed dynamically, only the configuration changes before the environment init are effective. 
- - The refactoring has included most of the configuration changes, which can be changed through the method of `ConfigFactory.getConfig()`. - - ```java - @Category({LocalStandaloneTest.class, ClusterTest.class}) - public class IoTDBCompleteIT { - private int prevVirtualStorageGroupNum; - - @Before - public void setUp() { - prevVirtualStorageGroupNum = - IoTDBDescriptor.getInstance().getConfig().getVirtualStorageGroupNum(); - // IoTDBDescriptor.getInstance().getConfig().setVirtualStorageGroupNum(16); // orginal code - ConfigFactory.getConfig().setVirtualStorageGroupNum(16); // new code - EnvFactory.getEnv().initBeforeClass(); - } - ``` - - - If the configuration item has not been included in the method of `ConfigFactory.getConfig()`, it needs to be defined in the BaseConfig.java interface file and implemented in StandaloneEnvConfig.java and ClusterEnvConfig.java, respectively. This part is not very common. For specific, please refer to the realized part. diff --git a/docs/UserGuide/IoTDB-Introduction/Architecture.md b/docs/UserGuide/IoTDB-Introduction/Architecture.md index 27eef200d9d5..b6b581cb4a15 100644 --- a/docs/UserGuide/IoTDB-Introduction/Architecture.md +++ b/docs/UserGuide/IoTDB-Introduction/Architecture.md @@ -25,7 +25,7 @@ Besides IoTDB engine, we also developed several components to provide better IoT IoTDB suite can provide a series of functions in the real situation such as data collection, data writing, data storage, data query, data visualization and data analysis. Figure 1.1 shows the overall application architecture brought by all the components of the IoTDB suite. - + As shown in Figure 1.1, users can use JDBC to import timeseries data collected by sensor on the device to local/remote IoTDB. These timeseries data may be system state data (such as server load and CPU memory, etc.), message queue data, timeseries data from applications, or other timeseries data in the database. Users can also write the data directly to the TsFile (local or on HDFS). diff --git a/docs/UserGuide/Maintenance-Tools/JMX-Tool.md b/docs/UserGuide/Maintenance-Tools/JMX-Tool.md index 3851b4badcd5..58f41b4489bb 100644 --- a/docs/UserGuide/Maintenance-Tools/JMX-Tool.md +++ b/docs/UserGuide/Maintenance-Tools/JMX-Tool.md @@ -34,7 +34,7 @@ View `$IOTDB_HOME/conf/jmx.password`, and use default user or add new users here If new users are added, remember to edit `$IOTDB_HOME/conf/jmx.access` and add new users' access * IoTDB is not LOCAL -Edit `$IOTDB_HOME/conf/iotdb-env.sh`, and modify config below: +Edit `$IOTDB_HOME/conf/datanode-env.sh`, and modify config below: ``` JMX_LOCAL="false" JMX_IP="the_real_iotdb_server_ip" # Write the actual IoTDB IP address diff --git a/docs/UserGuide/Maintenance-Tools/Maintenance-Command.md b/docs/UserGuide/Maintenance-Tools/Maintenance-Command.md index 5d99fa63e548..c24631b8f9d5 100644 --- a/docs/UserGuide/Maintenance-Tools/Maintenance-Command.md +++ b/docs/UserGuide/Maintenance-Tools/Maintenance-Command.md @@ -113,4 +113,69 @@ To get the executing `queryId`,you can use the `show query processlist` comman | ---- | ------- | --------- | | | | | -The maximum display length of statement is 64 characters. For statements with more than 64 characters, the intercepted part will be displayed. \ No newline at end of file +The maximum display length of statement is 64 characters. For statements with more than 64 characters, the intercepted part will be displayed. 
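+
+A minimal sketch of the abort workflow described above, assuming a running query whose `queryId` is 3 (the id value is illustrative only): list the running queries first, then abort the chosen one.
+
+```
+IoTDB> show query processlist
+IoTDB> kill query 3
+```
+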
+
+
+
+## Monitoring tool for cluster Region distribution
+
+A cluster uses a Region as the unit for data replication and data management. Region status and distribution are helpful for system operation, maintenance and testing, as in the following scenario:
+
+- Check which DataNodes are allocated to each Region in the cluster and whether the Region distribution is balanced.
+
+Currently, IoTDB supports querying Regions with the following SQL statements:
+
+- `SHOW REGIONS`: Show all Regions
+- `SHOW SCHEMA REGIONS`: Show the distribution of all SchemaRegions
+- `SHOW DATA REGIONS`: Show the distribution of all DataRegions
+
+```sql
+IoTDB> show regions
++--------+------------+------+-------------+-----+----------+----------+----+
+|RegionId| Type|Status|storage group|Slots|DataNodeId|Host|Port|
++--------+------------+------+-------------+-----+----------+----------+----+
+| 0|SchemaRegion| Up| root.sg| 0| 1| 127.0.0.1|6667|
+| 1|SchemaRegion| Up| root.sg| 0| 3| 127.0.0.1|6669|
+| 2|SchemaRegion| Up| root.sg| 1| 4| 127.0.0.1|6671|
+| 3| DataRegion| Up| root.sg| 0| 3| 127.0.0.1|6669|
+| 4| DataRegion| Up| root.sg| 0| 3| 127.0.0.1|6669|
+| 5| DataRegion| Up| root.sg| 1| 1| 127.0.0.1|6667|
+| 6| DataRegion| Up| root.sg| 0| 4| 127.0.0.1|6671|
+| 7| DataRegion| Up| root.sg| 0| 1| 127.0.0.1|6667|
+| 8| DataRegion| Up| root.sg| 0| 4| 127.0.0.1|6671|
+| 9| DataRegion| Up| root.sg| 0| 1| 127.0.0.1|6667|
+| 10| DataRegion| Up| root.sg| 0| 4| 127.0.0.1|6671|
+| 11| DataRegion| Up| root.sg| 0| 3| 127.0.0.1|6669|
+| 12| DataRegion| Up| root.sg| 0| 1| 127.0.0.1|6667|
++--------+------------+------+-------------+-----+----------+----------+----+
+Total line number = 13
+It costs 0.107s
+IoTDB> show schema regions
++--------+------------+------+-------------+-----+----------+----------+----+
+|RegionId| Type|Status|storage group|Slots|DataNodeId|Host|Port|
++--------+------------+------+-------------+-----+----------+----------+----+
+| 0|SchemaRegion| Up| root.sg| 0| 1| 127.0.0.1|6667|
+| 1|SchemaRegion| Up| root.sg| 0| 3| 127.0.0.1|6669|
+| 2|SchemaRegion| Up| root.sg| 1| 4| 127.0.0.1|6671|
++--------+------------+------+-------------+-----+----------+----------+----+
+Total line number = 3
+It costs 0.009s
+IoTDB> show data regions
++--------+----------+------+-------------+-----+----------+----------+----+
+|RegionId| Type|Status|storage group|Slots|DataNodeId|Host|Port|
++--------+----------+------+-------------+-----+----------+----------+----+
+| 3|DataRegion| Up| root.sg| 0| 3| 127.0.0.1|6669|
+| 4|DataRegion| Up| root.sg| 0| 3| 127.0.0.1|6669|
+| 5|DataRegion| Up| root.sg| 1| 1| 127.0.0.1|6667|
+| 6|DataRegion| Up| root.sg| 0| 4| 127.0.0.1|6671|
+| 7|DataRegion| Up| root.sg| 0| 1| 127.0.0.1|6667|
+| 8|DataRegion| Up| root.sg| 0| 4| 127.0.0.1|6671|
+| 9|DataRegion| Up| root.sg| 0| 1| 127.0.0.1|6667|
+| 10|DataRegion| Up| root.sg| 0| 4| 127.0.0.1|6671|
+| 11|DataRegion| Up| root.sg| 0| 3| 127.0.0.1|6669|
+| 12|DataRegion| Up| root.sg| 0| 1| 127.0.0.1|6667|
++--------+----------+------+-------------+-----+----------+----------+----+
+Total line number = 10
+It costs 0.023s
+```
+
diff --git a/docs/UserGuide/Maintenance-Tools/Metric-Tool.md b/docs/UserGuide/Maintenance-Tools/Metric-Tool.md
index 8d3360880241..8a8385aa3785 100644
--- a/docs/UserGuide/Maintenance-Tools/Metric-Tool.md
+++ b/docs/UserGuide/Maintenance-Tools/Metric-Tool.md
@@ -78,117 +78,141 @@ Next, we will choose Prometheus format data as samples to describe each kind of
#### 4.3.1. 
API -| Metric | Tag | level | Description | Sample | -| ------------------- | --------------------- | ------ | ---------------------------------------- | -------------------------------------------- | +| Metric | Tag | level | Description | Sample | +| ------------------- | --------------------- | --------- | ---------------------------------------- | -------------------------------------------- | | entry_seconds_count | name="interface name" | important | The total request count of the interface | entry_seconds_count{name="openSession",} 1.0 | | entry_seconds_sum | name="interface name" | important | The total cost seconds of the interface | entry_seconds_sum{name="openSession",} 0.024 | | entry_seconds_max | name="interface name" | important | The max latency of the interface | entry_seconds_max{name="openSession",} 0.024 | | quantity_total | name="pointsIn" | important | The total points inserted into IoTDB | quantity_total{name="pointsIn",} 1.0 | -#### 4.3.2. File +#### 4.3.2. Task +| Metric | Tag | level | Description | Sample | +| ----------------------- | ----------------------------------------------------------------------------- | --------- | -------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| queue | name="compaction_inner/compaction_cross/flush",
status="running/waiting" | important | The count of current tasks in running and waiting status | queue{name="flush",status="waiting",} 0.0
queue{name="flush",status="running",} 0.0 | +| cost_task_seconds_count | name="inner_compaction/cross_compaction/flush" | important | The total count of tasks occurs till now | cost_task_seconds_count{name="flush",} 1.0 | +| cost_task_seconds_max | name="inner_compaction/cross_compaction/flush" | important | The seconds of the longest task takes till now | cost_task_seconds_max{name="flush",} 0.363 | +| cost_task_seconds_sum | name="inner_compaction/cross_compaction/flush" | important | The total cost seconds of all tasks till now | cost_task_seconds_sum{name="flush",} 0.363 | +| data_written | name="compaction",
type="aligned/not-aligned/total" | important | The size of data written in compaction | data_written{name="compaction",type="total",} 10240 | +| data_read | name="compaction" | important | The size of data read in compaction | data_read={name="compaction",} 10240 | -| Metric | Tag | level | Description | Sample | -| ---------- | -------------------- | ------ | ----------------------------------------------- | --------------------------- | -| file_size | name="wal/seq/unseq" | important | The current file size of wal/seq/unseq in bytes | file_size{name="wal",} 67.0 | -| file_count | name="wal/seq/unseq" | important | The current count of wal/seq/unseq files | file_count{name="seq",} 1.0 | +#### 4.3.3. Memory Usage -#### 4.3.3. Flush - -| Metric | Tag | level | Description | Sample | -| ----------------------- | ------------------------------------------- | ------ | ----------------------------------------------------------------- | --------------------------------------------------------------------------------------- | -| queue | name="flush",
status="running/waiting" | important | The count of current flushing tasks in running and waiting status | queue{name="flush",status="waiting",} 0.0
queue{name="flush",status="running",} 0.0 | -| cost_task_seconds_count | name="flush" | important | The total count of flushing occurs till now | cost_task_seconds_count{name="flush",} 1.0 | -| cost_task_seconds_max | name="flush" | important | The seconds of the longest flushing task takes till now | cost_task_seconds_max{name="flush",} 0.363 | -| cost_task_seconds_sum | name="flush" | important | The total cost seconds of all flushing tasks till now | cost_task_seconds_sum{name="flush",} 0.363 | - -#### 4.3.4. Compaction - -| Metric | Tag | level | Description | Sample | -|-------------------------|-------------------------------------------------------------------------|-------------|---------------------------------------------------------------------|---------------------------------------------------------------| -| queue | name="compaction_inner/compaction_cross",
status="running/waiting" | important | The count of current compaction tasks in running and waiting status | queue{name="compaction_inner",status="waiting",} 0.0 | -| cost_task_seconds_count | name="compaction" | important | The total count of compaction occurs till now | cost_task_seconds_count{name="compaction",} 1.0 | -| cost_task_seconds_max | name="compaction" | important | The seconds of the longest compaction task takes till now | cost_task_seconds_max{name="compaction",} 0.363 | -| cost_task_seconds_sum | name="compaction" | important | The total cost seconds of all compaction tasks till now | cost_task_seconds_sum{name="compaction",} 0.363 | -| data_written | name="compaction",
type="aligned/not-aligned/total" | important | The size of data written in compaction | data_written{name="compaction",type="total",} 10240 | -| data_read | name="compaction" | important | The size of data read in compaction | data_read={name="compaction",} 10240 | -#### 4.3.5. Memory Usage - -| Metric | Tag | level | Description | Sample | -| ------ | --------------------------------------- | ------ | --------------------------------------------------------------------- | --------------------------------- | +| Metric | Tag | level | Description | Sample | +| ------ | --------------------------------------- | --------- | --------------------------------------------------------------------- | --------------------------------- | | mem | name="chunkMetaData/storageGroup/mtree" | important | Current memory size of chunkMetaData/storageGroup/mtree data in bytes | mem{name="chunkMetaData",} 2050.0 | -#### 4.3.6. Cache Hit Ratio +#### 4.3.4. Cache -| Metric | Tag | level | Description | Sample | -| --------- | --------------------------------------- | ------ | ----------------------------------------------------------------------------- | --------------------------- | -| cache_hit | name="chunk/timeSeriesMeta/bloomFilter" | important | Cache hit ratio of chunk/timeSeriesMeta and prevention ratio of bloom filter | cache_hit{name="chunk",} 80 | +| Metric | Tag | level | Description | Sample | +| ----------- | ----------------------------------------------------------------- | --------- | ----------------------------------------------------------------------------------------- | --------------------------------------------------- | +| cache_hit | name="chunk/timeSeriesMeta/bloomFilter/SchemaCache" | important | Cache hit ratio of chunk/timeSeriesMeta/SchemaCache and prevention ratio of bloom filter | cache_hit{name="chunk",} 80 | +| cache_total | name="StorageGroup/SchemaPartition/DataPartition", type="hit/all" | important | The cache hit/all counts of StorageGroup/SchemaPartition/DataPartition | cache_total{name="DataPartition",type="all",} 801.0 | -#### 4.3.7. Business Data -| Metric | Tag | level | Description | Sample | -| -------- | ------------------------------------- | ------ | ------------------------------------------------------------- | -------------------------------- | -| quantity | name="timeSeries/storageGroup/device" | important | The current count of timeSeries/storageGroup/devices in IoTDB | quantity{name="timeSeries",} 1.0 | +#### 4.3.5. Business Data -#### 4.3.8. 
Cluster +| Metric | Tag | level | Description | Sample | +| -------- | ------------------------------------- | --------- | ------------------------------------------------------------- | -------------------------------- | +| quantity | name="timeSeries/storageGroup/device" | important | The current count of timeSeries/storageGroup/devices in IoTDB | quantity{name="timeSeries",} 1.0 | -| Metric | Tag | level | Description | Sample | -| ------------------------- | ------------------------------- | ------ | -------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| cluster_node_leader_count | name="{{ip}}" | important | The count of ```dataGroupLeader``` on each node, which reflects the distribution of leaders | cluster_node_leader_count{name="127.0.0.1",} 2.0 | -| cluster_uncommitted_log | name="{{ip_datagroupHeader}}" | important | The count of ```uncommitted_log``` on each node in data groups it belongs to | cluster_uncommitted_log{name="127.0.0.1_Data-127.0.0.1-40010-raftId-0",} 0.0 | -| cluster_node_status | name="{{ip}}" | important | The current node status, 1=online 2=offline | cluster_node_status{name="127.0.0.1",} 1.0 | -| cluster_elect_total | name="{{ip}}",status="fail/win" | important | The count and result (won or failed) of elections the node participated in. | cluster_elect_total{name="127.0.0.1",status="win",} 1.0 | +#### 4.3.6. Cluster + +| Metric | Tag | level | Description | Sample | +| ------------------------- | ------------------------------------------------------------------ | --------- | -------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| cluster_node_leader_count | name="{{ip}}" | important | The count of ```dataGroupLeader``` on each node, which reflects the distribution of leaders | cluster_node_leader_count{name="127.0.0.1",} 2.0 | +| cluster_uncommitted_log | name="{{ip_datagroupHeader}}" | important | The count of ```uncommitted_log``` on each node in data groups it belongs to | cluster_uncommitted_log{name="127.0.0.1_Data-127.0.0.1-40010-raftId-0",} 0.0 | +| cluster_node_status | name="{{ip}}" | important | The current node status, 1=online 2=offline | cluster_node_status{name="127.0.0.1",} 1.0 | +| cluster_elect_total | name="{{ip}}",status="fail/win" | important | The count and result (won or failed) of elections the node participated in. 
| cluster_elect_total{name="127.0.0.1",status="win",} 1.0 | +| config_node | name="online" | core | The number of online confignodes | config_node{name="online",} 3.0 | +| data_node | name="online" | core | The number of online datanodes | data_node{name="online",} 3.0 | +| partition_table | name="number" | core | The number of partition table | partition_table{name="number",} 2.0 | +| region | name="total/{{ip}}:{{port}}",type="SchemaRegion/DataRegion" | important | The number of schemaRegion/dataRegion of cluster or specific node | region{name="127.0.0.1:6671",type="DataRegion",} 10.0 | +| region | name="{{storageGroupName}}",type="SchemaRegion/DataRegion" | normal | The number of DataRegion/SchemaRegion in storage group | region{name="root.schema.sg1",type="DataRegion",} 14.0 | +| slot | name="{{storageGroupName}}",type="schemaSlotNumber/dataSlotNumber" | normal | The number of dataSlot/schemaSlot in storage group | slot{name="root.schema.sg1",type="schemaSlotNumber",} 2.0 | ### 4.4. IoTDB PreDefined Metrics Set -Users can modify the value of `predefinedMetrics` in the `iotdb-metric.yml` file to enable the predefined set of metrics, which `LOGBACK` does not support in `dropwizard`. +Users can modify the value of `predefinedMetrics` in the `iotdb-metric.yml` file to enable the predefined set of metrics,now support `JVM`, `LOGBACK`, `FILE`, `PROCESS`, `SYSYTEM`. #### 4.4.1. JVM ##### 4.4.1.1. Threads -| Metric | Tag | Description | Sample | -| -------------------------- | ------------------------------------------------------------- | ------------------------------------ | -------------------------------------------------- | -| jvm_threads_live_threads | None | The current count of threads | jvm_threads_live_threads 25.0 | -| jvm_threads_daemon_threads | None | The current count of daemon threads | jvm_threads_daemon_threads 12.0 | -| jvm_threads_peak_threads | None | The max count of threads till now | jvm_threads_peak_threads 28.0 | -| jvm_threads_states_threads | state="runnable/blocked/waiting/timed-waiting/new/terminated" | The count of threads in each status | jvm_threads_states_threads{state="runnable",} 10.0 | +| Metric | Tag | level | Description | Sample | +| -------------------------- | ------------------------------------------------------------- | --------- | ------------------------------------ | -------------------------------------------------- | +| jvm_threads_live_threads | None | Important | The current count of threads | jvm_threads_live_threads 25.0 | +| jvm_threads_daemon_threads | None | Important | The current count of daemon threads | jvm_threads_daemon_threads 12.0 | +| jvm_threads_peak_threads | None | Important | The max count of threads till now | jvm_threads_peak_threads 28.0 | +| jvm_threads_states_threads | state="runnable/blocked/waiting/timed-waiting/new/terminated" | Important | The count of threads in each status | jvm_threads_states_threads{state="runnable",} 10.0 | ##### 4.4.1.2. 
GC -| Metric | Tag | Description | Sample | -| ----------------------------------- | ------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | -| jvm_gc_pause_seconds_count | action="end of major GC/end of minor GC",cause="xxxx" | The total count of YGC/FGC events and its cause | jvm_gc_pause_seconds_count{action="end of major GC",cause="Metadata GC Threshold",} 1.0 | -| jvm_gc_pause_seconds_sum | action="end of major GC/end of minor GC",cause="xxxx" | The total cost seconds of YGC/FGC and its cause | jvm_gc_pause_seconds_sum{action="end of major GC",cause="Metadata GC Threshold",} 0.03 | -| jvm_gc_pause_seconds_max | action="end of major GC",cause="Metadata GC Threshold" | The max cost seconds of YGC/FGC till now and its cause | jvm_gc_pause_seconds_max{action="end of major GC",cause="Metadata GC Threshold",} 0.0 | -| jvm_gc_overhead_percent | None | An approximation of the percent of CPU time used by GC activities over the last lookback period or since monitoring began, whichever is shorter, in the range [0..1] | jvm_gc_overhead_percent 0.0 | -| jvm_gc_memory_promoted_bytes_total | None | Count of positive increases in the size of the old generation memory pool before GC to after GC | jvm_gc_memory_promoted_bytes_total 8425512.0 | -| jvm_gc_max_data_size_bytes | None | Max size of long-lived heap memory pool | jvm_gc_max_data_size_bytes 2.863661056E9 | -| jvm_gc_live_data_size_bytes | 无 | Size of long-lived heap memory pool after reclamation | jvm_gc_live_data_size_bytes 8450088.0 | -| jvm_gc_memory_allocated_bytes_total | None | Incremented for an increase in the size of the (young) heap memory pool after one GC to before the next | jvm_gc_memory_allocated_bytes_total 4.2979144E7 | +| Metric | Tag | level | Description | Sample | +| ----------------------------------- | ------------------------------------------------------ | --------- | ------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| jvm_gc_pause_seconds_count | action="end of major GC/end of minor GC",cause="xxxx" | Important | The total count of YGC/FGC events and its cause | jvm_gc_pause_seconds_count{action="end of major GC",cause="Metadata GC Threshold",} 1.0 | +| jvm_gc_pause_seconds_sum | action="end of major GC/end of minor GC",cause="xxxx" | Important | The total cost seconds of YGC/FGC and its cause | jvm_gc_pause_seconds_sum{action="end of major GC",cause="Metadata GC Threshold",} 0.03 | +| jvm_gc_pause_seconds_max | action="end of major GC",cause="Metadata GC Threshold" | Important | The max cost seconds of YGC/FGC till now and its cause | jvm_gc_pause_seconds_max{action="end of major GC",cause="Metadata GC Threshold",} 0.0 | +| jvm_gc_memory_promoted_bytes_total | None | Important | Count of positive increases in the size of the old generation memory pool before GC to after GC | jvm_gc_memory_promoted_bytes_total 8425512.0 | +| jvm_gc_max_data_size_bytes | None | Important | Max size of long-lived heap memory pool | jvm_gc_max_data_size_bytes 2.863661056E9 | +| jvm_gc_live_data_size_bytes | None | Important | Size of long-lived heap memory pool after reclamation | jvm_gc_live_data_size_bytes 8450088.0 | +| 
jvm_gc_memory_allocated_bytes_total | None | Important | Incremented for an increase in the size of the (young) heap memory pool after one GC to before the next | jvm_gc_memory_allocated_bytes_total 4.2979144E7 | ##### 4.4.1.3. Memory -| Metric | Tag | Description | Sample | -| ------------------------------- | ------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| jvm_buffer_memory_used_bytes | id="direct/mapped" | An estimate of the memory that the Java virtual machine is using for this buffer pool | jvm_buffer_memory_used_bytes{id="direct",} 3.46728099E8 | -| jvm_buffer_total_capacity_bytes | id="direct/mapped" | An estimate of the total capacity of the buffers in this pool | jvm_buffer_total_capacity_bytes{id="mapped",} 0.0 | -| jvm_buffer_count_buffers | id="direct/mapped" | An estimate of the number of buffers in the pool | jvm_buffer_count_buffers{id="direct",} 183.0 | -| jvm_memory_committed_bytes | {area="heap/nonheap",id="xxx",} | The amount of memory in bytes that is committed for the Java virtual machine to use | jvm_memory_committed_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8
jvm_memory_committed_bytes{area="nonheap",id="Metaspace",} 3.9051264E7
| -| jvm_memory_max_bytes | {area="heap/nonheap",id="xxx",} | The maximum amount of memory in bytes that can be used for memory management | jvm_memory_max_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8
jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space",} 1.073741824E9 | -| jvm_memory_used_bytes | {area="heap/nonheap",id="xxx",} | The amount of used memory | jvm_memory_used_bytes{area="heap",id="Par Eden Space",} 1.000128376E9
jvm_memory_used_bytes{area="nonheap",id="Code Cache",} 2.9783808E7
| +| Metric | Tag | level | Description | Sample | +| ------------------------------- | ------------------------------- | --------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| jvm_buffer_memory_used_bytes | id="direct/mapped" | Important | An estimate of the memory that the Java virtual machine is using for this buffer pool | jvm_buffer_memory_used_bytes{id="direct",} 3.46728099E8 | +| jvm_buffer_total_capacity_bytes | id="direct/mapped" | Important | An estimate of the total capacity of the buffers in this pool | jvm_buffer_total_capacity_bytes{id="mapped",} 0.0 | +| jvm_buffer_count_buffers | id="direct/mapped" | Important | An estimate of the number of buffers in the pool | jvm_buffer_count_buffers{id="direct",} 183.0 | +| jvm_memory_committed_bytes | {area="heap/nonheap",id="xxx",} | Important | The amount of memory in bytes that is committed for the Java virtual machine to use | jvm_memory_committed_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8
jvm_memory_committed_bytes{area="nonheap",id="Metaspace",} 3.9051264E7
| +| jvm_memory_max_bytes | {area="heap/nonheap",id="xxx",} | Important | The maximum amount of memory in bytes that can be used for memory management | jvm_memory_max_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8
jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space",} 1.073741824E9 | +| jvm_memory_used_bytes | {area="heap/nonheap",id="xxx",} | Important | The amount of used memory | jvm_memory_used_bytes{area="heap",id="Par Eden Space",} 1.000128376E9
jvm_memory_used_bytes{area="nonheap",id="Code Cache",} 2.9783808E7
| ##### 4.4.1.4. Classes -| Metric | Tag | Description | Sample | -| ---------------------------------- | --------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | -| jvm_classes_unloaded_classes_total | 无 | The total number of classes unloaded since the Java virtual machine has started execution | jvm_classes_unloaded_classes_total 680.0 | -| jvm_classes_loaded_classes | 无 | The number of classes that are currently loaded in the Java virtual machine | jvm_classes_loaded_classes 5975.0 | -| jvm_compilation_time_ms_total | {compiler="HotSpot 64-Bit Tiered Compilers",} | The approximate accumulated elapsed time spent in compilation | jvm_compilation_time_ms_total{compiler="HotSpot 64-Bit Tiered Compilers",} 107092.0 | +| Metric | Tag | level | Description | Sample | +| ---------------------------------- | --------------------------------------------- | --------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| jvm_classes_unloaded_classes_total | None | Important | The total number of classes unloaded since the Java virtual machine has started execution | jvm_classes_unloaded_classes_total 680.0 | +| jvm_classes_loaded_classes | None | Important | The number of classes that are currently loaded in the Java virtual machine | jvm_classes_loaded_classes 5975.0 | +| jvm_compilation_time_ms_total | {compiler="HotSpot 64-Bit Tiered Compilers",} | Important | The approximate accumulated elapsed time spent in compilation | jvm_compilation_time_ms_total{compiler="HotSpot 64-Bit Tiered Compilers",} 107092.0 | -#### 4.4.2. Log Events +#### 4.4.2. File -| Metric | Tag | Description | Sample | -| -------------------- | -------------------------------------- | ------------------------------------------------------------- | --------------------------------------- | -| logback_events_total | {level="trace/debug/info/warn/error",} | The count of trace/debug/info/warn/error log events till now | logback_events_total{level="warn",} 0.0 | +| Metric | Tag | level | Description | Sample | +| ---------- | -------------------- | --------- | ----------------------------------------------- | --------------------------- | +| file_size | name="wal/seq/unseq" | important | The current file size of wal/seq/unseq in bytes | file_size{name="wal",} 67.0 | +| file_count | name="wal/seq/unseq" | important | The current count of wal/seq/unseq files | file_count{name="seq",} 1.0 | + +#### 4.4.3. Logback + +| Metric | Tag | level | Description | 示例 | +| -------------------- | -------------------------------------- | --------- | ------------------------------------------------------------- | --------------------------------------- | +| logback_events_total | {level="trace/debug/info/warn/error",} | Important | The count of trace/debug/info/warn/error log events till now | logback_events_total{level="warn",} 0.0 | + +#### 4.4.4. 
Process
+| Metric | Tag | level | Description | Sample |
+| --------------------- | -------------- | ----- | ------------------------------------------------------------------- | ------------------------------------------------ |
+| process_cpu_load | name="cpu" | core | Current process CPU usage (%) | process_cpu_load{name="process",} 5.0 |
+| process_cpu_time | name="cpu" | core | Total CPU time occupied by the process (ns) | process_cpu_time{name="process",} 3.265625E9 |
+| process_max_mem | name="memory" | core | The maximum available memory for the JVM | process_max_mem{name="process",} 3.545759744E9 |
+| process_used_mem | name="memory" | core | The current available memory for the JVM | process_used_mem{name="process",} 4.6065456E7 |
+| process_total_mem | name="memory" | core | The current requested memory for the JVM | process_total_mem{name="process",} 2.39599616E8 |
+| process_free_mem | name="memory" | core | The free available memory for the JVM | process_free_mem{name="process",} 1.94035584E8 |
+| process_mem_ratio | name="memory" | core | Memory footprint ratio of the process | process_mem_ratio{name="process",} 0.0 |
+| process_threads_count | name="process" | core | The current number of threads | process_threads_count{name="process",} 11.0 |
+| process_status | name="process" | core | The process survival status: 1.0 means alive, 0.0 means terminated | process_status{name="process",} 1.0 |
+
+#### 4.4.5. System
+| Metric | Tag | level | Description | Sample |
+| ------------------------------ | ------------- | --------- | ------------------------------------------------------------ | --------------------------------------------------------------- |
+| sys_cpu_load | name="cpu" | core | Current system CPU usage (%) | sys_cpu_load{name="system",} 15.0 |
+| sys_cpu_cores | name="cpu" | core | Available CPU cores | sys_cpu_cores{name="system",} 16.0 |
+| sys_total_physical_memory_size | name="memory" | core | Maximum physical memory of the system | sys_total_physical_memory_size{name="system",} 1.5950999552E10 |
+| sys_free_physical_memory_size | name="memory" | core | The current available memory of the system | sys_free_physical_memory_size{name="system",} 4.532396032E9 |
+| sys_total_swap_space_size | name="memory" | core | The maximum swap area of the system | sys_total_swap_space_size{name="system",} 2.1051273216E10 |
+| sys_free_swap_space_size | name="memory" | core | The available swap area of the system | sys_free_swap_space_size{name="system",} 2.931576832E9 |
+| sys_committed_vm_size | name="memory" | important | The amount of virtual memory available to running processes | sys_committed_vm_size{name="system",} 5.04344576E8 |
+| sys_disk_total_space | name="disk" | core | The total disk space | sys_disk_total_space{name="system",} 5.10770798592E11 |
+| sys_disk_free_space | name="disk" | core | The available disk space | sys_disk_free_space{name="system",} 3.63467845632E11 |
 
 ### 4.5. Add custom metrics
 - If you want to add your own metrics data in IoTDB, please see the [IoTDB Metric Framework] (https://github.com/apache/iotdb/tree/master/metrics) document. 
@@ -216,6 +240,9 @@ The metrics collection switch is disabled by default,you need to enable it fro # whether enable the module enableMetric: false +# Is stat performance of operation latency +enablePerformanceStat: false + # Multiple reporter, options: [JMX, PROMETHEUS, IOTDB], IOTDB is off by default metricReporterList: - JMX @@ -227,9 +254,10 @@ monitorType: MICROMETER # Level of metric level, options: [CORE, IMPORTANT, NORMAL, ALL] metricLevel: IMPORTANT -# Predefined metric, options: [JVM, LOGBACK], LOGBACK are not supported in dropwizard +# Predefined metric, options: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] predefinedMetrics: - JVM + - FILE # The http server's port for prometheus exporter to get metric data. prometheusExporterPort: 9091 diff --git a/docs/UserGuide/Maintenance-Tools/Monitor-and-Log-Tools.md b/docs/UserGuide/Maintenance-Tools/Monitor-and-Log-Tools.md index 18e2d2c175c1..9d71eb479c11 100644 --- a/docs/UserGuide/Maintenance-Tools/Monitor-and-Log-Tools.md +++ b/docs/UserGuide/Maintenance-Tools/Monitor-and-Log-Tools.md @@ -49,7 +49,7 @@ To grasp the performance of iotdb, this module is added to count the time-consum #### Configuration parameter -location:conf/iotdb-engine.properties +location:conf/iotdb-datanode.properties

diff --git a/docs/UserGuide/Maintenance-Tools/Sync-Tool.md b/docs/UserGuide/Maintenance-Tools/Sync-Tool.md index 7d484a329e25..937fa9425c12 100644 --- a/docs/UserGuide/Maintenance-Tools/Sync-Tool.md +++ b/docs/UserGuide/Maintenance-Tools/Sync-Tool.md @@ -287,7 +287,7 @@ IoTDB> SHOW PIPESERVER STATUS ##### **Receiver** -- `vi conf/iotdb-engine.properties` to config the parameters,set the IP white list to 192.168.0.1/1 to receive and only receive data from sender. +- `vi conf/iotdb-datanode.properties` to config the parameters,set the IP white list to 192.168.0.1/1 to receive and only receive data from sender. ``` #################### @@ -420,7 +420,7 @@ It costs 0.134s ``` - Cause by: The receiver is not started or the receiver cannot be connected. - - Solution: Execute `SHOW PIPESERVER` on the receiver side to check if the receiver side is started, if not use `START PIPESERVER` to start; check if the whitelist in `iotdb-engine.properties` on the receiver side contains the sender ip. + - Solution: Execute `SHOW PIPESERVER` on the receiver side to check if the receiver side is started, if not use `START PIPESERVER` to start; check if the whitelist in `iotdb-datanode.properties` on the receiver side contains the sender ip. - Execute diff --git a/docs/UserGuide/Maintenance-Tools/Watermark-Tool.md b/docs/UserGuide/Maintenance-Tools/Watermark-Tool.md index e530435df627..02a12042885c 100644 --- a/docs/UserGuide/Maintenance-Tools/Watermark-Tool.md +++ b/docs/UserGuide/Maintenance-Tools/Watermark-Tool.md @@ -25,7 +25,7 @@ This tool has two functions: 1) watermark embedding of the IoTDB query result an #### Configuration -Watermark is disabled by default in IoTDB. To enable watermark embedding, the first thing is to modify the following fields in the configuration file `iotdb-engine.properties`: +Watermark is disabled by default in IoTDB. To enable watermark embedding, the first thing is to modify the following fields in the configuration file `iotdb-datanode.properties`: | Name | Example | Explanation | | ----------------------- | ------------------------------------------------------ | ------------------------------------------------------------ | @@ -43,7 +43,7 @@ Notes: - Both of them should be positive integers. - `embed_row_cycle` controls the ratio of rows watermarked. The smaller the `embed_row_cycle`, the larger the ratio of rows watermarked. When `embed_row_cycle` equals 1, every row is watermarked. - GroupBasedLSBMethod uses LSB embedding. `embed_lsb_num` controls the number of least significant bits available for watermark embedding. The biggger the `embed_lsb_num`, the larger the varying range of a data point. -- `watermark_secret_key`, `watermark_bit_string` and `watermark_method` should be kept secret from possible attackers. That is, it is your responsiblity to take care of `iotdb-engine.properties`. +- `watermark_secret_key`, `watermark_bit_string` and `watermark_method` should be kept secret from possible attackers. That is, it is your responsiblity to take care of `iotdb-datanode.properties`. 
#### Usage Example diff --git a/docs/UserGuide/Operate-Metadata/Auto-Create-MetaData.md b/docs/UserGuide/Operate-Metadata/Auto-Create-MetaData.md index 9ea5bdbb5dde..dbc303c4d1aa 100644 --- a/docs/UserGuide/Operate-Metadata/Auto-Create-MetaData.md +++ b/docs/UserGuide/Operate-Metadata/Auto-Create-MetaData.md @@ -83,14 +83,14 @@ Illustrated as the following figure: ### Type inference -| Data(String Format) | Format Type | iotdb-engine.properties | Default | -|:---:|:---|:---|:---| -| true | boolean | boolean\_string\_infer\_type | BOOLEAN | -| 1 | integer | integer\_string\_infer\_type | FLOAT | -| 17000000(integer > 2^24) | integer | long\_string\_infer\_type | DOUBLE | +| Data(String Format) | Format Type | iotdb-datanode.properties | Default | +|:---:|:---|:------------------------------|:---| +| true | boolean | boolean\_string\_infer\_type | BOOLEAN | +| 1 | integer | integer\_string\_infer\_type | FLOAT | +| 17000000(integer > 2^24) | integer | long\_string\_infer\_type | DOUBLE | | 1.2 | floating | floating\_string\_infer\_type | FLOAT | -| NaN | nan | nan\_string\_infer\_type | DOUBLE | -| 'I am text' | text | x | x | +| NaN | nan | nan\_string\_infer\_type | DOUBLE | +| 'I am text' | text | x | x | * Data types can be configured as BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TEXT. @@ -98,14 +98,14 @@ Illustrated as the following figure: ### Encoding Type -| Data Type | iotdb-engine.properties | Default | -|:---|:---|:---| +| Data Type | iotdb-datanode.properties | Default | +|:---|:---------------------------|:---| | BOOLEAN | default\_boolean\_encoding | RLE | -| INT32 | default\_int32\_encoding | RLE | -| INT64 | default\_int64\_encoding | RLE | -| FLOAT | default\_float\_encoding | GORILLA | -| DOUBLE | default\_double\_encoding | GORILLA | -| TEXT | default\_text\_encoding | PLAIN | +| INT32 | default\_int32\_encoding | RLE | +| INT64 | default\_int64\_encoding | RLE | +| FLOAT | default\_float\_encoding | GORILLA | +| DOUBLE | default\_double\_encoding | GORILLA | +| TEXT | default\_text\_encoding | PLAIN | * Encoding types can be configured as PLAIN, RLE, TS_2DIFF, GORILLA, DICTIONARY. diff --git a/docs/UserGuide/Process-Data/Triggers.md b/docs/UserGuide/Process-Data/Triggers.md index d1d47f691405..950aa50b4cb4 100644 --- a/docs/UserGuide/Process-Data/Triggers.md +++ b/docs/UserGuide/Process-Data/Triggers.md @@ -307,7 +307,7 @@ When a user manages triggers, 4 types of authorities will be involved: * `START_TRIGGER`: Only users with this authority are allowed to start triggers. This authority is path dependent. * `STOP_TRIGGER`: Only users with this authority are allowed to stop triggers. This authority is path dependent. -For more information, refer to [Authority Management Statement](../Operation%20Manual/Administration.md). +For more information, refer to [Authority Management Statement](../Administration-Management/Administration.md). @@ -679,7 +679,113 @@ annotations.put("description", "{{.alertname}}: {{.series}} is {{.value}}"); alertManagerHandler.onEvent(new AlertManagerEvent(alertName, extraLabels, annotations)); ``` +#### ForwardSink +Trigger can use ForwardSink to forward written data through HTTP and MQTT, which has contains HTTPForwardHandler and MQTTForwardHandler. To improve connection efficiency, all HTTPForwardHandlers share a connection pool, while MQTTForwardHandlers with the same host, port and username parameters share a connection pool. 
+
+The difference between MQTTForwardHandler and MQTTHandler is that the former uses a connection pool while the latter does not; the message formats also differ.
+
+See [ForwardTrigger](#ForwardTrigger) for an example.
+
+## ForwardTrigger
+ForwardTrigger is a built-in trigger for data distribution/forwarding. It uses ForwardSink and consumption queues to process trigger events asynchronously in batches. Asynchronous forwarding prevents a blocked forward operation from blocking the whole system. The connection pool in ForwardSink allows connections to be reused efficiently and safely, avoiding the overhead of frequently establishing and closing connections.
+
+Forward Queue Consume
+
+### Trigger process
+1. A trigger event arrives.
+2. ForwardTrigger puts the event into the queue pool.
+3. The trigger event finishes.
+
+### Queue pool consumption process
+1. Put the trigger event into a queue, grouped by device (round-robin if there is no device).
+2. Each queue consumer thread monitors its queue. When it times out or reaches the maximum forwarding batch, it calls the handler for batch forwarding.
+3. The handler serializes the batch of trigger events, encapsulates the message, and then uses the built-in connection pool to complete the forwarding.
+
+### Message format
+At present, only a fixed JSON template is supported as the message format. The template is as follows:
+```
+[{"device":"%s","measurement":"%s","timestamp":%d,"value":%s}]
+```
+
+### Example
+#### Create ForwardTrigger
+Create a forward_http trigger using the HTTP protocol and a forward_mqtt trigger using the MQTT protocol, which subscribe to the prefix paths `root.http` and `root.mqtt` respectively.
+```sql
+CREATE trigger forward_http AFTER INSERT ON root.http
+AS 'org.apache.iotdb.db.engine.trigger.builtin.ForwardTrigger'
+WITH ('protocol' = 'http', 'endpoint' = 'http://127.0.0.1:8080/forward_receive')
+
+CREATE trigger forward_mqtt AFTER INSERT ON root.mqtt
+AS 'org.apache.iotdb.db.engine.trigger.builtin.ForwardTrigger'
+WITH ('protocol' = 'mqtt', 'host' = '127.0.0.1', 'port' = '1883', 'username' = 'root', 'password' = 'root', 'topic' = 'mqtt-test')
+```
+
+#### Fire trigger
+Insert data into sub-paths of the two prefix paths to fire the triggers. 
+```sql
+INSERT INTO root.http.d1(timestamp, s1) VALUES (1, 1);
+INSERT INTO root.http.d1(timestamp, s2) VALUES (2, true);
+INSERT INTO root.mqtt.d1(timestamp, s1) VALUES (1, 1);
+INSERT INTO root.mqtt.d1(timestamp, s2) VALUES (2, true);
+```
+
+#### Receive forwarded message
+After the trigger is fired, JSON data in the following format will be received at the HTTP receiving end:
+```json
+[
+    {
+        "device":"root.http.d1",
+        "measurement":"s1",
+        "timestamp":1,
+        "value":1.0
+    },
+    {
+        "device":"root.http.d1",
+        "measurement":"s2",
+        "timestamp":2,
+        "value":true
+    }
+]
+```
+
+After the trigger is fired, JSON data in the following format will be received at the MQTT receiving end:
+```json
+[
+    {
+        "device":"root.mqtt.d1",
+        "measurement":"s1",
+        "timestamp":1,
+        "value":1.0
+    },
+    {
+        "device":"root.mqtt.d1",
+        "measurement":"s2",
+        "timestamp":2,
+        "value":true
+    }
+]
+```
+
+### Config Parameter of ForwardTrigger
+| Parameter | Required | Default | Max | Description |
+|--------------------|----------|--------------|------|--------------------------------------------------------------------------------------------------------------------------------|
+| protocol | true | http | | Forward protocol, such as HTTP/MQTT |
+| queueNumber | | 8 | 8 | The number of queues; compared with the global parameter trigger_forward_max_queue_number, the smaller value takes effect |
+| queueSize | | 2000 | 2000 | The size of each queue; compared with the global parameter trigger_forward_max_size_per_queue, the smaller value takes effect |
+| batchSize | | 50 | 50 | The size of each forwarding batch; compared with the global parameter trigger_forward_batch_size, the smaller value takes effect |
+| stopIfException | | false | | Stop forwarding if an exception occurs |
+| endpoint | true | | | Request endpoint address (HTTP protocol parameter)
Note: HTTP connection pool parameters depend on global parameters:
trigger_forward_http_pool_size=200
and
trigger_forward_http_pool_max_per_route=20 | +| host | true | | | MQTT broker host (MQTT protocol parameter) | +| port | true | | | MQTT broker port (MQTT protocol parameter) | +| username | true | | | Username (MQTT protocol parameter) | +| password | true | | | Password (MQTT protocol parameter) | +| topic | true | | | The topic of MQTT message (MQTT protocol parameter) | +| reconnectDelay | | 10ms | | Reconnection waiting time (MQTT protocol parameter) | +| connectAttemptsMax | | 3 | | Max connection attempts (MQTT protocol parameter) | +| qos | | exactly_once | | Quality of Service (MQTT protocol parameter), must be exactly_once, at_least_once or at_most_once | +| poolSize | | 4 | 4 | MQTT Connection Pool Size (MQTT protocol parameter), comparing to the global parameter trigger_forward_mqtt_pool_size and take the smaller | +| retain | | false | | Let MQTT Broker retain the message after publishing (MQTT protocol parameter) | ## Maven Project Example @@ -807,7 +913,7 @@ public class TriggerExample implements Trigger { You can try this trigger by following the steps below: -* Enable MQTT service by modifying `iotdb-engine.properties` +* Enable MQTT service by modifying `iotdb-datanode.properties` ``` properties # whether to enable the mqtt service. diff --git a/docs/UserGuide/Process-Data/UDF-User-Defined-Function.md b/docs/UserGuide/Process-Data/UDF-User-Defined-Function.md index ad1b87619294..9c1dd6f58efc 100644 --- a/docs/UserGuide/Process-Data/UDF-User-Defined-Function.md +++ b/docs/UserGuide/Process-Data/UDF-User-Defined-Function.md @@ -47,7 +47,7 @@ If you use [Maven](http://search.maven.org/), you can search for the development ``` xml org.apache.iotdb - iotdb-server + udf-api 0.14.0-SNAPSHOT provided @@ -57,7 +57,7 @@ If you use [Maven](http://search.maven.org/), you can search for the development ## UDTF(User Defined Timeseries Generating Function) -To write a UDTF, you need to inherit the `org.apache.iotdb.db.query.udf.api.UDTF` class, and at least implement the `beforeStart` method and a `transform` method. +To write a UDTF, you need to inherit the `org.apache.iotdb.udf.api.UDTF` class, and at least implement the `beforeStart` method and a `transform` method. The following table shows all the interfaces available for user implementation. @@ -107,7 +107,7 @@ This method is mainly used to customize UDTF. In this method, the user can do th #### UDFParameters -`UDFParameters` is used to parse UDF parameters in SQL statements (the part in parentheses after the UDF function name in SQL). The input parameters have two parts. The first part is the paths (measurements) and their data types of the time series that the UDF needs to process, and the second part is the key-value pair attributes for customization. Only the second part can be empty. +`UDFParameters` is used to parse UDF parameters in SQL statements (the part in parentheses after the UDF function name in SQL). The input parameters have two parts. The first part is data types of the time series that the UDF needs to process, and the second part is the key-value pair attributes for customization. Only the second part can be empty. 
Example: @@ -120,11 +120,6 @@ Usage: ``` java void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) throws Exception { - // parameters - for (PartialPath path : parameters.getPaths()) { - TSDataType dataType = parameters.getDataType(path); - // do something - } String stringValue = parameters.getString("key1"); // iotdb Float floatValue = parameters.getFloat("key2"); // 123.45 Double doubleValue = parameters.getDouble("key3"); // null @@ -152,7 +147,7 @@ void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) th // configurations configurations .setAccessStrategy(new RowByRowAccessStrategy()) - .setOutputDataType(TSDataType.INT32); + .setOutputDataType(Type.INT32); } ``` @@ -218,7 +213,7 @@ Note that the type of output sequence you set here determines the type of data t | `FLOAT` | `float` | | `DOUBLE` | `double` | | `BOOLEAN` | `boolean` | -| `TEXT` | `java.lang.String` and `org.apache.iotdb.tsfile.utils.Binary` | +| `TEXT` | `java.lang.String` and `org.apache.iotdb.udf.api.type.Binary` | The type of output time series of a UDTF is determined at runtime, which means that a UDTF can dynamically determine the type of output time series according to the type of input time series. Here is a simple example: @@ -245,13 +240,13 @@ This method processes the raw data one row at a time. The raw data is input from The following is a complete UDF example that implements the `void transform(Row row, PointCollector collector) throws Exception` method. It is an adder that receives two columns of time series as input. When two data points in a row are not `null`, this UDF will output the algebraic sum of these two data points. ``` java -import org.apache.iotdb.db.query.udf.api.UDTF; -import org.apache.iotdb.db.query.udf.api.access.Row; -import org.apache.iotdb.db.query.udf.api.collector.PointCollector; -import org.apache.iotdb.db.query.udf.api.customizer.config.UDTFConfigurations; -import org.apache.iotdb.db.query.udf.api.customizer.parameter.UDFParameters; -import org.apache.iotdb.db.query.udf.api.customizer.strategy.RowByRowAccessStrategy; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.Row; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; public class Adder implements UDTF { @@ -284,13 +279,14 @@ Below is a complete UDF example that implements the `void transform(RowWindow ro ```java import java.io.IOException; -import org.apache.iotdb.db.query.udf.api.UDTF; -import org.apache.iotdb.db.query.udf.api.access.RowWindow; -import org.apache.iotdb.db.query.udf.api.collector.PointCollector; -import org.apache.iotdb.db.query.udf.api.customizer.config.UDTFConfigurations; -import org.apache.iotdb.db.query.udf.api.customizer.parameter.UDFParameters; -import org.apache.iotdb.db.query.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.Row; +import org.apache.iotdb.udf.api.access.RowWindow; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import 
org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; public class Counter implements UDTF { @@ -328,13 +324,13 @@ Below is a complete UDF example that implements the `void terminate(PointCollect ```java import java.io.IOException; -import org.apache.iotdb.db.query.udf.api.UDTF; -import org.apache.iotdb.db.query.udf.api.access.Row; -import org.apache.iotdb.db.query.udf.api.collector.PointCollector; -import org.apache.iotdb.db.query.udf.api.customizer.config.UDTFConfigurations; -import org.apache.iotdb.db.query.udf.api.customizer.parameter.UDFParameters; -import org.apache.iotdb.db.query.udf.api.customizer.strategy.RowByRowAccessStrategy; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.Row; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; public class Max implements UDTF { @@ -516,7 +512,7 @@ For more user permissions related content, please refer to [Account Management S ## Configurable Properties -When querying by a UDF, IoTDB may prompt that there is insufficient memory. You can resolve the issue by configuring `udf_initial_byte_array_length_for_memory_control`, `udf_memory_budget_in_mb` and `udf_reader_transformer_collector_memory_proportion` in `iotdb-engine.properties` and restarting the server. +When querying by a UDF, IoTDB may prompt that there is insufficient memory. You can resolve the issue by configuring `udf_initial_byte_array_length_for_memory_control`, `udf_memory_budget_in_mb` and `udf_reader_transformer_collector_memory_proportion` in `iotdb-datanode.properties` and restarting the server. diff --git a/docs/UserGuide/Query-Data/Query-Filter.md b/docs/UserGuide/Query-Data/Query-Filter.md index 4321d1c0b468..d0d90ebbf3bb 100644 --- a/docs/UserGuide/Query-Data/Query-Filter.md +++ b/docs/UserGuide/Query-Data/Query-Filter.md @@ -26,8 +26,9 @@ In IoTDB query statements, two filter conditions, **time filter** and **value fi The supported operators are as follows: - Comparison operators: greater than (`>`), greater than or equal ( `>=`), equal ( `=` or `==`), not equal ( `!=` or `<>`), less than or equal ( `<=`), less than ( `<`). -- Range contains operator: contains ( `IN` ). - Logical operators: and ( `AND` or `&` or `&&`), or ( `OR` or `|` or `||`), not ( `NOT` or `!`). +- Range contains operator: contains ( `IN` ). +- String matches operator: `LIKE`, `REGEXP`. ## Time Filter @@ -91,6 +92,21 @@ An example is as follows: select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); ```` +## Null Filter +Use null filters to filter data whose data value is null or not. + +1. Select data with values is null: + + ```sql + select code from root.sg1.d1 where temperature is null; + ```` + +2. Select data with values is not null: + + ```sql + select code from root.sg1.d1 where temperature is not null; + ```` + ## Fuzzy Query Fuzzy query is divided into Like statement and Regexp statement, both of which can support fuzzy matching of TEXT type data. 
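+
+A brief sketch of the two fuzzy-matching filters in a WHERE clause (the series `root.sg1.d1.value` and the patterns are illustrative only):
+
+```sql
+select value from root.sg1.d1 where value like '%cc%';
+select value from root.sg1.d1 where value regexp '^[A-Za-z]+$';
+```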
diff --git a/docs/UserGuide/Query-Data/Select-Expression.md b/docs/UserGuide/Query-Data/Select-Expression.md index d97fc8562211..73f52dd981e7 100644 --- a/docs/UserGuide/Query-Data/Select-Expression.md +++ b/docs/UserGuide/Query-Data/Select-Expression.md @@ -109,6 +109,7 @@ It costs 0.014s ``` ## Compare Expression +### Operators #### Unary Logical Operators Supported operator `!` @@ -124,7 +125,7 @@ Supported operators `>`, `>=`, `<`, `<=`, `==`, `!=` Supported input data types: `INT32`, `INT64`, `FLOAT` and `DOUBLE` -It will transform all type to `DOUBLE` then do computation. +Note: It will transform all type to `DOUBLE` then do computation. Output data type: `BOOLEAN` @@ -138,14 +139,31 @@ Output data type: `BOOLEAN` Note: Only when the left operand and the right operand under a certain timestamp are both `BOOLEAN` type, the binary logic operation will have an output value. +#### IN Operators + +Supported operator `IN` + +Supported input data types: `All Types` + +Output data type: `BOOLEAN` + +#### String Match Operators + +Supported operators `LIKE`, `REGEXP` + +Supported input data types: `TEXT` + +Output data type: `BOOLEAN` + ### Example + +Input1: ```sql select a, b, a > 10, a <= b, !(a <= b), a > 10 && a > b from root.test; ``` -Output: +Output1: ``` -IoTDB> select a, b, a > 10, a <= b, !(a <= b), a > 10 && a > b from root.test; +-----------------------------+-----------+-----------+----------------+--------------------------+---------------------------+------------------------------------------------+ | Time|root.test.a|root.test.b|root.test.a > 10|root.test.a <= root.test.b|!root.test.a <= root.test.b|(root.test.a > 10) & (root.test.a > root.test.b)| +-----------------------------+-----------+-----------+----------------+--------------------------+---------------------------+------------------------------------------------+ @@ -158,6 +176,21 @@ IoTDB> select a, b, a > 10, a <= b, !(a <= b), a > 10 && a > b from root.test; +-----------------------------+-----------+-----------+----------------+--------------------------+---------------------------+------------------------------------------------+ ``` +Input2: +```sql +select a, b, a in (1, 2), b like '1%', b regexp '[0-2]' from root.test; +``` + +Output2: +``` ++-----------------------------+-----------+-----------+--------------------+-------------------------+--------------------------+ +| Time|root.test.a|root.test.b|root.test.a IN (1,2)|root.test.b LIKE '^1.*?$'|root.test.b REGEXP '[0-2]'| ++-----------------------------+-----------+-----------+--------------------+-------------------------+--------------------------+ +|1970-01-01T08:00:00.001+08:00| 1| 111test111| true| true| true| +|1970-01-01T08:00:00.003+08:00| 3| 333test333| false| false| false| ++-----------------------------+-----------+-----------+--------------------+-------------------------+--------------------------+ +``` + ## Priority of Operators |priority|operator |meaning | diff --git a/docs/UserGuide/QuickStart/QuickStart.md b/docs/UserGuide/QuickStart/QuickStart.md index 103587bd0689..ef06d4721d1f 100644 --- a/docs/UserGuide/QuickStart/QuickStart.md +++ b/docs/UserGuide/QuickStart/QuickStart.md @@ -48,8 +48,8 @@ You can download the binary file from: configuration files are under "conf" folder - * environment config module (`iotdb-env.bat`, `iotdb-env.sh`), - * system config module (`iotdb-engine.properties`) + * environment config module (`datanode-env.bat`, `datanode-env.sh`), + * system config module (`iotdb-datanode.properties`) * log config module 
(`logback.xml`). For more, see [Config](../Reference/Config-Manual.md) in detail. @@ -82,7 +82,7 @@ if you want to use JMX to connect IOTDB, you may need to add ``` -Dcom.sun.management.jmxremote.rmi.port=PORT -Djava.rmi.server.hostname=IP ``` -to $IOTDB_JMX_OPTS in iotdb-env.sh. or iotdb-env.bat +to $IOTDB_JMX_OPTS in datanode-env.sh. or datanode-env.bat ### Use Cli @@ -234,6 +234,6 @@ The server can be stopped with ctrl-C or the following script: The configuration files is in the `conf` folder, includes: -* environment configuration (`iotdb-env.bat`, `iotdb-env.sh`), -* system configuration (`iotdb-engine.properties`) +* environment configuration (`datanode-env.bat`, `datanode-env.sh`), +* system configuration (`iotdb-datanode.properties`) * log configuration (`logback.xml`). \ No newline at end of file diff --git a/docs/UserGuide/QuickStart/ServerFileList.md b/docs/UserGuide/QuickStart/ServerFileList.md index 8f0f425fdb40..3f6e87eb06a9 100644 --- a/docs/UserGuide/QuickStart/ServerFileList.md +++ b/docs/UserGuide/QuickStart/ServerFileList.md @@ -27,9 +27,9 @@ ## Configuration Files > under conf directory -1. iotdb-engine.properties +1. iotdb-datanode.properties 2. logback.xml -3. iotdb-env.sh +3. datanode-env.sh 4. jmx.access 5. jmx.password 6. iotdb-sync-client.properties diff --git a/docs/UserGuide/Reference/Config-Manual.md b/docs/UserGuide/Reference/Config-Manual.md index d044811ffa71..764eba562cba 100644 --- a/docs/UserGuide/Reference/Config-Manual.md +++ b/docs/UserGuide/Reference/Config-Manual.md @@ -26,9 +26,9 @@ Before starting to use IoTDB, you need to config the configuration files first. In total, we provide users three kinds of configurations module: -* environment configuration file (`iotdb-env.bat`, `iotdb-env.sh`). The default configuration file for the environment configuration item. Users can configure the relevant system configuration items of JAVA-JVM in the file. -* system configuration file (`iotdb-engine.properties`). - * `iotdb-engine.properties`: The default configuration file for the IoTDB engine layer configuration item. Users can configure the IoTDB engine related parameters in the file, such as JDBC service listening port (`rpc_port`), unsequence data storage directory (`unsequence_data_dir`), etc. What's more, Users can configure the information about the TsFile, such as the data size written to the disk per time(`group_size_in_byte`). +* environment configuration file (`datanode-env.bat`, `datanode-env.sh`). The default configuration file for the environment configuration item. Users can configure the relevant system configuration items of JAVA-JVM in the file. +* system configuration file (`iotdb-datanode.properties`). + * `iotdb-datanode.properties`: The default configuration file for the IoTDB engine layer configuration item. Users can configure the IoTDB engine related parameters in the file, such as JDBC service listening port (`rpc_port`), unsequence data storage directory (`unsequence_data_dir`), etc. What's more, Users can configure the information about the TsFile, such as the data size written to the disk per time(`group_size_in_byte`). * log configuration file (`logback.xml`) @@ -43,7 +43,7 @@ Trigger way: The client sends the command `load configuration` to the IoTDB serv ## IoTDB Environment Configuration File -The environment configuration file is mainly used to configure the Java environment related parameters when IoTDB Server is running, such as JVM related configuration. 
This part of the configuration is passed to the JVM when the IoTDB Server starts. Users can view the contents of the environment configuration file by viewing the `iotdb-env.sh` (or `iotdb-env.bat`) file. +The environment configuration file is mainly used to configure the Java environment related parameters when IoTDB Server is running, such as JVM related configuration. This part of the configuration is passed to the JVM when the IoTDB Server starts. Users can view the contents of the environment configuration file by viewing the `datanode-env.sh` (or `datanode-env.bat`) file. The detail of each variables are as follows: @@ -374,12 +374,12 @@ The permission definitions are in ${IOTDB\_CONF}/conf/jmx.access. * avg\_series\_point\_number\_threshold -|Name| avg\_series\_point\_number\_threshold | -|:---:|:---| -|Description| max average number of point of each series in memtable| -|Type|Int32| -|Default| 10000 | -|Effective|After restarting system| +|Name| avg\_series\_point\_number\_threshold | +|:---:|:-------------------------------------------------------| +|Description| max average number of point of each series in memtable | +|Type| Int32 | +|Default| 100000 | +|Effective| After restarting system | * tsfile\_size\_threshold diff --git a/docs/UserGuide/Reference/Keywords.md b/docs/UserGuide/Reference/Keywords.md index d3487e0da449..0d1293bfbd86 100644 --- a/docs/UserGuide/Reference/Keywords.md +++ b/docs/UserGuide/Reference/Keywords.md @@ -159,30 +159,6 @@ The following list shows the keywords and reserved words in IoTDB 0.13. Reserved - WITH - WITHOUT - WRITABLE - -- Data Type Keywords - - BOOLEAN - - DOUBLE - - FLOAT - - INT32 - - INT64 - - TEXT - -- Encoding Type Keywords - - DICTIONARY - - DIFF - - GORILLA - - PLAIN - - REGULAR - - RLE - - TS_2DIFF - -- Compressor Type Keywords - - GZIP - - LZ4 - - SNAPPY - - UNCOMPRESSED - - Privileges Keywords - SET_STORAGE_GROUP - CREATE_TIMESERIES diff --git a/docs/UserGuide/Reference/Syntax-Conventions.md b/docs/UserGuide/Reference/Syntax-Conventions.md index 47aa477d381d..fbf0d3503cf6 100644 --- a/docs/UserGuide/Reference/Syntax-Conventions.md +++ b/docs/UserGuide/Reference/Syntax-Conventions.md @@ -21,22 +21,153 @@ # Syntax Conventions +## Issues with syntax conventions in 0.13 and earlier version + +In previous versions of syntax conventions, we introduced some ambiguity to maintain compatibility. To avoid ambiguity, we have designed new syntax conventions, and this chapter will explain the issues with the old syntax conventions and why we made the change. + +### Issues related to identifier + +In version 0.13 and earlier, identifiers (including path node names) that are not quoted with backquotes are allowed to be pure numbers(Pure numeric path node names need to be enclosed in backquotes in the `SELECT` clause), and are allowed to contain some special characters. **In version 0.14, identifiers that are not quoted with backquotes are not allowed to be pure numbers and only allowed to contain letters, Chinese characters, and underscores. ** + +### Issues related to node name + +In previous versions of syntax conventions, when do you need to add quotation marks to the node name, and the rules for using single and double quotation marks or backquotes are complicated. We have unified usage of quotation marks in the new syntax conventions. For details, please refer to the relevant chapters of this document. 
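+As a quick, hedged illustration of the stricter 0.14 identifier rule described above (the UDF class is the example class used elsewhere in this document; the identifier names themselves are illustrative):
+
+```SQL
+# letters, Chinese characters and underscores may stay unquoted
+CREATE FUNCTION example_udf AS 'org.apache.iotdb.udf.UDTFExample'
+
+# an identifier consisting solely of digits must be quoted with backquotes
+CREATE FUNCTION `111` AS 'org.apache.iotdb.udf.UDTFExample'
+```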
+ +#### When to use single and double quotes and backquotes + +In previous versions of syntax conventions, path node names were defined as identifiers, but when the path separator . was required in the path node name, single or double quotes were required. This goes against the rule that identifiers are quoted using backquotes. + +```SQL +# In the previous syntax convention, if you need to create a time series root.sg.`www.baidu.com`, you need to use the following statement: +create root.sg.'www.baidu.com' with datatype=BOOLEAN, encoding=PLAIN + +# The time series created by this statement is actually root.sg.'www.baidu.com', that is, the quotation marks are stored together. The three nodes of the time series are {"root","sg","'www.baidu.com'"}. + +# In the query statement, if you want to query the data of the time series, the query statement is as follows: +select 'www.baidu.com' from root.sg; +``` + +In the new syntax conventions, special node names are uniformly quoted using backquotes: + +```SQL +# In the new syntax convention, if you need to create a time series root.sg.`www.baidu.com`, the syntax is as follows: +create root.sg.`www.baidu.com` with 'datatype' = 'BOOLEAN', 'encoding' = 'PLAIN' + +#To query the time series, you can use the following statement: +select `www.baidu.com` from root.sg; +``` + +#### The issues of using quotation marks inside node names + +In previous versions of syntax conventions, when single quotes ' and double quotes " are used in path node names, they need to be escaped with a backslash \, and the backslashes will be stored as part of the path node name. Other identifiers do not have this restriction, causing inconsistency. + +```SQL +# Create time series root.sg.\"a +create timeseries root.sg.`\"a` with datatype=TEXT,encoding=PLAIN; + +# Query time series root.sg.\"a +select `\"a` from root.sg; ++-----------------------------+-----------+ +| Time|root.sg.\"a| ++-----------------------------+-----------+ +|1970-01-01T08:00:00.004+08:00| test| ++-----------------------------+-----------+ +``` + +In the new syntax convention, special path node names are uniformly referenced with backquotes. When single and double quotes are used in path node names, there is no need to add backslashes to escape, and backquotes need to be double-written. For details, please refer to the relevant chapters of the new syntax conventions. + +### Issues related to session API + +#### Session API syntax restrictions + +In version 0.13, the restrictions on using path nodes in non-SQL interfaces are as follows: + +- The node names in path or path prefix as parameter: + - The node names which should be escaped by backticks (`) in the SQL statement, and escaping is not required here. + - The node names enclosed in single or double quotes still need to be enclosed in single or double quotes and must be escaped for JAVA strings. + - For the `checkTimeseriesExists` interface, since the IoTDB-SQL interface is called internally, the time-series pathname must be consistent with the SQL syntax conventions and be escaped for JAVA strings. 
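+For instance, a minimal sketch of the first 0.13 rule listed above, assuming an already-open `Session` instance and reusing the `a*b` node name and the `createTimeseries` signature shown later in this document: the node name must be quoted with backquotes in SQL, but under the 0.13 rule the backquotes are dropped in the Session parameter.
+
+```Java
+// 0.13 behaviour (illustrative): SQL writes the path as root.sg.`a*b`,
+// but the Session path parameter is passed without the backquotes
+String path = "root.sg.a*b";
+session.createTimeseries(path, TSDataType.FLOAT, TSEncoding.PLAIN, CompressionType.SNAPPY);
+```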
+ +**In version 0.14, restrictions on using path nodes in non-SQL interfaces were enhanced:** + +- **The node names in path or path prefix as parameter: The node names which should be escaped by backticks (`) in the SQL statement, escaping is required here.** +- **Code example for syntax convention could be found at:** `example/session/src/main/java/org/apache/iotdb/SyntaxConventionRelatedExample.java` + +#### Inconsistent handling of string escaping between SQL and Session interfaces + +In previous releases, there was an inconsistency between the SQL and Session interfaces when using strings. For example, when using SQL to insert Text type data, the string will be unescaped, but not when using the Session interface, which is inconsistent. **In the new syntax convention, we do not unescape the strings. What you store is what will be obtained when querying (for the rules of using single and double quotation marks inside strings, please refer to this document for string literal chapter). ** + +The following are examples of inconsistencies in the old syntax conventions: + +Use Session's insertRecord method to insert data into the time series root.sg.a + +```Java +// session insert +String deviceId = "root.sg"; +List measurements = new ArrayList<>(); +measurements.add("a"); +String[] values = new String[]{"\\\\", "\\t", "\\\"", "\\u96d5"}; +for(int i = 0; i <= values.length; i++){ + List valueList = new ArrayList<>(); + valueList.add(values[i]); + session.insertRecord(deviceId, i + 1, measurements, valueList); + } +``` + +Query the data of root.sg.a, you can see that there is no unescaping: + +```Plain%20Text +// query result ++-----------------------------+---------+ +| Time|root.sg.a| ++-----------------------------+---------+ +|1970-01-01T08:00:00.001+08:00| \\| +|1970-01-01T08:00:00.002+08:00| \t| +|1970-01-01T08:00:00.003+08:00| \"| +|1970-01-01T08:00:00.004+08:00| \u96d5| ++-----------------------------+---------+ +``` + +Instead use SQL to insert data into root.sg.a: + +```SQL +# SQL insert +insert into root.sg(time, a) values(1, "\\") +insert into root.sg(time, a) values(2, "\t") +insert into root.sg(time, a) values(3, "\"") +insert into root.sg(time, a) values(4, "\u96d5") +``` + +Query the data of root.sg.a, you can see that the string is unescaped: + +```Plain%20Text +// query result ++-----------------------------+---------+ +| Time|root.sg.a| ++-----------------------------+---------+ +|1970-01-01T08:00:00.001+08:00| \| +|1970-01-01T08:00:00.002+08:00| | +|1970-01-01T08:00:00.003+08:00| "| +|1970-01-01T08:00:00.004+08:00| 雕| ++-----------------------------+---------+ +``` + ## Literal Values This section describes how to write literal values in IoTDB. These include strings, numbers, timestamp values, boolean values, and NULL. ### String Literals -A string is a sequence of characters, enclosed within either single quote (`'`) or double quote (`"`) characters. Examples: +> We refer to MySQL's definition of string:A string is a sequence of bytes or characters, enclosed within either single quote (`'`) or double quote (`"`) characters. + +Definition of string in MySQL could be found here:[MySQL :: MySQL 8.0 Reference Manual :: 9.1.1 String Literals](https://dev.mysql.com/doc/refman/8.0/en/string-literals.html) + +So in IoTDB, **A string is a sequence of bytes or characters, enclosed within either single quote (`'`) or double quote (`"`) characters.** Examples: + ```js 'a string' "another string" ``` -We will unescape the string unless it is used as a file path. 
Examples can be found in the usage scenarios part. - -More information about escape characters can be found in : [Characters (The Java™ Tutorials > Learning the Java Language > Numbers and Strings)](https://docs.oracle.com/javase/tutorial/java/data/characters.html) - #### Usage Scenarios Usages of string literals: @@ -48,20 +179,19 @@ Usages of string literals: insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1') insert into root.ln.wf02.wt02(timestamp,hardware) values(2, '\\') - # select data from root.ln.wf02.wt02, '\\' is unescpaed as '\'. +-----------------------------+--------------------------+ | Time|root.ln.wf02.wt02.hardware| +-----------------------------+--------------------------+ |1970-01-01T08:00:00.001+08:00| v1| +-----------------------------+--------------------------+ - |1970-01-01T08:00:00.002+08:00| \| + |1970-01-01T08:00:00.002+08:00| \\| +-----------------------------+--------------------------+ - # select + # select select code from root.sg1.d1 where code in ('string1', 'string2'); ``` - -- Used in`LOAD` / `REMOVE` / `SETTLE` instructions to represent file path. File path will not be unescaped. + +- Used in`LOAD` / `REMOVE` / `SETTLE` instructions to represent file path. ```SQL # load @@ -77,7 +207,7 @@ Usages of string literals: - Password fields in user management statements ```SQL - # 'write_pwd' is a password + # write_pwd is the password CREATE USER ln_write_user 'write_pwd' ``` @@ -98,7 +228,7 @@ Usages of string literals: CREATE FUNCTION example AS 'org.apache.iotdb.udf.UDTFExample' ``` -- `AS` function provided by IoTDB can assign an alias to time series selected in query. Alias can be string or identifier. +- `AS` function provided by IoTDB can assign an alias to time series selected in query. Alias can be constant(including string) or identifier. ```SQL select s1 as 'temperature', s2 as 'speed' from root.ln.wf01.wt01; @@ -109,67 +239,8 @@ Usages of string literals: +-----------------------------+-----------|-----+ ``` -- The key of an attribute can be String Literal and identifier, the value of an attribute can be Constant(including String Literal) and identifer. Using string literal to represent key and value is recommended. +- The key/value of an attribute can be String Literal and identifier, more details can be found at **key-value pair** part. - 1. Attributes fields of trigger. See the attributes after `With` clause in the example below: - - ```SQL - # exmaple - CREATE TRIGGER `alert-listener-sg1d1s1` - AFTER INSERT - ON root.sg1.d1.s1 - AS 'org.apache.iotdb.db.engine.trigger.example.AlertListener' - WITH ( - 'lo' = '0', - 'hi' = '100.0' - ) - ``` - - 2. Attributes fields of UDF. See the attributes in select clause in the example below: - - ```sql - # 示例 - SELECT example(s1, s2, 'key1'='value1', 'key2'='value2') FROM root.sg.d1; - ``` - - 3. Key-value pair to represent tag/attributes in timeseries. 
- - ```SQL - # create timeseries - CREATE timeseries root.turbine.d1.s1(temprature) - WITH datatype=FLOAT, encoding=RLE, compression=SNAPPY, 'max_point_number' = '5' - TAGS('tag1' = 'v1', 'tag2'= 'v2') ATTRIBUTES('attr1' = 'v1', 'attr2' = 'v2') - - # alter tags and attributes of timeseries - ALTER timeseries root.turbine.d1.s1 SET 'newTag1' = 'newV1', 'attr1' = 'newV1' - - # rename tag - ALTER timeseries root.turbine.d1.s1 RENAME 'tag1' TO 'newTag1' - - # upsert alias, tags, attributes - ALTER timeseries root.turbine.d1.s1 UPSERT - ALIAS='newAlias' TAGS('tag2' = 'newV2', tag3=v3) ATTRIBUTES('attr3' ='v3', 'attr4'='v4') - - # add new tags - ALTER timeseries root.turbine.d1.s1 ADD TAGS 'tag3' = 'v3', 'tag4' = 'v4' - - # add new attributes - ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES 'attr3' = 'v3', 'attr4' = 'v4' - - # query for timeseries - SHOW timeseries root.ln.** WHRER 'unit' = 'c' - ``` - - 4. Attributes fields of Pipe and PipeSink. - - ```SQL - # PipeSink example - CREATE PIPESINK my_iotdb AS IoTDB ('ip' = 'your ip') - - # Pipe example - CREATE PIPE my_pipe TO my_iotdb FROM - (select ** from root WHERE time>=yyyy-mm-dd HH:MM:SS) WITH 'SyncDelOp' = 'true' - ``` #### How to use quotation marks in String Literals @@ -227,27 +298,27 @@ The `NULL` value means “no data.” `NULL` can be written in any lettercase. ## Identifiers -#### Usage scenarios +### Usage scenarios Certain objects within IoTDB, including `TRIGGER`, `FUNCTION`(UDF), `CONTINUOUS QUERY`, `SCHEMA TEMPLATE`, `USER`, `ROLE`,`Pipe`,`PipeSink`,`alias` and other object names are known as identifiers. -#### Constraints +### Constraints Below are basic constraints of identifiers, specific identifiers may have other constraints, for example, `user` should consists of more than 4 characters. - Permitted characters in unquoted identifiers: - - [0-9 a-z A-Z _ : @ # $ { }] (letters, digits, some special characters) + - [0-9 a-z A-Z _ ] (letters, digits and underscore) - ['\u2E80'..'\u9FFF'] (UNICODE Chinese characters) - Identifiers may begin with a digit, unquoted identifiers can not consists of solely digits. - Identifiers are case sensitive. +- Key words can be used as an identifier. -You need to quote the identifier with back quote(`) in the following cases: +**You need to quote the identifier with back quote(`) in the following cases:** - Identifier contains special characters. -- Using Key words as identifier - Identifier consists of solely digits. -#### How to use quotations marks in quoted identifiers +### How to use quotations marks in quoted identifiers `'` and `"` can be used directly in quoted identifiers. @@ -263,7 +334,7 @@ create schema template `t1``t` (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY) ``` -#### Examples +### Examples Examples of case in which quoted identifier is used : @@ -284,14 +355,14 @@ Examples of case in which quoted identifier is used : - UDF name should be quoted in cases described above : ```sql - # craete function named select, select is a keyword. - CREATE FUNCTION `select` AS 'org.apache.iotdb.udf.UDTFExample' + # create a funciton named 111, 111 consists of solely digits. + CREATE FUNCTION `111` AS 'org.apache.iotdb.udf.UDTFExample' ``` - Template name should be quoted in cases described above : ```sql - # create template named 111, 111 consists of solely digits. + # create a template named 111, 111 consists of solely digits. 
create schema template `111` (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY) ``` @@ -302,8 +373,8 @@ Examples of case in which quoted identifier is used : # create user special`user. CREATE USER `special``user.` 'write_pwd' - # create role `select` - CREATE ROLE `select` + # create role 111 + CREATE ROLE `111` ``` - Continuous query name should be quoted in cases described above : @@ -330,7 +401,7 @@ Examples of case in which quoted identifier is used : (select ** from root WHERE time>=yyyy-mm-dd HH:MM:SS) WITH 'SyncDelOp' = 'true' ``` -- `AS` function provided by IoTDB can assign an alias to time series selected in query. Alias can be string or identifier. +- `AS` function provided by IoTDB can assign an alias to time series selected in query. Alias can be constant(including string) or identifier. ```sql select s1 as temperature, s2 as speed from root.ln.wf01.wt01; @@ -341,33 +412,8 @@ Examples of case in which quoted identifier is used : +-----------------------------+-----------|-----+ ``` -- The key of an attribute can be String Literal and identifier, the value of an attribute can be Constant(including String Literal) and identifer. Using string literal to represent key and value is recommended. Below are examples of using identifier in key-value of tags and attributes: +- The key/value of an attribute can be String Literal and identifier, more details can be found at **key-value pair** part. - ```SQL - # create timeseries - CREATE timeseries root.turbine.d1.s1(temprature) - WITH datatype=FLOAT, encoding=RLE, compression=SNAPPY, max_point_number = 5 - TAGS(tag1 = v1, tag2 = v2) ATTRIBUTES(attr1 = v1, attr2 = v2) - - # alter tags and attributes of timeseries - ALTER timeseries root.turbine.d1.s1 SET newTag1 = newV1, attr1 = newV1 - - # rename tag - ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 - - # upsert alias, tags, attributes - ALTER timeseries root.turbine.d1.s1 UPSERT - ALIAS = newAlias TAGS(tag2 = newV2, tag3=v3) ATTRIBUTES(attr3 = v3, attr4 = v4) - - # add new tags - ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3 = v3, tag4 = v4 - - # add new attributes - ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3 = v3, attr4 = v4 - - # query for timeseries - SHOW timeseries root.ln.** WHRER unit = c - ``` ## Node Names in Path @@ -382,7 +428,7 @@ Node name is a special identifier, it can also be wildcard `*` and `**`. When cr As `*` can also be used in expressions of select clause to represent multiplication, below are examples to help you better understand the usage of `* `: ```SQL -# create timeseries root.sg.a*b +# create timeseries root.sg.`a*b` create timeseries root.sg.`a*b` with datatype=FLOAT,encoding=PLAIN; # As described in Identifier part, a*b should be quoted. @@ -394,7 +440,7 @@ create timeseries root.sg.a with datatype=FLOAT,encoding=PLAIN; # create timeseries root.sg.b create timeseries root.sg.b with datatype=FLOAT,encoding=PLAIN; -# query data of root.sg.a*b +# query data of root.sg.`a*b` select `a*b` from root.sg # Header of result dataset |Time|root.sg.a*b| @@ -409,17 +455,12 @@ select a*b from root.sg When node name is not wildcard, it is a identifier, which means the constraints on it is the same as described in Identifier part. -Node name quoted with back quote will also be wrapped with back quote in result dataset if it contains . or `, otherwise node name will not be quoted in result dataset. 
Below are examples to help you understand - -- Create timeseries stament: +- Create timeseries statement: ```SQL # Node name contains special characters like ` and .,all nodes of this timeseries are: ["root","sg","www.`baidu.com"] create timeseries root.sg.`www.``baidu.com`.a with datatype=FLOAT,encoding=PLAIN; -# Node name is a key word. -create timeseries root.sg.`select`.a with datatype=FLOAT,encoding=PLAIN; - # Node name consists of solely digits. create timeseries root.sg.`111` with datatype=FLOAT,encoding=PLAIN; ``` @@ -430,8 +471,7 @@ After executing above statments, execute "show timeseries",below is the result +---------------------------+-----+-------------+--------+--------+-----------+----+----------+ | timeseries|alias|storage group|dataType|encoding|compression|tags|attributes| +---------------------------+-----+-------------+--------+--------+-----------+----+----------+ -| root.sg.select.a| null| root.sg| FLOAT| PLAIN| SNAPPY|null| null| -| root.sg.111.a| null| root.sg| FLOAT| PLAIN| SNAPPY|null| null| +| root.sg.`111`.a| null| root.sg| FLOAT| PLAIN| SNAPPY|null| null| |root.sg.`www.``baidu.com`.a| null| root.sg| FLOAT| PLAIN| SNAPPY|null| null| +---------------------------+-----+-------------+--------+--------+-----------+----+----------+ ``` @@ -442,9 +482,6 @@ After executing above statments, execute "show timeseries",below is the result # Node name contains special characters like . and ` insert into root.sg.`www.``baidu.com`(timestamp, a) values(1, 2); -# Node name is a key word. -insert into root.sg.`select`(timestamp, a) values (1, 2); - # Node name consists of solely digits. insert into root.sg(timestamp, `111`) values (1, 2); ``` @@ -455,9 +492,6 @@ insert into root.sg(timestamp, `111`) values (1, 2); # Node name contains special characters like . and ` select a from root.sg.`www.``baidu.com`; -# Node name is a key word. -select a from root.sg.`select` - # Node name consists of solely digits. select `111` from root.sg ``` @@ -472,13 +506,6 @@ Results: |1970-01-01T08:00:00.001+08:00| 2.0| +-----------------------------+---------------------------+ -# select a from root.sg.`select` -+-----------------------------+----------------+ -| Time|root.sg.select.a| -+-----------------------------+----------------+ -|1970-01-01T08:00:00.001+08:00| 2.0| -+-----------------------------+----------------+ - # select `111` from root.sg +-----------------------------+-----------+ | Time|root.sg.111| @@ -487,12 +514,209 @@ Results: +-----------------------------+-----------+ ``` +## Key-Value Pair + +**The key/value of an attribute can be constant(including string) and identifier. ** + +Below are usage scenarios of key-value pair: + +- Attributes fields of trigger. 
See the attributes after `With` clause in the example below: + +```SQL +# 以字符串形式表示键值对 +CREATE TRIGGER `alert-listener-sg1d1s1` +AFTER INSERT +ON root.sg1.d1.s1 +AS 'org.apache.iotdb.db.engine.trigger.example.AlertListener' +WITH ( + 'lo' = '0', + 'hi' = '100.0' +) + +# 以标识符和常量形式表示键值对 +CREATE TRIGGER `alert-listener-sg1d1s1` +AFTER INSERT +ON root.sg1.d1.s1 +AS 'org.apache.iotdb.db.engine.trigger.example.AlertListener' +WITH ( + lo = 0, + hi = 100.0 +) +``` + +- Key-value pair to represent tag/attributes in timeseries: + +```sql +# create timeseries using string as key/value +CREATE timeseries root.turbine.d1.s1(temprature) +WITH datatype = FLOAT, encoding = RLE, compression = SNAPPY, 'max_point_number' = '5' +TAGS('tag1' = 'v1', 'tag2'= 'v2') ATTRIBUTES('attr1' = 'v1', 'attr2' = 'v2') + +# create timeseries using constant as key/value +CREATE timeseries root.turbine.d1.s1(temprature) +WITH datatype = FLOAT, encoding = RLE, compression = SNAPPY, max_point_number = 5 +TAGS(tag1 = v1, tag2 = v2) ATTRIBUTES(attr1 = v1, attr2 = v2) +``` + +```sql +# alter tags and attributes of timeseries +ALTER timeseries root.turbine.d1.s1 SET 'newTag1' = 'newV1', 'attr1' = 'newV1' + +ALTER timeseries root.turbine.d1.s1 SET newTag1 = newV1, attr1 = newV1 +``` + +```sql +# rename tag +ALTER timeseries root.turbine.d1.s1 RENAME 'tag1' TO 'newTag1' + +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 +``` + +```sql +# upsert alias, tags, attributes +ALTER timeseries root.turbine.d1.s1 UPSERT +ALIAS='newAlias' TAGS('tag2' = 'newV2', 'tag3' = 'v3') ATTRIBUTES('attr3' ='v3', 'attr4'='v4') + +ALTER timeseries root.turbine.d1.s1 UPSERT +ALIAS = newAlias TAGS(tag2 = newV2, tag3 = v3) ATTRIBUTES(attr3 = v3, attr4 = v4) +``` + +```sql +# add new tags +ALTER timeseries root.turbine.d1.s1 ADD TAGS 'tag3' = 'v3', 'tag4' = 'v4' + +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3 = v3, tag4 = v4 +``` + +```sql +# add new attributes +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES 'attr3' = 'v3', 'attr4' = 'v4' + +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3 = v3, attr4 = v4 +``` + +```sql +# query for timeseries +SHOW timeseries root.ln.** WHRER 'unit' = 'c' + +SHOW timeseries root.ln.** WHRER unit = c +``` + +- Attributes fields of Pipe and PipeSink. + +```SQL +# PipeSink example +CREATE PIPESINK my_iotdb AS IoTDB ('ip' = '输入你的IP') + +# Pipe example +CREATE PIPE my_pipe TO my_iotdb FROM +(select ** from root WHERE time>=yyyy-mm-dd HH:MM:SS) WITH 'SyncDelOp' = 'true' +``` + ## Keywords and Reserved Words -Keywords are words that have significance in SQL require special treatment for use as identifiers and node names, and need to be escaped with backticks. -Certain keywords, such as TIME and ROOT, are reserved and cannot use as identifiers and node names (even after escaping). +Keywords are words that have significance in SQL. Keywords can be used as an identifier. Certain keywords, such as TIME/TIMESTAMP and ROOT, are reserved and cannot use as identifiers. + +[Keywords and Reserved Words](Keywords.md) shows the keywords and reserved words in IoTDB. + +## Session、TsFile API + +When using the Session and TsFile APIs, if the method you call requires parameters such as measurement, device, storage group, path in the form of String, **please ensure that the parameters passed in the input string is the same as when using the SQL statement**, here are some examples to help you understand. 
Code example could be found at: `example/session/src/main/java/org/apache/iotdb/SyntaxConventionRelatedExample.java` + +1. Take creating a time series createTimeseries as an example: + +```Java +public void createTimeseries( + String path, + TSDataType dataType, + TSEncoding encoding, + CompressionType compressor) + throws IoTDBConnectionException, StatementExecutionException; +``` + +If you wish to create the time series root.sg.a, root.sg.\`a.\`\`"b\`, root.sg.\`111\`, the SQL statement you use should look like this: + +```SQL +create timeseries root.sg.a with datatype=FLOAT,encoding=PLAIN,compressor=SNAPPY; + +# node names contain special characters, each node in the time series is ["root","sg","a.`\"b"] +create timeseries root.sg.`a.``"b` with datatype=FLOAT,encoding=PLAIN,compressor=SNAPPY; + +# node names are pure numbers +create timeseries root.sg.`111` with datatype=FLOAT,encoding=PLAIN,compressor=SNAPPY; +``` + +When you call the createTimeseries method, you should assign the path string as follows to ensure that the content of the path string is the same as when using SQL: + +```Java +// timeseries root.sg.a +String path = "root.sg.a"; -[Keywords and Reserved Words](Keywords.md) shows the keywords and reserved words in IoTDB 0.13. +// timeseries root.sg.`a``"b` +String path = "root.sg.`a``\"b`"; + +// timeseries root.sg.`111` +String path = "root.sg.`111`"; +``` + +2. Take inserting data insertRecord as an example: + +```Java +public void insertRecord( + String deviceId, + long time, + List measurements, + List types, + Object... values) + throws IoTDBConnectionException, StatementExecutionException; +``` + +If you want to insert data into the time series root.sg.a, root.sg.\`a.\`\`"b\`, root.sg.\`111\`, the SQL statement you use should be as follows: + +```SQL +insert into root.sg(timestamp, a, `a.``"b`, `111`) values (1, 2, 2, 2); +``` + +When you call the insertRecord method, you should assign deviceId and measurements as follows: + +```Java +// deviceId is root.sg +String deviceId = "root.sg"; + +// measurements +String[] measurements = new String[]{"a", "`a.``\"b`", "`111`"}; +List measurementList = Arrays.asList(measurements); +``` + +3. Take executeRawDataQuery as an example: + +```Java +public SessionDataSet executeRawDataQuery( + List paths, + long startTime, + long endTime) + throws StatementExecutionException, IoTDBConnectionException; +``` + +If you wish to query the data of the time series root.sg.a, root.sg.\`a.\`\`"b\`, root.sg.\`111\`, the SQL statement you use should be as follows : + +```SQL +select a from root.sg + +# node name contains special characters +select `a.``"b` from root.sg; + +# node names are pure numbers +select `111` from root.sg +``` + +When you call the executeRawDataQuery method, you should assign paths as follows: + +```Java +// paths +String[] paths = new String[]{"root.sg.a", "root.sg.`a.``\"b`", "root.sg.`111`"}; +List pathList = Arrays.asList(paths); +``` ## Learn More diff --git a/docs/UserGuide/Reference/TSDB-Comparison.md b/docs/UserGuide/Reference/TSDB-Comparison.md index 979a61f16b96..d450899f2119 100644 --- a/docs/UserGuide/Reference/TSDB-Comparison.md +++ b/docs/UserGuide/Reference/TSDB-Comparison.md @@ -109,8 +109,8 @@ Legend: * SQL like: - * IoTDB and InfluxDB support SQL like language. In addition, the integration of IoTDB and Calcite is almost done (a PR has been submitted), which means IoTDB will support Standard SQL soon. 
- * OpenTSDB and KairosDB only support Rest API, while IoTDB also supports Rest API (a PR has been submitted). + * IoTDB and InfluxDB support SQL like language. + * OpenTSDB and KairosDB only support Rest API, while IoTDB also supports Rest API. * TimescaleDB uses the SQL the same as PG. * Schema: @@ -263,9 +263,6 @@ I listed some interesting features that these systems may differ. Now only IoTDB supports a JDBC driver (though not all interfaces are implemented), and makes it possible to integrate many other JDBC driver based softwares. -* Standard SQL: - - As mentioned before, the integration of IoTDB and Calcite is almost done (a PR has been submitted), which means IoTDB will support Standard SQL. * Spark and Hive integration: diff --git a/docs/UserGuide/UDF-Library/String-Processing.md b/docs/UserGuide/UDF-Library/String-Processing.md index f33936969220..5d3e4e9cf342 100644 --- a/docs/UserGuide/UDF-Library/String-Processing.md +++ b/docs/UserGuide/UDF-Library/String-Processing.md @@ -21,6 +21,583 @@ # String Processing +## Length + +### Usage + +The function is used to get the length of input series. + +**Name:** LENGTH + +**Input Series:** Only support a single input series. The data type is TEXT. + +**Output Series:** Output a single series. The type is INT32. + +**Note:** Returns NULL if input is NULL. + +### Examples + +Input series: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s1| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| +|1970-01-01T08:00:00.002+08:00| 22test22| ++-----------------------------+--------------+ +``` + +SQL for query: + +```sql +select s1, length(s1) from root.sg1.d1 +``` + +Output series: + +``` ++-----------------------------+--------------+----------------------+ +| Time|root.sg1.d1.s1|length(root.sg1.d1.s1)| ++-----------------------------+--------------+----------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| 6| +|1970-01-01T08:00:00.002+08:00| 22test22| 8| ++-----------------------------+--------------+----------------------+ +``` + +## StrLocate + +### Usage + +The function is used to get the position of the first occurrence of substring `target` in input series. Returns -1 if there are no `target` in input. + +**Name:** LENGTH + +**Input Series:** Only support a single input series. The data type is TEXT. + +**Parameter:** + ++ `target`: The substring to be located. ++ `reverse`: Indicates whether reverse locate is required. The default value is `false`, means left-to-right locate. + +**Output Series:** Output a single series. The type is INT32. + +**Note:** The index begins from 0. 
+ +### Examples + +Input series: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s1| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| +|1970-01-01T08:00:00.002+08:00| 22test22| ++-----------------------------+--------------+ +``` + +SQL for query: + +```sql +select s1, locate(s1, "target"="1") from root.sg1.d1 +``` + +Output series: + +``` ++-----------------------------+--------------+------------------------------------+ +| Time|root.sg1.d1.s1|locate(root.sg1.d1.s1, "target"="1")| ++-----------------------------+--------------+------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| 0| +|1970-01-01T08:00:00.002+08:00| 22test22| -1| ++-----------------------------+--------------+------------------------------------+ +``` + +Another SQL for query: + +```sql +select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1 +``` + +Output series: + +``` ++-----------------------------+--------------+------------------------------------------------------+ +| Time|root.sg1.d1.s1|locate(root.sg1.d1.s1, "target"="1", "reverse"="true")| ++-----------------------------+--------------+------------------------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| 5| +|1970-01-01T08:00:00.002+08:00| 22test22| -1| ++-----------------------------+--------------+------------------------------------------------------+ +``` + +## StartsWith + +### Usage + +The function is used to check whether input series starts with the specified prefix. + +**Name:** STARTSWITH + +**Input Series:** Only support a single input series. The data type is TEXT. + +**Parameter:** ++ `target`: The prefix to be checked. + +**Output Series:** Output a single series. The type is BOOLEAN. + +**Note:** Returns NULL if input is NULL. + +### Examples + +Input series: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s1| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| +|1970-01-01T08:00:00.002+08:00| 22test22| ++-----------------------------+--------------+ +``` + +SQL for query: + +```sql +select s1, startswith(s1, "target"="1") from root.sg1.d1 +``` + +Output series: + +``` ++-----------------------------+--------------+----------------------------------------+ +| Time|root.sg1.d1.s1|startswith(root.sg1.d1.s1, "target"="1")| ++-----------------------------+--------------+----------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| true| +|1970-01-01T08:00:00.002+08:00| 22test22| false| ++-----------------------------+--------------+----------------------------------------+ +``` + +## EndsWith + +### Usage + +The function is used to check whether input series ends with the specified suffix. + +**Name:** ENDSWITH + +**Input Series:** Only support a single input series. The data type is TEXT. + +**Parameter:** ++ `target`: The suffix to be checked. + +**Output Series:** Output a single series. The type is BOOLEAN. + +**Note:** Returns NULL if input is NULL. 
+ +### Examples + +Input series: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s1| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| +|1970-01-01T08:00:00.002+08:00| 22test22| ++-----------------------------+--------------+ +``` + +SQL for query: + +```sql +select s1, endswith(s1, "target"="1") from root.sg1.d1 +``` + +Output series: + +``` ++-----------------------------+--------------+--------------------------------------+ +| Time|root.sg1.d1.s1|endswith(root.sg1.d1.s1, "target"="1")| ++-----------------------------+--------------+--------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| true| +|1970-01-01T08:00:00.002+08:00| 22test22| false| ++-----------------------------+--------------+--------------------------------------+ +``` + +## Concat + +### Usage + +The function is used to concat input series and target strings. + +**Name:** CONCAT + +**Input Series:** At least one input series. The data type is TEXT. + +**Parameter:** ++ `targets`: A series of K-V, key needs to start with `target` and be not duplicated, value is the string you want to concat. ++ `series_behind`: Indicates whether series behind targets. The default value is `false`. + +**Output Series:** Output a single series. The type is TEXT. + +**Note:** ++ If value of input series is NULL, it will be skipped. ++ We can only concat input series and `targets` separately. `concat(s1, "target1"="IoT", s2, "target2"="DB")` and + `concat(s1, s2, "target1"="IoT", "target2"="DB")` gives the same result. + +### Examples + +Input series: + +``` ++-----------------------------+--------------+--------------+ +| Time|root.sg1.d1.s1|root.sg1.d1.s2| ++-----------------------------+--------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| null| +|1970-01-01T08:00:00.002+08:00| 22test22| 2222test| ++-----------------------------+--------------+--------------+ +``` + +SQL for query: + +```sql +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1 +``` + +Output series: + +``` ++-----------------------------+--------------+--------------+-----------------------------------------------------------------------+ +| Time|root.sg1.d1.s1|root.sg1.d1.s2|concat(root.sg1.d1.s1, root.sg1.d1.s2, "target1"="IoT", "target2"="DB")| ++-----------------------------+--------------+--------------+-----------------------------------------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| null| 1test1IoTDB| +|1970-01-01T08:00:00.002+08:00| 22test22| 2222test| 22test222222testIoTDB| ++-----------------------------+--------------+--------------+-----------------------------------------------------------------------+ +``` + +Another SQL for query: + +```sql +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", "series_behind"="true") from root.sg1.d1 +``` + +Output series: + +``` ++-----------------------------+--------------+--------------+-----------------------------------------------------------------------------------------------+ +| Time|root.sg1.d1.s1|root.sg1.d1.s2|concat(root.sg1.d1.s1, root.sg1.d1.s2, "target1"="IoT", "target2"="DB", "series_behind"="true")| ++-----------------------------+--------------+--------------+-----------------------------------------------------------------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| null| IoTDB1test1| +|1970-01-01T08:00:00.002+08:00| 22test22| 2222test| IoTDB22test222222test| 
++-----------------------------+--------------+--------------+-----------------------------------------------------------------------------------------------+ +``` + +## Substr + +### Usage + +The function is used to get the substring `start` to `end - 1`. + +**Name:** SUBSTR + +**Input Series:** Only support a single input series. The data type is TEXT. + +**Parameter:** ++ `start`: Indicates the start position of substring. ++ `end`: Indicates the end position of substring. + +**Output Series:** Output a single series. The type is TEXT. + +**Note:** Returns NULL if input is NULL. + +### Examples + +Input series: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s1| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| +|1970-01-01T08:00:00.002+08:00| 22test22| ++-----------------------------+--------------+ +``` + +SQL for query: + +```sql +select s1, substr(s1, "start"="0", "end"="2") from root.sg1.d1 +``` + +Output series: + +``` ++-----------------------------+--------------+----------------------------------------------+ +| Time|root.sg1.d1.s1|substr(root.sg1.d1.s1, "start"="0", "end"="2")| ++-----------------------------+--------------+----------------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| 1t| +|1970-01-01T08:00:00.002+08:00| 22test22| 22| ++-----------------------------+--------------+----------------------------------------------+ +``` + +## Upper + +### Usage + +The function is used to get the string of input series with all characters changed to uppercase. + +**Name:** UPPER + +**Input Series:** Only support a single input series. The data type is TEXT. + +**Output Series:** Output a single series. The type is TEXT. + +**Note:** Returns NULL if input is NULL. + +### Examples + +Input series: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s1| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| +|1970-01-01T08:00:00.002+08:00| 22test22| ++-----------------------------+--------------+ +``` + +SQL for query: + +```sql +select s1, upper(s1) from root.sg1.d1 +``` + +Output series: + +``` ++-----------------------------+--------------+---------------------+ +| Time|root.sg1.d1.s1|upper(root.sg1.d1.s1)| ++-----------------------------+--------------+---------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| 1TEST1| +|1970-01-01T08:00:00.002+08:00| 22test22| 22TEST22| ++-----------------------------+--------------+---------------------+ +``` + +## Lower + +### Usage + +The function is used to get the string of input series with all characters changed to lowercase. + +**Name:** LOWER + +**Input Series:** Only support a single input series. The data type is TEXT. + +**Output Series:** Output a single series. The type is TEXT. + +**Note:** Returns NULL if input is NULL. 
+ +### Examples + +Input series: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s1| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1TEST1| +|1970-01-01T08:00:00.002+08:00| 22TEST22| ++-----------------------------+--------------+ +``` + +SQL for query: + +```sql +select s1, lower(s1) from root.sg1.d1 +``` + +Output series: + +``` ++-----------------------------+--------------+---------------------+ +| Time|root.sg1.d1.s1|lower(root.sg1.d1.s1)| ++-----------------------------+--------------+---------------------+ +|1970-01-01T08:00:00.001+08:00| 1TEST1| 1test1| +|1970-01-01T08:00:00.002+08:00| 22TEST22| 22test22| ++-----------------------------+--------------+---------------------+ +``` + +## Trim + +### Usage + +The function is used to get the string whose value is same to input series, with all leading and trailing space removed. + +**Name:** TRIM + +**Input Series:** Only support a single input series. The data type is TEXT. + +**Output Series:** Output a single series. The type is TEXT. + +**Note:** Returns NULL if input is NULL. + +### Examples + +Input series: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s3| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.002+08:00| 3querytest3| +|1970-01-01T08:00:00.003+08:00| 3querytest3 | ++-----------------------------+--------------+ +``` + +SQL for query: + +```sql +select s3, trim(s3) from root.sg1.d1 +``` + +Output series: + +``` ++-----------------------------+--------------+--------------------+ +| Time|root.sg1.d1.s3|trim(root.sg1.d1.s3)| ++-----------------------------+--------------+--------------------+ +|1970-01-01T08:00:00.002+08:00| 3querytest3| 3querytest3| +|1970-01-01T08:00:00.003+08:00| 3querytest3 | 3querytest3| ++-----------------------------+--------------+--------------------+ +``` + +## StrCmp + +### Usage + +The function is used to get the compare result of two input series. Returns `0` if series value are the same, a `negative integer` if value of series1 is smaller than series2, +a `positive integer` if value of series1 is more than series2. + +**Name:** StrCmp + +**Input Series:** Support two input series. Data types are all the TEXT. + +**Output Series:** Output a single series. The type is INT32. + +**Note:** Returns NULL either series value is NULL. + +### Examples + +Input series: + +``` ++-----------------------------+--------------+--------------+ +| Time|root.sg1.d1.s1|root.sg1.d1.s2| ++-----------------------------+--------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| null| +|1970-01-01T08:00:00.002+08:00| 22test22| 2222test| ++-----------------------------+--------------+--------------+ +``` + +SQL for query: + +```sql +select s1, s2, strcmp(s1, s2) from root.sg1.d1 +``` + +Output series: + +``` ++-----------------------------+--------------+--------------+--------------------------------------+ +| Time|root.sg1.d1.s1|root.sg1.d1.s2|strcmp(root.sg1.d1.s1, root.sg1.d1.s2)| ++-----------------------------+--------------+--------------+--------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| null| null| +|1970-01-01T08:00:00.002+08:00| 22test22| 2222test| 66| ++-----------------------------+--------------+--------------+--------------------------------------+ +``` + + +## StrReplace + +### Usage + +The function is used to replace the specific substring with given string. + +**Name:** STRREPLACE + +**Input Series:** Only support a single input series. 
The data type is TEXT. + +**Parameter:** + ++ `target`: The target substring to be replaced. ++ `replace`: The string to be put on. ++ `limit`: The number of matches to be replaced which should be an integer no less than -1, + default to -1 which means all matches will be replaced. ++ `offset`: The number of matches to be skipped, which means the first `offset` matches will not be replaced, default to 0. ++ `reverse`: Whether to count all the matches reversely, default to 'false'. + +**Output Series:** Output a single series. The type is TEXT. + +### Examples + +Input series: + +``` ++-----------------------------+---------------+ +| Time|root.test.d1.s1| ++-----------------------------+---------------+ +|2021-01-01T00:00:01.000+08:00| A,B,A+,B-| +|2021-01-01T00:00:02.000+08:00| A,A+,A,B+| +|2021-01-01T00:00:03.000+08:00| B+,B,B| +|2021-01-01T00:00:04.000+08:00| A+,A,A+,A| +|2021-01-01T00:00:05.000+08:00| A,B-,B,B| ++-----------------------------+---------------+ +``` + +SQL for query: + +```sql +select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1 +``` + +Output series: + +``` ++-----------------------------+-----------------------------------------+ +| Time|strreplace(root.test.d1.s1, "target"=",",| +| | "replace"="/", "limit"="2")| ++-----------------------------+-----------------------------------------+ +|2021-01-01T00:00:01.000+08:00| A/B/A+,B-| +|2021-01-01T00:00:02.000+08:00| A/A+/A,B+| +|2021-01-01T00:00:03.000+08:00| B+/B/B| +|2021-01-01T00:00:04.000+08:00| A+/A/A+,A| +|2021-01-01T00:00:05.000+08:00| A/B-/B,B| ++-----------------------------+-----------------------------------------+ +``` + +Another SQL for query: + +```sql +select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1 +``` + +Output series: + +``` ++-----------------------------+-----------------------------------------------------+ +| Time|strreplace(root.test.d1.s1, "target"=",", "replace"= | +| | "|", "limit"="1", "offset"="1", "reverse"="true")| ++-----------------------------+-----------------------------------------------------+ +|2021-01-01T00:00:01.000+08:00| A,B/A+,B-| +|2021-01-01T00:00:02.000+08:00| A,A+/A,B+| +|2021-01-01T00:00:03.000+08:00| B+/B,B| +|2021-01-01T00:00:04.000+08:00| A+,A/A+,A| +|2021-01-01T00:00:05.000+08:00| A,B-/B,B| ++-----------------------------+-----------------------------------------------------+ +``` + ## RegexMatch ### Usage @@ -217,83 +794,4 @@ Output series: |2021-01-01T00:00:04.000+08:00| A| |2021-01-01T00:00:05.000+08:00| B| +-----------------------------+-----------------------------------------------------+ -``` - -## StrReplace - -### Usage - -The function is used to replace the specific substring with given string. - -**Name:** STRREPLACE - -**Input Series:** Only support a single input series. The data type is TEXT. - -**Parameter:** - -+ `target`: The target substring to be replaced. -+ `replace`: The string to be put on. -+ `limit`: The number of matches to be replaced which should be an integer no less than -1, - default to -1 which means all matches will be replaced. -+ `offset`: The number of matches to be skipped, which means the first `offset` matches will not be replaced, default to 0. -+ `reverse`: Whether to count all the matches reversely, default to 'false'. - -**Output Series:** Output a single series. The type is TEXT. 
- -### Examples - -Input series: - -``` -+-----------------------------+---------------+ -| Time|root.test.d1.s1| -+-----------------------------+---------------+ -|2021-01-01T00:00:01.000+08:00| A,B,A+,B-| -|2021-01-01T00:00:02.000+08:00| A,A+,A,B+| -|2021-01-01T00:00:03.000+08:00| B+,B,B| -|2021-01-01T00:00:04.000+08:00| A+,A,A+,A| -|2021-01-01T00:00:05.000+08:00| A,B-,B,B| -+-----------------------------+---------------+ -``` - -SQL for query: - -```sql -select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1 -``` - -Output series: - -``` -+-----------------------------+-----------------------------------------+ -| Time|strreplace(root.test.d1.s1, "target"=",",| -| | "replace"="/", "limit"="2")| -+-----------------------------+-----------------------------------------+ -|2021-01-01T00:00:01.000+08:00| A/B/A+,B-| -|2021-01-01T00:00:02.000+08:00| A/A+/A,B+| -|2021-01-01T00:00:03.000+08:00| B+/B/B| -|2021-01-01T00:00:04.000+08:00| A+/A/A+,A| -|2021-01-01T00:00:05.000+08:00| A/B-/B,B| -+-----------------------------+-----------------------------------------+ -``` - -Another SQL for query: - -```sql -select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1 -``` - -Output series: - -``` -+-----------------------------+-----------------------------------------------------+ -| Time|strreplace(root.test.d1.s1, "target"=",", "replace"= | -| | "|", "limit"="1", "offset"="1", "reverse"="true")| -+-----------------------------+-----------------------------------------------------+ -|2021-01-01T00:00:01.000+08:00| A,B/A+,B-| -|2021-01-01T00:00:02.000+08:00| A,A+/A,B+| -|2021-01-01T00:00:03.000+08:00| B+/B,B| -|2021-01-01T00:00:04.000+08:00| A+,A/A+,A| -|2021-01-01T00:00:05.000+08:00| A,B-/B,B| -+-----------------------------+-----------------------------------------------------+ ``` \ No newline at end of file diff --git a/docs/UserGuide/Write-And-Delete-Data/Load-External-Tsfile.md b/docs/UserGuide/Write-And-Delete-Data/Load-External-Tsfile.md index 53390d431dcb..98748163a946 100644 --- a/docs/UserGuide/Write-And-Delete-Data/Load-External-Tsfile.md +++ b/docs/UserGuide/Write-And-Delete-Data/Load-External-Tsfile.md @@ -41,7 +41,7 @@ The second parameter indicates the path of the tsfile to be loaded and the name AUTOREGISTER option. If the metadata correspond to the timeseries in the tsfile to be loaded does not exist, you can choose whether to create the schema automatically. If this parameter is true, the schema is created automatically. If it is false, the schema will not be created. By default, the schema will be created. -SGLEVEL option. If the storage group correspond to the tsfile does not exist, the user can set the level of storage group through the fourth parameter. By default, it uses the storage group level which is set in `iotdb-engine.properties`. +SGLEVEL option. If the storage group correspond to the tsfile does not exist, the user can set the level of storage group through the fourth parameter. By default, it uses the storage group level which is set in `iotdb-datanode.properties`. VERIFY option. If this parameter is true, All timeseries in this loading tsfile will be compared with the timeseries in IoTDB. If existing a measurement which has different datatype with the measurement in IoTDB, the loading process will be stopped and exit. If consistence can be promised, setting false for this parameter will be a better choice. 
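+A hedged sketch combining the three options described above in a single statement (the file path and storage group level are illustrative):
+
+```sql
+# create missing schema automatically, set the storage group level to 1, and verify timeseries consistency
+load '/data/iotdb/1575028885956-101-0.tsfile' autoregister=true,sglevel=1,verify=true
+```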
diff --git a/docs/zh/Community/About.md b/docs/zh/Community/About.md index baf24f5f42fe..6e006611fb34 100644 --- a/docs/zh/Community/About.md +++ b/docs/zh/Community/About.md @@ -18,8 +18,8 @@ under the License. --> -## 社区说明文档 -### 1.社区组织架构 +# 社区说明文档 +## 社区组织架构 所有抱着推动 Apache IoTDB 前进的共同目的、认同开源文化、遵循相关规范的个人或团体凝聚起来形成了我们的社区。 参考 Apache 基金会的社区理念,我们的社区存在以下四种身份。 - PMC:Project Management Committee,项目管理委员会成员。需通过评选,得到社区的共同认同。 @@ -48,8 +48,8 @@ Contributor:对项目的前进有相关贡献的社区成员。无需通过评 User: Apache IoTDB 项目的用户。无需通过评选。 - 权利: - 获得 User 礼品 -### 2.社区评选规章 -#### 2.1 参与评选的条件 +## 社区评选规章 +### 参与评选的条件 - PMC: - 获得现有 PMC 的推荐 - 技术层面:对项目的整体情况有较全面的了解 @@ -61,7 +61,7 @@ User: Apache IoTDB 项目的用户。无需通过评选。 - 需对对某个功能模块完全了解 - 开源布道师途径 - 对项目的开源布道起到实质的推进作用,如沉淀技术文档、提供与宣讲用户案例、积极参与社区交流与活动等 -### 3.社区交流途径 +## 社区交流途径 社区拥有丰富的交流途径。目前有以下五种交流途径。可以选择自己适用并倾向的渠道与社区进行交流。 途径一:通过 Slack 进行交流(全英文) @@ -81,7 +81,7 @@ User: Apache IoTDB 项目的用户。无需通过评选。 - 发送订阅邮件:向 dev-subscribe@iotdb.apache.org 发送一封邮件,主题内容不限。 - 确认订阅邮件:完成上一步后,将收到一封来自 iotdb.apache.org 结尾的确认订阅邮件。向该邮件再次发一封主题内容不限的邮件即可确认订阅。 -### 4.社区公约 +## 社区公约 遵循 Apache 的社区文化,形成以下公约。 保持开放。我们诚邀各位加入我们的社区。除了敏感内容以外,我们期待大家在公开的渠道对项目有关内容展开讨论。这也适用于寻求帮助和项目支持的消息; 不仅仅是群众的支持请求更有可能得到问题的答案,还可以确保社区注意到并纠正回答该问题的人无意间犯的错误。 拥有同理心、热情、友好和耐心。我们共同努力解决矛盾,怀揣善意,并尽最大努力以一种感同身受的方式行动。我们可能会遭受一些挫折,但这并不是进行人身攻击的借口。一个让人感到不舒服或受到威胁的社区不会成为一个卓有成效的社区。 我们应该对打交道的社区成员和社区外部人员都给予尊重。 @@ -104,8 +104,8 @@ User: Apache IoTDB 项目的用户。无需通过评选。 注意围绕主题,特别是在较长的讨论中。 离开项目时请考虑周全。每个项目都会有人加入有人离开。在有人离开或退出项目时,应该告诉其他成员他们离开了,并通过合适的方式以确保其他成员可以接起他们留下的工作。同时,他们应尊重那些继续参与项目的人,不应该歪曲项目的目标或成就。同样,社区成员也应尊重离开项目的人所做的选择。 -### 5.社区发布新版本流程 -#### 5.1流程总览 +## 社区发布新版本流程 +### 流程总览 社区发布新版本流程中有两类角色。 角色一:RM (发布经理,Release Manager),是发布新版本的发起人。一般为一个人而非多人承担这个角色。 角色二:社区开发者。社区开发者将对新版本是否发布进行投票。值得提醒的是,不是所有的社区开发者都强制为新版本的发布进行投票。 @@ -117,11 +117,11 @@ User: Apache IoTDB 项目的用户。无需通过评选。 第四步 - 社区开发者进行投票:社区开发者获取到内部测试版(Release Candidate)进行测验。测验后,通过邮件进行投票。 第五步 - 投票通过后 RM 发布正式版本并对外公布:当投票情况符合相关条件,RM发布正式版本,并通过邮件对外公布这个消息。(若投票失败,则通过邮件公布本次发布新版本失败,并在修改后,重新提起发布新版本的流程。此处将不对投票失败情况展开解释。) -#### 5.2作为社区开发者,如何验证新版本,来决定是否支持发布? +### 作为社区开发者,如何验证新版本,来决定是否支持发布? 请见 https://iotdb.apache.org/zh/Development/VoteRelease.html; -#### 5.3如何决定投票是否通过? +#### 如何决定投票是否通过? 依据 Apache Software Foundation 给出的说明,我们将把投票窗口保留 72 小时及以上。当收到至少三票 PMC 赞同票并且赞同票多余反对票的时候,代表投票通过。投票的结果将由 RM (发布经理,Release Manager)汇总。RM 会通过邮件公布结果。 -#### 5.4作为新手 RM,是否有详细的实践指导? +### 作为新手 RM,是否有详细的实践指导? 对于新手的 RM (发布经理,Release Manager),我们提供了详细的实践指导。每一步需要做的事情做了详细的说明,请查看博客《版本发布流程》。 \ No newline at end of file diff --git a/docs/zh/Development/HowToJoin.md b/docs/zh/Development/HowToJoin.md index f75a4374500b..d877a31540b4 100644 --- a/docs/zh/Development/HowToJoin.md +++ b/docs/zh/Development/HowToJoin.md @@ -41,7 +41,7 @@ ## 关注公众号 扫码关注官方公众号:Apache IoTDB -![IoTDB公众号二维码](https://img-blog.csdnimg.cn/907f9d614b2f47e3b0c66a7c53bcbd5d.png#pic_left) +![IoTDB公众号二维码](https://user-images.githubusercontent.com/7240743/98633970-73671c00-235d-11eb-9913-f38e570fcfc8.png) ## 长期事项 ### 学习IoTDB的基本使用 diff --git a/docs/zh/Development/HowtoContributeCode.md b/docs/zh/Development/HowtoContributeCode.md new file mode 100644 index 000000000000..424b55252bcb --- /dev/null +++ b/docs/zh/Development/HowtoContributeCode.md @@ -0,0 +1,154 @@ + + +# 技术贡献指南 +## 社区贡献流程概览 +Apache IoTDB 社区通过 JIRA 上的 issue 进行任务管理。 +Issue 的完整生命周期:创建 issue -> 认领 issue -> 提交 pr -> 审阅 pr -> 合并 pr -> 关闭 issue。 + +## 技术贡献规范 + + ### 创建 issue + 在 [ JIRA ](https://issues.apache.org/JIRA/projects/IOTDB/issues)上创建 issue 需要注意几个事项: + 1. 命名:争取采用清晰易懂的名字,如支持一种新的聚合查询功能(avg)、优化原始数据查询性能等。Issue 的名字之后会作为发版的 release note。 + + 2. 
描述:新功能、优化需要描述具体希望做什么。 Bug 反馈需要描述环境、负载、现象描述(异常日志)、影响版本等,最好有复现方法。 + + ### 认领 issue + 在 JIRA 上认领 issue:分配给自己。建议添加一句评论:I'm doing this。避免与其他贡献者重复开发。 + 注:如果发现自己无法认领issue,则是因为自己的账户权限不够。 +遇到此情况,请向 dev@iotdb.apache.org 邮件列表发送邮件,标题为: [application] apply for permission to assign issues to XXX, 其中XXX是自己的JIRA用户名。 +### 提交 PR +#### 需提交的内容 +Issue 类型:New Feature + +1.提交中英文版本的用户手册和代码修改的 pr。 + +用户手册主要描述功能定义和使用方式,以便用户使用。用户手册建议包括:场景描述,配置方法,接口功能描述,使用示例。官网的用户手册内容放置在 apache/iotdb 仓库根目录下,英文版放在 docs/UserGuide ,中文版放在 docs/zh/UserGuide 。 +如果需要更新用户手册,包括新增或删除文档和修改文档名,需要在 master 分支的site/src/main/.vuepress/config.js 中做相应修改。 + +2.提交单元测试UT或集成测试IT + +需要增加单元测试UT 或集成测试IT,尽量覆盖多的用例。可以参考 xxTest(路径:iotdb/server/src/test/java/org/apache/iotdb/db/query/aggregation/), xxIT(路径:iotdb/integration/src/test/java/org/apache/iotdb/db/integration/)。 + +Issue 类型:Improvement + +提交代码和 UT,一般不需要修改用户手册。 +最好提交相关实验结果,其中包含量化的改进效果和带来的副作用。 + +Issue 类型:Bug + +需要编写能够复现此 bug 的 单元测试 UT 或集成测试 IT。 + +#### 代码管理 +分支管理: + +IoTDB 版本命名方式为:0.大版本.小版本。例如 0.12.4,12 就是大版本,4 是小版本。 + +master 分支作为当前主开发分支,对应下一个未发布的大版本,每个大版本发布时会切出一个单独的分支归档,如 0.12.x 系列版本的代码处于 rel/0.12 分支下。 + +后续如果发现并修复了某发布版本的 bug。对这些 bug 的修复都需要往大于等于该版本对应的归档分支提 pr。如一个关于 0.11.x 版本 bug 修复的 pr 需要同时向 rel/0.11、rel/0.12 和 master 分支提交。 + +代码格式化: +提交 PR 前需要使用 mvn spotless:apply 将代码格式化,再 commit,不然会导致 ci 代码格式化检查失败。 + +注意事项: + +iotdb-datanode.properties 和 IoTDBConfig 默认值需要保持一致。 + +如果需要对配置参数进行改动。以下文件需要同时修改: + 1. 配置文件:server/src/assembly/resources/conf/iotdb-datanode.properties + 2. 代码:IoTDBDescriptor、IoTDBConfig + 3. 文档:docs/UserGuide/Reference/Config-Manual.md、docs/zh/UserGuide/Reference/Config-Manual.md + +如果你想要在 IT 和 UT 文件中对配置参数进行修改,你需要在 @Before 修饰的方法里修改,并且在 @After 修饰的方法里重置,来避免对其他测试的影响。合并模块的参数统一放在CompactionConfigRestorer 文件里。 + +#### PR 命名 +命名方式:分支标签-Jira 标签-PR 名 + +示例: [To rel/0.12] [IoTDB-1907] implement customized sync process: sender + +分支标签 + +如果是向非 master 分支提 pr,如 rel/0.13 分支,需要在 pr 名写上 [To rel/0.13]。如果是指向master分支,则不需要写分支标签。 + +Jira 标签 + +以 JIRA 号开头,如:[IOTDB-1907] implement customized sync process: sender。这样创建 PR 后,机器人会将 PR 链接自动链到对应 issue 上。 + +如果创建 PR 时忘记添加 JIRA 号,或 JIRA 号不规范,则 PR 不会被自动链接到 Jira 上,需要先改正 PR 命名,并手动将 PR 链接贴到 issue 页面(通过留言或链接框)。 + +#### PR 描述 +通常 PR 名无法涵盖所有改动,需要添加具体描述,改动了哪些内容。对于较难理解的地方给予一定的解释。 + +修 bug 的 pr 需要描述 bug 出现的原因,以及解决方法,另外还需要描述UT/IT测试用例添加的情况和负面效果的描述。 + +#### 提交 PR 后 + +向邮件列表 dev@iotdb.apache.org 发送一封邮件,主要介绍 PR 的工作。重视每个审阅者的意见,一一回复,并对达成一致的建议进行修改。 + +### 审阅PR +主要关注以下事项: +1. PR命名是否规范,新功能和bug修复类型的pr是否带了JIRA 号。 +2. PR 描述是否清晰。 +3. 功能测试用例或性能测试报告是否附上。 +4. 新功能是否有用户手册。 +5. 尽量不夹带其他问题的代码修改,将不相关的修改拆分到其他PR。 + +代码审阅流程: + +1. 点击 PR 的 Files changed + +2. 对于有问题的行,移动到左侧,会出现加号,点击加号,然后评论,点击 Start a review,此时,所有的 Review 意见都会暂存起来,别人看不到。 +3. 
所有评论加完后,需要点击 Review changes,选择你的意见,已经可以合并的选择 Approve,有 Bug 需要改的选择 Request changes 或者 Comment,不确定的选择 Comment。最后 Submit review 提交审阅意见,提 PR 的人才能看见此意见。 + +### 合并PR +确认所有审阅意见均已回复。 + +有1个以上 committer 的Approval。 + +选择 squash merge (当且仅当作者仅有一个提交记录,且记录的commitlog清晰,可选择rebase)。 + +到 JIRA 上关闭对应的 issue,标记修复或完成的版本【注意,解决或关闭 issue 都需要对 issue 添加 pr 或描述,通过 issue 要能够追踪这个任务的变动】。 + +## 如何写用户手册和设计文档 +官网的用户手册和其他文档都在 apache/iotdb 仓库中进行维护。 + +官网各个页面的索引项是在 master 分支的 site/src/main/.vuepress/config.js 文件维护的,用户手册的具体内容是在各个版本的分支维护的,如 0.12 版本的用户手册文档在 rel/0.12 中。 + +用户手册 + +主要描述功能定义和使用方式,以便用户使用。 +用户手册建议包括:场景描述,配置参数,接口功能描述,使用示例。用户手册建议包括:场景描述,配置参数,接口功能描述,使用示例。 + +设计文档 + +主要描述如何实现某个功能,包括代码模块组织及算法,以便开发者参与。 +设计文档建议包括如下内容:背景描述,设计目标,总体思路,主要模块,接口设计 + +### 如何修改用户手册 +贡献用户手册和贡献代码的流程是一样的,只是修改的文件不同。 +用户手册的英文版放在 docs/UserGuide , 中文版放在 docs/zh/UserGuide 下。 +如果需要更新 {master} 或 {rel/*} 分支的用户手册目录,包括新增或删除md文档、修改md文档名,需要在 master 分支的site/src/main/.vuepress/config.js 中做相应修改。 + +### 如何修改官网顶部导航栏 + +在 site/src/main/.vuepress/config.js 中搜索 nav (应有中英文两个地方),仿照现有的代码做相应修改。之后提交PR等待合并。需要新增的文档可以放进 docs 和 docs/zh 文件夹下。 \ No newline at end of file diff --git a/docs/zh/Download/README.md b/docs/zh/Download/README.md index 300683d58502..330d212fb3ee 100644 --- a/docs/zh/Download/README.md +++ b/docs/zh/Download/README.md @@ -83,7 +83,7 @@ ``` # Linux > sudo sysctl -w net.core.somaxconn=65535 - + # FreeBSD or Darwin > sudo sysctl -w kern.ipc.somaxconn=65535 ``` @@ -93,6 +93,14 @@ * 只需要下载新的小版本, 然后修改其配置文件,使其与原有版本的设置一致。 * 停掉旧版本进程,启动新版本即可。 +- 如何从 v0.13.x 升级到 v0.14.x? + + - **0.14 版本进行了 SQL 语法约定的改动(请参考用户手册语法约定章节),不兼容之处如下:** + - **不使用反引号引用的标识符不允许为纯数字,不使用反引号引用的标识符,只允许包含字母、中文字符、下划线。如果标识符中出现上述情况,请使用反引号将标识符括起。** + - **标识符不再支持使用单引号和双引号进行引用,请统一改为使用反引号引用。** + - **Session 接口中使用路径结点名时,写法需要与 SQL 语句中的一致。如路径结点为纯数字111,在 SQL 语句中需要使用反引号括起,写作\`111\`, 那么使用 Session 接口时,相应参数也需要写作\`111\`。** + - 为了保证 UDF 相关 API 的稳定性,0.14 版本中 UDF 相关 API 被独立为一个单独的 module,不再依赖 tsfile 包,已经实现的 UDF 需要改写代码,将 `TsDataType` 替换为 `Type`,将 `org.apache.iotdb.tsfile.utils.Binary` 替换成 `org.apache.iotdb.udf.api.type.Binary`,并重新进行打包装载流程。 + - 如何从 v0.12.x 升级到 v0.13.x? * 0.12 与 0.13 的数据文件格式兼容,但写前日志等格式不兼容,因此需要进行升级(但速度很快): * **在 0.12 的 CLI 中执行 `SET SYSTEM TO READONLY` 命令,并停止新数据的写入。** @@ -101,9 +109,13 @@ * 下载最新 0.13 版本,解压并修改配置文件,尤其是 **时间戳精度等首次启动后不可修改的配置参数 **。并将各数据目录都指向备份的或者 v0.12 原来使用的数据目录。 * 停止 v0.12 的实例,启动 v0.13 的实例。 * **执行上述步骤后, 请确认`data/system/schema/system.properties`文件中的 `iotdb_version` 是 `0.13.x`. -如果不是,请手动修改为`0.13.x`。** + 如果不是,请手动修改为`0.13.x`。** * __注意:0.13 的配置文件进行了较大改动,因此不要直接将 0.12 的配置文件用于 0.13__ - + * **0.13 进行了 SQL 语法的改动,不使用反引号括起的标识符中仅能包含如下字符,否则需要使用反引号括起。** + * **[0-9 a-z A-Z _ : @ # $ { }] (字母,数字,部分特殊字符)** + * **['\u2E80'..'\u9FFF'] (UNICODE 中文字符)** + * **0.13 中 `SELECT` 子句中路径结点名如果是纯数字,需要使用反引号引起,用于与表达式中的常数区分。如语句 "select 123 + \`123\` from root.sg",前一个123表示常数,后一个 \`123\`会和 root.sg 拼接,表示路径 root.sg.\`123\`。** + - 如何从 v0.11.x 或 v0.10.x 升级到 v0.12.x? * 从 0.11 或 0.10 升级到 0.12 的过程与 v0.9 升级到 v0.10 类似,升级工具会自动进行数据文件的升级。 * **停掉旧版本新数据写入。** @@ -115,7 +127,7 @@ * __注意 1:0.12 的配置文件进行了较大改动,因此不要直接将原本的配置文件用于 0.12__ * __注意 2: 由于 0.12 不支持从 0.9 或者更低版本升级,如果需要升级,请先升级到 0.10 版本__ * __注意 3: 在文件升级完成前,最好不要进行 delete 操作。如果删除某个存储组内的数据且该存储组内存在待升级文件,删除会失败。__ - + - 如何从 v0.10.x 升级到 v0.11.x? 
* 0.10 与 0.11 的数据文件格式兼容,但写前日志等格式不兼容,因此需要进行升级(但速度很快): * **停掉 0.10 的新数据写入。** @@ -136,7 +148,7 @@ * 我们推荐提前备份数据文件(以及写前日志和 mlog 文件),以备回滚。 * 下载最新版,解压并修改配置文件。将各数据目录都指向备份的或者 v0.8 原来使用的数据目录。 * 停止 v0.8 的实例,启动 v0.9.x 的实例。IoTDB 将自动升级数据文件格式。 - + # 所有版本 diff --git a/docs/zh/UserGuide/API/Programming-Java-Native-API.md b/docs/zh/UserGuide/API/Programming-Java-Native-API.md index 4eb1492d7f64..94c1ed40f2e8 100644 --- a/docs/zh/UserGuide/API/Programming-Java-Native-API.md +++ b/docs/zh/UserGuide/API/Programming-Java-Native-API.md @@ -52,11 +52,9 @@ mvn clean install -pl session -am -Dmaven.test.skip=true - 对于 IoTDB-SQL 接口:传入的 SQL 参数需要符合 [语法规范](../Reference/Syntax-Conventions.md) ,并且针对 JAVA 字符串进行反转义,如双引号前需要加反斜杠。(即:经 JAVA 转义之后与命令行执行的 SQL 语句一致。) - 对于其他接口: - - 经参数传入的路径或路径前缀中的节点: - - 在 SQL 语句中需要使用反引号(`)进行转义的,此处均不需要进行转义。 - - 使用单引号或双引号括起的节点,仍需要使用单引号或双引号括起,并且要针对 JAVA 字符串进行反转义。 - - 对于 `checkTimeseriesExists` 接口,由于内部调用了 IoTDB-SQL 接口,因此需要和 SQL 语法规范保持一致,并且针对 JAVA 字符串进行反转义。 - - 经参数传入的标识符(如模板名):在 SQL 语句中需要使用反引号(`)进行转义的,此处均不需要进行转义。 + - 经参数传入的路径或路径前缀中的节点: 在 SQL 语句中需要使用反引号(`)进行转义的,此处均需要进行转义。 + - 经参数传入的标识符(如模板名):在 SQL 语句中需要使用反引号(`)进行转义的,均可以不用进行转义。 + - 语法说明相关代码示例可以参考:`example/session/src/main/java/org/apache/iotdb/SyntaxConventionRelatedExample.java` ## 基本接口说明 diff --git a/docs/zh/UserGuide/API/Programming-MQTT.md b/docs/zh/UserGuide/API/Programming-MQTT.md index 78019546f26b..098fc3d961b4 100644 --- a/docs/zh/UserGuide/API/Programming-MQTT.md +++ b/docs/zh/UserGuide/API/Programming-MQTT.md @@ -36,7 +36,7 @@ IoTDB 服务器包括内置的 MQTT 服务,该服务允许远程设备将消 内置的 MQTT 服务提供了通过 MQTT 直接连接到 IoTDB 的能力。 它侦听来自 MQTT 客户端的发布消息,然后立即将数据写入存储。 MQTT 主题与 IoTDB 时间序列相对应。 消息有效载荷可以由 Java SPI 加载的`PayloadFormatter`格式化为事件,默认实现为`JSONPayloadFormatter` - 默认的`json`格式化程序支持两种 json 格式,以下是 MQTT 消息有效负载示例: + 默认的`json`格式化程序支持两种 json 格式以及由他们组成的json数组,以下是 MQTT 消息有效负载示例: ```json { @@ -48,13 +48,14 @@ MQTT 主题与 IoTDB 时间序列相对应。 ``` 或者 ```json -{ + { "device":"root.sg.d1", "timestamps":[1586076045524,1586076065526], "measurements":["s1","s2"], "values":[[0.530635,0.530635], [0.530655,0.530695]] - } + } ``` +或者以上两者的JSON数组形式。 @@ -155,7 +156,7 @@ public class CustomizedJsonPayloadFormatter implements PayloadFormatter { @Override public String getName() { - // set the value of mqtt_payload_formatter in iotdb-engine.properties as the following string: + // set the value of mqtt_payload_formatter in iotdb-datanode.properties as the following string: return "CustomizedJson"; } } @@ -168,8 +169,8 @@ public class CustomizedJsonPayloadFormatter implements PayloadFormatter { 在 IoTDB 服务端: * 创建 ${IOTDB_HOME}/ext/mqtt/ 文件夹, 将刚才的 jar 包放入此文件夹。 -* 打开 MQTT 服务参数. (`enable_mqtt_service=true` in `conf/iotdb-engine.properties`) -* 用刚才的实现类中的 getName() 方法的返回值 设置为 `conf/iotdb-engine.properties` 中 `mqtt_payload_formatter` 的值, +* 打开 MQTT 服务参数. (`enable_mqtt_service=true` in `conf/iotdb-datanode.properties`) +* 用刚才的实现类中的 getName() 方法的返回值 设置为 `conf/iotdb-datanode.properties` 中 `mqtt_payload_formatter` 的值, , 在本例中,为 `CustomizedJson` * 启动 IoTDB * 搞定. diff --git a/docs/zh/UserGuide/API/Programming-Python-Native-API.md b/docs/zh/UserGuide/API/Programming-Python-Native-API.md index 55fc59d031da..9f385081bd6d 100644 --- a/docs/zh/UserGuide/API/Programming-Python-Native-API.md +++ b/docs/zh/UserGuide/API/Programming-Python-Native-API.md @@ -246,6 +246,99 @@ session.execute_query_statement(sql) session.execute_non_query_statement(sql) ``` +* 执行语句 + +```python +session.execute_statement(sql) +``` + + +### 元数据模版接口 +#### 构建元数据模版 +1. 首先构建Template类 +2. 添加子节点,可以选择InternalNode或MeasurementNode +3. 
调用创建元数据模版接口 + +```python +template = Template(name=template_name, share_time=True) + +i_node_gps = InternalNode(name="GPS", share_time=False) +i_node_v = InternalNode(name="vehicle", share_time=True) +m_node_x = MeasurementNode("x", TSDataType.FLOAT, TSEncoding.RLE, Compressor.SNAPPY) + +i_node_gps.add_child(m_node_x) +i_node_v.add_child(m_node_x) + +template.add_template(i_node_gps) +template.add_template(i_node_v) +template.add_template(m_node_x) + +session.create_schema_template(template) +``` +#### 修改模版节点信息 +修改模版节点,其中修改的模版必须已经被创建。以下函数能够在已经存在的模版中增加或者删除物理量 +* 在模版中增加实体 +```python +session.add_measurements_in_template(template_name, measurements_path, data_types, encodings, compressors, is_aligned) +``` + +* 在模版中删除物理量 +```python +session.delete_node_in_template(template_name, path) +``` + +#### 挂载元数据模板 +```python +session.set_schema_template(template_name, prefix_path) +``` + +#### 卸载元数据模版 +```python +session.unset_schema_template(template_name, prefix_path) +``` + +#### 查看元数据模版 +* 查看所有的元数据模版 +```python +session.show_all_templates() +``` +* 查看元数据模版中的物理量个数 +```python +session.count_measurements_in_template(template_name) +``` + +* 判断某个节点是否为物理量,该节点必须已经在元数据模版中 +```python +session.count_measurements_in_template(template_name, path) +``` + +* 判断某个路径是否在元数据模版中,这个路径有可能不在元数据模版中 +```python +session.is_path_exist_in_template(template_name, path) +``` + +* 查看某个元数据模板下的物理量 +```python +session.show_measurements_in_template(template_name) +``` + +* 查看挂载了某个元数据模板的路径前缀 +```python +session.show_paths_template_set_on(template_name) +``` + +* 查看使用了某个元数据模板(即序列已创建)的路径前缀 +```python +session.show_paths_template_using_on(template_name) +``` + +#### 删除元数据模版 +删除已经存在的元数据模版,不支持删除已经挂载的模版 +```python +session.drop_schema_template("template_python") +``` + + ### 对 Pandas 的支持 我们支持将查询结果轻松地转换为 [Pandas Dataframe](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)。 @@ -294,6 +387,145 @@ class MyTestCase(unittest.TestCase): 默认情况下,它会拉取最新的 IoTDB 镜像 `apache/iotdb:latest`进行测试,如果您想指定待测 IoTDB 的版本,您只需要将版本信息像这样声明:`IoTDBContainer("apache/iotdb:0.12.0")`,此时,您就会得到一个`0.12.0`版本的 IoTDB 实例。 +### IoTDB DBAPI + +IoTDB DBAPI 遵循 Python DB API 2.0 规范 (https://peps.python.org/pep-0249/),实现了通过Python语言访问数据库的通用接口。 + +#### 例子 ++ 初始化 + +初始化的参数与Session部分保持一致(sqlalchemy_mode参数除外,该参数仅在SQLAlchemy方言中使用) +```python +from iotdb.dbapi import connect + +ip = "127.0.0.1" +port_ = "6667" +username_ = "root" +password_ = "root" +conn = connect(ip, port_, username_, password_,fetch_size=1024,zone_id="UTC+8",sqlalchemy_mode=False) +cursor = conn.cursor() +``` ++ 执行简单的SQL语句 +```python +cursor.execute("SELECT ** FROM root") +for row in cursor.fetchall(): + print(row) +``` + ++ 执行带有参数的SQL语句 + +IoTDB DBAPI 支持pyformat风格的参数 +```python +cursor.execute("SELECT ** FROM root WHERE time < %(time)s",{"time":"2017-11-01T00:08:00.000"}) +for row in cursor.fetchall(): + print(row) +``` + ++ 批量执行带有参数的SQL语句 +```python +seq_of_parameters = [ + {"timestamp": 1, "temperature": 1}, + {"timestamp": 2, "temperature": 2}, + {"timestamp": 3, "temperature": 3}, + {"timestamp": 4, "temperature": 4}, + {"timestamp": 5, "temperature": 5}, +] +sql = "insert into root.cursor(timestamp,temperature) values(%(timestamp)s,%(temperature)s)" +cursor.executemany(sql,seq_of_parameters) +``` + ++ 关闭连接 +```python +cursor.close() +conn.close() +``` + +### IoTDB SQLAlchemy Dialect(实验性) +IoTDB的SQLAlchemy方言主要是为了适配Apache superset而编写的,该部分仍在完善中,请勿在生产环境中使用! 
+#### 元数据模型映射 +SQLAlchemy 所使用的数据模型为关系数据模型,这种数据模型通过表格来描述不同实体之间的关系。 +而 IoTDB 的数据模型为层次数据模型,通过树状结构来对数据进行组织。 +为了使 IoTDB 能够适配 SQLAlchemy 的方言,需要对 IoTDB 中原有的数据模型进行重新组织, +把 IoTDB 的数据模型转换成 SQLAlchemy 的数据模型。 + +IoTDB 中的元数据有: + +1. Storage Group:存储组 +2. Path:存储路径 +3. Entity:实体 +4. Measurement:物理量 + +SQLAlchemy 中的元数据有: +1. Schema:数据模式 +2. Table:数据表 +3. Column:数据列 + +它们之间的映射关系为: + +| SQLAlchemy中的元数据 | IoTDB中对应的元数据 | +| -------------------- | ---------------------------------------------- | +| Schema | Storage Group | +| Table | Path ( from storage group to entity ) + Entity | +| Column | Measurement | + +下图更加清晰的展示了二者的映射关系: + +![sqlalchemy-to-iotdb](https://github.com/apache/iotdb-bin-resources/blob/main/docs/UserGuide/API/IoTDB-SQLAlchemy/sqlalchemy-to-iotdb.png?raw=true) + +#### 数据类型映射 +| IoTDB 中的数据类型 | SQLAlchemy 中的数据类型 | +|--------------|-------------------| +| BOOLEAN | Boolean | +| INT32 | Integer | +| INT64 | BigInteger | +| FLOAT | Float | +| DOUBLE | Float | +| TEXT | Text | +| LONG | BigInteger | +#### Example + ++ 执行语句 + +```python +from sqlalchemy import create_engine + +engine = create_engine("iotdb://root:root@127.0.0.1:6667") +connect = engine.connect() +result = connect.execute("SELECT ** FROM root") +for row in result.fetchall(): + print(row) +``` + ++ ORM (目前只支持简单的查询) + +```python +from sqlalchemy import create_engine, Column, Float, BigInteger, MetaData +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker + +metadata = MetaData( + schema='root.factory' +) +Base = declarative_base(metadata=metadata) + + +class Device(Base): + __tablename__ = "room2.device1" + Time = Column(BigInteger, primary_key=True) + temperature = Column(Float) + status = Column(Float) + + +engine = create_engine("iotdb://root:root@127.0.0.1:6667") + +DbSession = sessionmaker(bind=engine) +session = DbSession() + +res = session.query(Device.status).filter(Device.temperature > 1) + +for row in res: + print(row) +``` ## 给开发人员 diff --git a/docs/zh/UserGuide/Administration-Management/Administration.md b/docs/zh/UserGuide/Administration-Management/Administration.md index 74a46ea08ac5..c51787f1a17e 100644 --- a/docs/zh/UserGuide/Administration-Management/Administration.md +++ b/docs/zh/UserGuide/Administration-Management/Administration.md @@ -49,11 +49,11 @@ IoTDB 为用户提供了权限管理操作,从而为用户提供对于数据 ### 创建用户 -使用 `CREATE USER ` 创建用户。例如,我们可以使用具有所有权限的root用户为 ln 和 sgcc 集团创建两个用户角色,名为 ln_write_user, sgcc_write_user,密码均为 write_pwd。SQL 语句为: +使用 `CREATE USER ` 创建用户。例如,我们可以使用具有所有权限的root用户为 ln 和 sgcc 集团创建两个用户角色,名为 ln_write_user, sgcc_write_user,密码均为 write_pwd。建议使用反引号(`)包裹用户名。SQL 语句为: ``` -CREATE USER ln_write_user 'write_pwd' -CREATE USER sgcc_write_user 'write_pwd' +CREATE USER `ln_write_user` 'write_pwd' +CREATE USER `sgcc_write_user` 'write_pwd' ``` 此时使用展示用户的 SQL 语句: @@ -63,9 +63,9 @@ LIST USER 我们可以看到这两个已经被创建的用户,结果如下: ``` -IoTDB> CREATE USER ln_write_user 'write_pwd' +IoTDB> CREATE USER `ln_write_user` 'write_pwd' Msg: The statement is executed successfully. -IoTDB> CREATE USER sgcc_write_user 'write_pwd' +IoTDB> CREATE USER `sgcc_write_user` 'write_pwd' Msg: The statement is executed successfully. IoTDB> LIST USER +---------------+ @@ -95,18 +95,21 @@ Msg: 602: No permissions for this operation INSERT 现在,我们用root用户分别赋予他们向对应存储组数据的写入权限. 
-我们使用 `GRANT USER PRIVILEGES ON ` 语句赋予用户权限,例如: +我们使用 `GRANT USER PRIVILEGES ON ` 语句赋予用户权限(注:其中,创建用户权限无需指定路径),例如: ``` -GRANT USER ln_write_user PRIVILEGES INSERT_TIMESERIES on root.ln.** -GRANT USER sgcc_write_user PRIVILEGES INSERT_TIMESERIES on root.sgcc.** +GRANT USER `ln_write_user` PRIVILEGES INSERT_TIMESERIES on root.ln.** +GRANT USER `sgcc_write_user` PRIVILEGES INSERT_TIMESERIES on root.sgcc.** +GRANT USER `ln_write_user` PRIVILEGES CREATE_USER ``` 执行状态如下所示: ``` -IoTDB> GRANT USER ln_write_user PRIVILEGES INSERT_TIMESERIES on root.ln.** +IoTDB> GRANT USER `ln_write_user` PRIVILEGES INSERT_TIMESERIES on root.ln.** Msg: The statement is executed successfully. -IoTDB> GRANT USER sgcc_write_user PRIVILEGES INSERT_TIMESERIES on root.sgcc.** +IoTDB> GRANT USER `sgcc_write_user` PRIVILEGES INSERT_TIMESERIES on root.sgcc.** +Msg: The statement is executed successfully. +IoTDB> GRANT USER `ln_write_user` PRIVILEGES CREATE_USER Msg: The statement is executed successfully. ``` @@ -118,19 +121,22 @@ Msg: The statement is executed successfully. ### 撤销用户权限 -授予用户权限后,我们可以使用 `REVOKE USER PRIVILEGES ON ` 来撤销已授予的用户权限。例如,用root用户撤销ln_write_user和sgcc_write_user的权限: +授予用户权限后,我们可以使用 `REVOKE USER PRIVILEGES ON ` 来撤销已授予的用户权限(注:其中,撤销创建用户权限无需指定路径)。例如,用root用户撤销ln_write_user和sgcc_write_user的权限: ``` -REVOKE USER ln_write_user PRIVILEGES INSERT_TIMESERIES on root.ln.** -REVOKE USER sgcc_write_user PRIVILEGES INSERT_TIMESERIES on root.sgcc.** +REVOKE USER `ln_write_user` PRIVILEGES INSERT_TIMESERIES on root.ln.** +REVOKE USER `sgcc_write_user` PRIVILEGES INSERT_TIMESERIES on root.sgcc.** +REVOKE USER `ln_write_user` PRIVILEGES CREATE_USER ``` 执行状态如下所示: ``` -REVOKE USER ln_write_user PRIVILEGES INSERT_TIMESERIES on root.ln.** +REVOKE USER `ln_write_user` PRIVILEGES INSERT_TIMESERIES on root.ln.** +Msg: The statement is executed successfully. +REVOKE USER `sgcc_write_user` PRIVILEGES INSERT_TIMESERIES on root.sgcc.** Msg: The statement is executed successfully. -REVOKE USER sgcc_write_user PRIVILEGES INSERT_TIMESERIES on root.sgcc.** +REVOKE USER `ln_write_user` PRIVILEGES CREATE_USER Msg: The statement is executed successfully. 
``` @@ -148,70 +154,98 @@ Msg: 602: No permissions for this operation INSERT ``` CREATE USER ; -Eg: IoTDB > CREATE USER thulab 'passwd'; +Eg: IoTDB > CREATE USER `thulab` 'passwd'; ``` * 删除用户 ``` DROP USER ; -Eg: IoTDB > DROP USER xiaoming; +Eg: IoTDB > DROP USER `xiaoming`; ``` * 创建角色 ``` CREATE ROLE ; -Eg: IoTDB > CREATE ROLE admin; +Eg: IoTDB > CREATE ROLE `admin`; ``` * 删除角色 ``` DROP ROLE ; -Eg: IoTDB > DROP ROLE admin; +Eg: IoTDB > DROP ROLE `admin`; ``` * 赋予用户权限 ``` GRANT USER PRIVILEGES ON ; -Eg: IoTDB > GRANT USER tempuser PRIVILEGES DELETE_TIMESERIES on root.ln.**; +Eg: IoTDB > GRANT USER `tempuser` PRIVILEGES DELETE_TIMESERIES on root.ln.**; +``` + +- 赋予用户全部的权限 + +``` +GRANT USER PRIVILEGES ALL ON ; +Eg: IoTDB > grant user renyuhua privileges all on root.** ``` * 赋予角色权限 ``` GRANT ROLE PRIVILEGES ON ; -Eg: IoTDB > GRANT ROLE temprole PRIVILEGES DELETE_TIMESERIES ON root.ln.**; +Eg: IoTDB > GRANT ROLE `temprole` PRIVILEGES DELETE_TIMESERIES ON root.ln.**; +``` + +- 赋予角色全部的权限 + +``` +GRANT ROLE PRIVILEGES ALL ON ; +Eg: IoTDB > GRANT ROLE `temprole` PRIVILEGES ALL ON root.ln.**; ``` * 赋予用户角色 ``` GRANT TO ; -Eg: IoTDB > GRANT temprole TO tempuser; +Eg: IoTDB > GRANT `temprole` TO tempuser; ``` * 撤销用户权限 ``` REVOKE USER PRIVILEGES ON ; -Eg: IoTDB > REVOKE USER tempuser PRIVILEGES DELETE_TIMESERIES on root.ln.**; +Eg: IoTDB > REVOKE USER `tempuser` PRIVILEGES DELETE_TIMESERIES on root.ln.**; +``` + +- 移除用户所有权限 + +``` +REVOKE USER PRIVILEGES ALL ON ; +Eg: IoTDB > REVOKE USER `tempuser` PRIVILEGES ALL on root.ln.**; ``` * 撤销角色权限 ``` REVOKE ROLE PRIVILEGES ON ; -Eg: IoTDB > REVOKE ROLE temprole PRIVILEGES DELETE_TIMESERIES ON root.ln.**; +Eg: IoTDB > REVOKE ROLE `temprole` PRIVILEGES DELETE_TIMESERIES ON root.ln.**; +``` + +- 撤销角色全部的权限 + +``` +REVOKE ROLE PRIVILEGES ALL ON ; +Eg: IoTDB > REVOKE ROLE `temprole` PRIVILEGES ALL ON root.ln.**; ``` * 撤销用户角色 ``` REVOKE FROM ; -Eg: IoTDB > REVOKE temprole FROM tempuser; +Eg: IoTDB > REVOKE `temprole` FROM tempuser; ``` * 列出用户 @@ -232,49 +266,49 @@ Eg: IoTDB > LIST ROLE ``` LIST PRIVILEGES USER ON ; -Eg: IoTDB > LIST PRIVILEGES USER sgcc_write_user ON root.sgcc.**; +Eg: IoTDB > LIST PRIVILEGES USER `sgcc_write_user` ON root.sgcc.**; ``` * 列出角色权限 ``` LIST ROLE PRIVILEGES -Eg: IoTDB > LIST ROLE PRIVILEGES actor; +Eg: IoTDB > LIST ROLE PRIVILEGES `actor`; ``` * 列出角色在具体路径上的权限 ``` LIST PRIVILEGES ROLE ON ; -Eg: IoTDB > LIST PRIVILEGES ROLE write_role ON root.sgcc.**; +Eg: IoTDB > LIST PRIVILEGES ROLE `write_role` ON root.sgcc.**; ``` * 列出用户权限 ``` LIST USER PRIVILEGES ; -Eg: IoTDB > LIST USER PRIVILEGES tempuser; +Eg: IoTDB > LIST USER PRIVILEGES `tempuser`; ``` * 列出用户所有的角色 ``` LIST ALL ROLE OF USER ; -Eg: IoTDB > LIST ALL ROLE OF USER tempuser; +Eg: IoTDB > LIST ALL ROLE OF USER `tempuser`; ``` * 列出所有用户的角色 ``` LIST ALL USER OF ROLE ; -Eg: IoTDB > LIST ALL USER OF ROLE roleuser; +Eg: IoTDB > LIST ALL USER OF ROLE `roleuser`; ``` * 更新密码 ``` ALTER USER SET PASSWORD ; -Eg: IoTDB > ALTER USER tempuser SET PASSWORD 'newpwd'; +Eg: IoTDB > ALTER USER `tempuser` SET PASSWORD 'newpwd'; ``` @@ -294,36 +328,51 @@ Eg: IoTDB > ALTER USER tempuser SET PASSWORD 'newpwd'; **系统所含权限列表** -|权限名称|说明| -|:---|:---| -|SET\_STORAGE\_GROUP|创建存储组。包含设置存储组的权限。路径相关| -|DELETE\_STORAGE\_GROUP|删除存储组。路径相关| -|CREATE\_TIMESERIES|创建时间序列。路径相关| -|INSERT\_TIMESERIES|插入数据。路径相关| -|READ\_TIMESERIES|查询数据。路径相关| -|DELETE\_TIMESERIES|删除数据或时间序列。路径相关| -|DELETE\_STORAGE\_GROUP|删除存储组。路径相关| -|CREATE\_USER|创建用户。路径无关| -|DELETE\_USER|删除用户。路径无关| -|MODIFY\_PASSWORD|修改所有用户的密码。路径无关。(没有该权限者仍然能够修改自己的密码。)| 
-|LIST\_USER|列出所有用户,列出某用户权限,列出某用户具有的角色以及列出所有用户的角色四种操作的权限。路径无关| -|GRANT\_USER\_PRIVILEGE|赋予用户权限。路径无关| -|REVOKE\_USER\_PRIVILEGE|撤销用户权限。路径无关| -|GRANT\_USER\_ROLE|赋予用户角色。路径无关| -|REVOKE\_USER\_ROLE|撤销用户角色。路径无关| -|CREATE\_ROLE|创建角色。路径无关| -|DELETE\_ROLE|删除角色。路径无关| -|LIST\_ROLE|列出所有角色,列出某角色拥有的权限,列出拥有某角色的所有用户三种操作的权限。路径无关| -|GRANT\_ROLE\_PRIVILEGE|赋予角色权限。路径无关| -|REVOKE\_ROLE\_PRIVILEGE|撤销角色权限。路径无关| -|CREATE_FUNCTION|注册 UDF。路径无关| -|DROP_FUNCTION|卸载 UDF。路径无关| -|CREATE_TRIGGER|创建触发器。路径相关| -|DROP_TRIGGER|卸载触发器。路径相关| -|START_TRIGGER|启动触发器。路径相关| -|STOP_TRIGGER|停止触发器。路径相关| -|CREATE_CONTINUOUS_QUERY|创建连续查询。路径无关| -|DROP_CONTINUOUS_QUERY|卸载连续查询。路径无关| +|权限名称|说明|示例| +|:---|:---|----| +|SET\_STORAGE\_GROUP|创建存储组。包含设置存储组的权限。路径相关|Eg: `set storage group to root.ln;`| +|DELETE\_STORAGE\_GROUP|删除存储组。路径相关|Eg: `delete storage group root.ln;`| +|CREATE\_TIMESERIES|创建时间序列。路径相关|Eg1: 创建时间序列
`create timeseries root.ln.wf02.status with datatype=BOOLEAN,encoding=PLAIN;`
Eg2: 创建对齐时间序列
`create aligned timeseries root.ln.device1(latitude FLOAT encoding=PLAIN compressor=SNAPPY, longitude FLOAT encoding=PLAIN compressor=SNAPPY);`| +|INSERT\_TIMESERIES|插入数据。路径相关|Eg1: `insert into root.ln.wf02(timestamp,status) values(1,true);`
Eg2: `insert into root.sg1.d1(time, s1, s2) aligned values(1, 1, 1)`| +|READ\_TIMESERIES|查询数据。路径相关|Eg1: `show storage group;`
Eg2: `show child paths root.ln, show child nodes root.ln;`
Eg3: `show devices;`
Eg4: `show timeseries root.**;`
Eg5: `show schema templates;`
Eg6: `show all ttl`
Eg7: [数据查询](../Query-Data/Overview.md)(这一节之下的查询语句均使用该权限)
Eg8: CSV格式数据导出
`./export-csv.bat -h 127.0.0.1 -p 6667 -u tempuser -pw root -td ./`
Eg9: 查询性能追踪
`tracing select * from root`
Eg10: UDF查询
`select example(*) from root.sg.d1`
Eg11: 查询触发器
`show triggers`| +|DELETE\_TIMESERIES|删除数据或时间序列。路径相关|Eg1: 删除时间序列
`delete timeseries root.ln.wf01.wt01.status`
Eg2: 删除数据
`delete from root.ln.wf02.wt02.status where time < 10`| +|CREATE\_USER|创建用户。路径无关|Eg: `create user thulab 'passwd';`| +|DELETE\_USER|删除用户。路径无关|Eg: `drop user xiaoming;`| +|MODIFY\_PASSWORD|修改所有用户的密码。路径无关。(没有该权限者仍然能够修改自己的密码。)|Eg: `alter user tempuser SET PASSWORD 'newpwd';`| +|LIST\_USER|列出所有用户,列出某用户权限,列出某用户具有的角色以及列出所有用户的角色四种操作的权限。路径无关|Eg1: `list user;`
Eg2: `list privileges user 'admin' on root.sgcc.**;`
Eg3: `list user privileges admin;`
Eg4: `list all user of role 'admin';`| +|GRANT\_USER\_PRIVILEGE|赋予用户权限。路径无关|Eg: `grant user tempuser privileges DELETE_TIMESERIES on root.ln.**;`| +|REVOKE\_USER\_PRIVILEGE|撤销用户权限。路径无关|Eg: `revoke user tempuser privileges DELETE_TIMESERIES on root.ln.**;`| +|GRANT\_USER\_ROLE|赋予用户角色。路径无关|Eg: `grant temprole to tempuser;`| +|REVOKE\_USER\_ROLE|撤销用户角色。路径无关|Eg: `revoke temprole from tempuser;`| +|CREATE\_ROLE|创建角色。路径无关|Eg: `create role admin;`| +|DELETE\_ROLE|删除角色。路径无关|Eg: `drop role admin;`| +|LIST\_ROLE|列出所有角色,列出某角色拥有的权限,列出拥有某角色的所有用户三种操作的权限。路径无关|Eg1: `list role`
Eg2: `list role privileges actor;`
Eg3: `list privileges role write_role ON root.sgcc;`
Eg4: `list all role of user admin;`| +|GRANT\_ROLE\_PRIVILEGE|赋予角色权限。路径无关|Eg: `grant role temprole privileges DELETE_TIMESERIES ON root.ln.**;`| +|REVOKE\_ROLE\_PRIVILEGE|撤销角色权限。路径无关|Eg: `revoke role temprole privileges DELETE_TIMESERIES ON root.ln.**;`| +|CREATE_FUNCTION|注册 UDF。路径无关|Eg: `create function example AS 'org.apache.iotdb.udf.UDTFExample';`| +|DROP_FUNCTION|卸载 UDF。路径无关|Eg: `drop function example`| +|CREATE_TRIGGER|创建触发器。路径相关|Eg1: `CREATE TRIGGER BEFORE INSERT ON AS `
Eg2: `CREATE TRIGGER AFTER INSERT ON AS `| +|DROP_TRIGGER|卸载触发器。路径相关|Eg: `drop trigger 'alert-listener-sg1d1s1'`| +|START_TRIGGER|启动触发器。路径相关|Eg: `start trigger 'alert-listener-sg1d1s1'`| +|STOP_TRIGGER|停止触发器。路径相关|Eg: `stop trigger 'alert-listener-sg1d1s1'`| +|CREATE_CONTINUOUS_QUERY|创建连续查询。路径无关|Eg: `select s1, s1 into t1, t2 from root.sg.d1`| +|DROP_CONTINUOUS_QUERY|卸载连续查询。路径无关|Eg1: `DROP CONTINUOUS QUERY cq3`
Eg2: `DROP CQ cq3`| + +注意: 下述sql语句需要赋予多个权限才可以使用: + +- 导入数据,需要赋予`READ_TIMESERIES`,`INSERT_TIMESERIES`两种权限。 + +``` +Eg: IoTDB > ./import-csv.bat -h 127.0.0.1 -p 6667 -u renyuhua -pw root -f dump0.csv +``` + +- 查询写回(SELECT_INTO) + - 需要所有 `select` 子句中源序列的 `READ_TIMESERIES` 权限 + - 需要所有 `into` 子句中目标序列 `INSERT_TIMESERIES` 权限 + +``` +Eg: IoTDB > select s1, s1 into t1, t2 from root.sg.d1 limit 5 offset 1000 +``` ### 用户名限制 @@ -339,4 +388,190 @@ IoTDB 规定角色名的字符长度不小于 4,其中角色名不能包含空 ### 权限管理中的路径模式 -一个路径模式的结果集包含了它的子模式的结果集的所有元素。例如,`root.sg.d.*`是`root.sg.*.*`的子模式,而`root.sg.**`不是`root.sg.*.*`的子模式。当用户被授予对某个路径模式的权限时,在他的DDL或DML中使用的模式必须是该路径模式的子模式,这保证了用户访问时间序列时不会超出他的权限范围。 \ No newline at end of file +一个路径模式的结果集包含了它的子模式的结果集的所有元素。例如,`root.sg.d.*`是`root.sg.*.*`的子模式,而`root.sg.**`不是`root.sg.*.*`的子模式。当用户被授予对某个路径模式的权限时,在他的DDL或DML中使用的模式必须是该路径模式的子模式,这保证了用户访问时间序列时不会超出他的权限范围。 + +### 权限缓存 + +在分布式相关的权限操作中,在进行除了创建用户和角色之外的其他权限更改操作时,都会先清除与该用户(角色)相关的所有的`dataNode`的缓存信息,如果任何一台`dataNode`缓存信息清楚失败,这个权限更改的任务就会失败。 + +### 非root用户限制进行的操作 + +目前以下IoTDB支持的sql语句只有`root`用户可以进行操作,且没有对应的权限可以赋予新用户。 + +###### TTL + +- 设置ttl + +``` +Eg: IoTDB > set ttl to root.ln 3600 +``` + +- 取消ttl + +``` +Eg: IoTDB > unset ttl to root.ln +``` + +###### 元数据模板 + +- 创建元数据模板 + +``` +Eg: IoTDB > create schema template t1 (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY) +``` + +- 挂载元数据模板 + +``` +Eg: IoTDB > set schema template t1 to root.sg1.d1 +``` + +- 卸载元数据模板 + +``` +Eg: IoTDB > unset schema template t1 from root.sg1.d1 +``` + +- 删除元数据模板 + +``` +Eg: IoTDB > drop schema template t1 +``` + +###### 标签点管理 + +- 重命名标签或属性 + +```text +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 +``` + +- 重新设置标签或属性的值 + +```text +ALTER timeseries root.turbine.d1.s1 SET newTag1=newV1, attr1=newV1 +``` + +- 删除已经存在的标签或属性 + +```text +ALTER timeseries root.turbine.d1.s1 DROP tag1, tag2 +``` + +- 添加新的标签 + +```text +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3=v3, tag4=v4 +``` + +- 添加新的属性 + +```text +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3=v3, attr4=v4 +``` + +- 更新插入别名,标签和属性 + +```text +ALTER timeseries root.turbine.d1.s1 UPSERT ALIAS=newAlias TAGS(tag2=newV2, tag3=v3) ATTRIBUTES(attr3=v3, attr4=v4) +``` + +###### TsFile管理 + +- 加载TsFile + +``` +Eg: IoTDB > load '/Users/Desktop/data/1575028885956-101-0.tsfile' +``` + +- 删除TsFile文件 + +``` +Eg: IoTDB > remove '/Users/Desktop/data/data/root.vehicle/0/0/1575028885956-101-0.tsfile' +``` + +- 卸载TsFile文件到指定目录 + +``` +Eg: IoTDB > unload '/Users/Desktop/data/data/root.vehicle/0/0/1575028885956-101-0.tsfile' '/data/data/tmp' +``` + +###### 统计 + +- 统计存储组/节点数/设备/时间序列 + +``` +Eg: IoTDB > count storage group +Eg: IoTDB > count nodes root.** LEVEL=2 +Eg: IoTDB > count devices root.ln.** +Eg: IoTDB > count timeseries root.** +``` + +###### 删除时间分区(实验性功能) + +- 删除时间分区(实验性功能) + +``` +Eg: IoTDB > DELETE PARTITION root.ln 0,1,2 +``` + +###### 连续查询 + +- 连续查询(CQ) + +``` +Eg: IoTDB > CREATE CONTINUOUS QUERY cq1 BEGIN SELECT max_value(temperature) INTO temperature_max FROM root.ln.*.* GROUP BY time(10s) END +``` + +###### 运维命令 + +- FLUSH + +``` +Eg: IoTDB > flush +``` + +- MERGE + +``` +Eg: IoTDB > MERGE +Eg: IoTDB > FULL MERGE +``` + +- CLEAR CACHE + +```sql +Eg: IoTDB > CLEAR CACHE +``` + +- SET STSTEM TO READONLY / WRITABLE + +``` +Eg: IoTDB > SET STSTEM TO READONLY / WRITABLE +``` + +- SCHEMA SNAPSHOT + +```sql +Eg: IoTDB > CREATE SNAPSHOT FOR SCHEMA +``` + +- 查询终止 + +``` +Eg: IoTDB > KILL QUERY 1 +``` + +###### 水印工具 + +- 为新用户施加水印 + +``` +Eg: IoTDB > grant watermark_embedding to Alice +``` + +- 撤销水印 + 
+``` +Eg: IoTDB > revoke watermark_embedding from Alice +``` diff --git a/docs/zh/UserGuide/Cluster/Cluster-Setup-Example.md b/docs/zh/UserGuide/Cluster/Cluster-Setup-Example.md index fa323625e05a..6bf0499b90fd 100644 --- a/docs/zh/UserGuide/Cluster/Cluster-Setup-Example.md +++ b/docs/zh/UserGuide/Cluster/Cluster-Setup-Example.md @@ -171,7 +171,7 @@ nohup ./sbin/start-node.sh >/dev/null 2>&1 &nohup ./sbin/start-node.sh ./node2_c 假设我们需要在三个物理节点上部署分布式 IoTDB,这三个节点分别为 A, B 和 C,其公网 ip 分别为 A\_public\_IP*, *B\_public\_IP*, and *C\_public\_IP*,私网 ip 分别为 *A\_private\_IP*, *B\_private\_IP*, and *C\_private\_IP*. -注:如果没有公网 ip 或者私网 ip 则两者**设置成一致**即可,只需要保证客户端能够访问到服务端即可。 私网ip对应iotdb-cluster.properties中的`internal_ip`配置项,公网ip对应iotdb-engine.properties中的`rpc_address`配置项。 +注:如果没有公网 ip 或者私网 ip 则两者**设置成一致**即可,只需要保证客户端能够访问到服务端即可。 私网ip对应iotdb-cluster.properties中的`internal_address`配置项,公网ip对应iotdb-engine.properties中的`rpc_address`配置项。 ### 配置 **节点A**: @@ -183,7 +183,7 @@ seed_nodes = A_private_Ip:9003,B_private_Ip:9003,C_private_Ip:9003 default_replica_num = 3 internal_meta_port = 9003 internal_data_port = 40010 -internal_ip = A_private_Ip +internal_address = A_private_Ip ``` ***iotdb-engine.properties*** @@ -202,7 +202,7 @@ seed_nodes = A_private_Ip:9003,B_private_Ip:9003,C_private_Ip:9003 default_replica_num = 3 internal_meta_port = 9003 internal_data_port = 40010 -internal_ip = B_private_Ip +internal_address = B_private_Ip ``` ***iotdb-engine.properties*** @@ -221,7 +221,7 @@ seed_nodes = A_private_Ip:9003,B_private_Ip:9003,C_private_Ip:9003 default_replica_num = 3 internal_meta_port = 9003 internal_data_port = 40010 -internal_ip = C_private_Ip +internal_address = C_private_Ip ``` ***iotdb-engine.properties*** @@ -279,7 +279,7 @@ cd apache-iotdb-0.12.4-cluster-bin 设置 internal\_ip = 节点的私有ip (以192.168.1.1为例) ``` -sed -i -e 's/^internal_ip=127.0.0.1$/internal_ip=192.168.1.1/g' conf/iotdb-cluster.properties +sed -i -e 's/^internal_address=127.0.0.1$/internal_address=192.168.1.1/g' conf/iotdb-cluster.properties ``` 设置 seed\_node = A_private_Ip:9003,B_private_Ip:9003,C_private_Ip:9003 (三个节点ip分别为192.168.1.1,192.168.1.2,192.168.1.3为例) ``` diff --git a/docs/zh/UserGuide/Cluster/Cluster-Setup.md b/docs/zh/UserGuide/Cluster/Cluster-Setup.md index 6cd5b814c043..5d916d2fcec6 100644 --- a/docs/zh/UserGuide/Cluster/Cluster-Setup.md +++ b/docs/zh/UserGuide/Cluster/Cluster-Setup.md @@ -171,13 +171,13 @@ iotdb-engines.properties 配置文件中的部分内容会不再生效: ``` # Unix/OS X -> sbin/remove-node.sh +> sbin/remove-node.sh # Windows -> sbin\remove-node.bat +> sbin\remove-node.bat ``` -`internal_ip`表示待删除节点的 IP 地址 `internal_meta_port`表示待删除节点的 meta 服务端口 +`internal_address`表示待删除节点的 IP 地址 `internal_meta_port`表示待删除节点的 meta 服务端口 ### 使用 Cli 工具 @@ -201,11 +201,11 @@ iotdb-engines.properties 配置文件中的部分内容会不再生效: ### 集群配置项 -- internal_ip +- internal_address -| 名字 | internal_ip | +| 名字 | internal_address | | ------------ | ------------------------------------------------------------ | -| 描述 | IOTDB 集群各个节点之间内部通信的 IP 地址,比如心跳、snapshot 快照、raft log 等。**`internal_ip`是集群内部的私有ip** | +| 描述 | IOTDB 集群各个节点之间内部通信的 IP 地址,比如心跳、snapshot 快照、raft log 等。**`internal_address`是集群内部的私有ip** | | 类型 | String | | 默认值 | 127.0.0.1 | | 改后生效方式 | 重启服务生效,集群建立后不可再修改 | diff --git a/docs/zh/UserGuide/Data-Concept/Data-Type.md b/docs/zh/UserGuide/Data-Concept/Data-Type.md index 5b81baa08c9c..19b9af6f0fac 100644 --- a/docs/zh/UserGuide/Data-Concept/Data-Type.md +++ b/docs/zh/UserGuide/Data-Concept/Data-Type.md @@ -34,7 +34,7 @@ IoTDB 支持: 一共六种数据类型。 -其中 **FLOAT** 与 **DOUBLE** 类型的序列,如果编码方式采用 
[RLE](Encoding.md) 或 [TS_2DIFF](Encoding.md) 可以指定 MAX_POINT_NUMBER,该项为浮点数的小数点后位数,若不指定则系统会根据配置文件`iotdb-engine.properties`文件中的 [float_precision 项](../Reference/Config-Manual.md) 配置。 +其中 **FLOAT** 与 **DOUBLE** 类型的序列,如果编码方式采用 [RLE](Encoding.md) 或 [TS_2DIFF](Encoding.md) 可以指定 MAX_POINT_NUMBER,该项为浮点数的小数点后位数,若不指定则系统会根据配置文件`iotdb-datanode.properties`文件中的 [float_precision 项](../Reference/Config-Manual.md) 配置。 当系统中用户输入的数据类型与该时间序列的数据类型不对应时,系统会提醒类型错误,如下所示,二阶差分编码不支持布尔类型: diff --git a/docs/zh/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md b/docs/zh/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md index 413bd3d3b77b..8a267f237c55 100644 --- a/docs/zh/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md +++ b/docs/zh/UserGuide/Data-Modeling/SchemaRegion-rocksdb.md @@ -34,7 +34,7 @@ mvn clean package -pl schema-engine-rocksdb -am -DskipTests 命令运行结束后,在其 target/schema-engine-rocksdb 中会有一个 lib 文件夹和 conf 文件夹。将 conf 文件夹下的文件拷贝到 server 的 conf 文件夹中,将 lib 文件夹下的文件也拷贝到 server 的 lib 的文件夹中。 -在系统配置文件`iotdb-engine.properties`中,将配置项`schema_engine_mode`修改为`Rocksdb_based`, 如: +在系统配置文件`iotdb-datanode.properties`中,将配置项`schema_engine_mode`修改为`Rocksdb_based`, 如: ``` #################### diff --git a/docs/zh/UserGuide/Ecosystem Integration/Grafana Plugin.md b/docs/zh/UserGuide/Ecosystem Integration/Grafana Plugin.md index f5c100c7e176..876875555f0e 100644 --- a/docs/zh/UserGuide/Ecosystem Integration/Grafana Plugin.md +++ b/docs/zh/UserGuide/Ecosystem Integration/Grafana Plugin.md @@ -114,19 +114,21 @@ git clone https://github.com/apache/iotdb.git #### grafana-plugin 插件安装 -* 拷贝上述生成的前端工程目标文件夹到 Grafana 的插件目录中 `${Grafana文件目录}\data\plugins\` - * Windows 系统,启动 Grafana 后会自动创建 `data\plugins` 目录 - * Linux 系统,plugins 目录需要手动创建 `/var/lib/grafana/plugins` - * MacOS,plugins 目录在`/usr/local/var/lib/grafana/plugins`(具体位置参看使用 `brew install`安装 Grafana 后的命令行输出提示) +* 拷贝上述生成的前端工程目标文件夹到 Grafana 的插件目录中 `${Grafana文件目录}\data\plugins\`。如果没有此目录可以手动建或者启动grafana会自动建立,当然也可以修改plugins的位置,具体请查看下面的修改Grafana 的插件目录位置说明。 * 修改Grafana的配置文件:找到配置文件(`${Grafana文件目录}\conf\defaults.ini`),并进行如下的修改: ```ini allow_loading_unsigned_plugins = iotdb ``` +* 修改Grafana 的插件目录位置:找到配置文件(`${Grafana文件目录}\conf\defaults.ini`),并进行如下的修改: + ```ini + plugins = data/plugins + ``` * 如果 Grafana 服务已启动,则需要重启服务。 +更多详情,请点 [这里](https://grafana.com/docs/grafana/latest/plugins/installation/) #### 启动 Grafana @@ -209,11 +211,11 @@ Ip 为您的 IoTDB 服务器所在的宿主机 IP,port 为 REST 服务的运 -Grafana Plugin 支持Raw和Aggregation 两种方式,默认是Raw方式。 +Grafana Plugin 支持SQL: Full Customized和SQL: Drop-down List 两种方式,默认是SQL: Full Customized方式。 -##### Raw 输入方式 +##### SQL: Full Customized 输入方式 在 SELECT 输入框、FROM 输入框、WHERE输入框、CONTROL输入框中输入内容,其中 WHERE 和 CONTROL 输入框为非必填。 @@ -249,14 +251,14 @@ CONTROL 输入框为非必须填写项目,填写内容应当是控制查询类 提示:为了避免OOM问题,不推荐使用select * from root.xx.** 这种语句在Grafana plugin中使用。 -##### Aggregation 输入方式 +##### SQL: Drop-down List 输入方式 在 TIME-SERIES 选择框中选择一条时间序列、FUNCTION 选择一个函数、SAMPLING INTERVAL、SLIDING STEP、LEVEL、FILL 输入框中输入内容,其中 TIME-SERIESL 为必填项其余为非必填项。 #### 变量与模板功能的支持 -Raw和Aggregation两种输入方式都支持 Grafana 的变量与模板功能,下面示例中使用Raw输入方式,Aggregation与之类似。 +SQL: Full Customized和SQL: Drop-down List两种输入方式都支持 Grafana 的变量与模板功能,下面示例中使用SQL: Full Customized输入方式,SQL: Drop-down List与之类似。 创建一个新的 Panel 后,点击右上角的设置按钮,如下图所示: diff --git a/docs/zh/UserGuide/Ecosystem Integration/Writing Data on HDFS.md b/docs/zh/UserGuide/Ecosystem Integration/Writing Data on HDFS.md index 741a038862f7..44dc57341f2a 100644 --- a/docs/zh/UserGuide/Ecosystem Integration/Writing Data on HDFS.md +++ b/docs/zh/UserGuide/Ecosystem Integration/Writing Data on HDFS.md @@ -41,7 +41,7 @@ 然后,将 
Hadoop 模块的 target jar 包`hadoop-tsfile-X.X.X-jar-with-dependencies.jar`复制到 server 模块的 target lib 文件夹 `.../server/target/iotdb-server-X.X.X/lib`下。 -编辑`iotdb-engine.properties`中的用户配置。相关配置项包括: +编辑`iotdb-datanode.properties`中的用户配置。相关配置项包括: * tsfile\_storage\_fs diff --git a/docs/zh/UserGuide/Integration-Test/Integration-Test-refactoring-tutorial.md b/docs/zh/UserGuide/Integration-Test/Integration-Test-refactoring-tutorial.md index e137e4f268b7..1be2b134a58b 100644 --- a/docs/zh/UserGuide/Integration-Test/Integration-Test-refactoring-tutorial.md +++ b/docs/zh/UserGuide/Integration-Test/Integration-Test-refactoring-tutorial.md @@ -19,145 +19,243 @@ --> -# IoTDB社区Integration Test改造说明 +# 集成测试开发者文档 -- 步骤0. 前提须知 - - **位置已移动**;所有的Integration Test已被移动至单独的integration模块。 - - **测试用例必须打分类标签**; `Category` 即测试分类标签,决定了该测试用例在哪套测试环境或流程中被测试。 - - **涉及测试环境的代码可能要重构**;决定了该测试用例是否能被当前测试环境正确测试,需要根据相应的环境重构相应的代码。 +**集成测试**是软件测试中的一个阶段。在该阶段中,各个软件模块被组合起来作为一个整体进行测试。进行集成测试是为了评估某系统或某组件是否符合指定的功能需求。 +## Apache IoTDB 集成测试规范 -- 步骤1. 测试用例打标签 +### Apache IoTDB 集成测试的环境 - - 在测试用例类或者测试用例方法前加上合适的`Category`,可以是任意期望的测试分类标签的集合。 +Apache IoTDB 集成测试的环境一共有3种,分别为**本地单机测试环境、本地集群测试环境和远程测试环境。** Apache IOTDB 的集群测试需要在其中的1种或多种环境下完成。对于这三类环境的说明如下: +1. 本地单机测试环境:该环境用于完成本地的 Apache IoTDB 单机版的集成测试。若需要变更该环境的具体配置,需要在 IoTDB 实例启动前替换相应的配置文件,再启动 IoTDB 并进行测试。 +2. 本地集群测试环境:该环境用于完成本地的 Apache IoTDB 分布式版(伪分布式)的集成测试。若需要变更该环境的具体配置,需要在 IoTDB 集群启动前替换相应的配置文件,再启动 IoTDB 集群并进行测试。 +3. 远程测试环境:该环境用于测试远程 Apache IoTDB 的功能,连接的 IoTDB 实例可能是一个单机版的实例,也可以是远程集群的某一个节点。远程测试环境的具体配置的修改受到限制,暂不支持在测试时修改。 +集成测试开发者在编写测试程序时需要指定这三种环境的1种或多种。具体指定方法见后文。 - - 真实样例,下面三个测试类的`Category`都是真实有效的, +### 黑盒测试 - ```java - @Category({LocalStandaloneTest.class, ClusterTest.class, RemoteTest.class}) - public class IoTDBAliasIT { - ...... - } - - - @Category({LocalStandaloneTest.class, ClusterTest.class}) - public class IoTDBAlignByDeviceIT { - ...... - } - - - @Category({LocalStandaloneTest.class}) - public class IoTDBArithmeticIT { - ...... - } - ``` - - - 甚至,你还可以在测试方法级别加`Category`。 - - ```java - @Category({LocalStandaloneTest.class}) - public class IoTDBExampleIT { - - // This case can ONLY test in environment of local. - @Test - public void theStandaloneCase() { - ...... - } - - // This case can test in environment of local, cluster and remote. - @Test - @Category({ClusterTest.class, RemoteTest.class}) - public void theAllEnvCase() { - ...... - } - } - ``` - - - 目前,所有测试用例至少要加上单机测试的测试分类,即`LocalStandaloneTest.class`。 +**黑盒测试** 是一种软件测试方法,它检验程序的功能,而不考虑其内部结构或工作方式。开发者不需要了解待测程序的内部逻辑即可完成测试。**Apache IoTDB 的集成测试以黑盒测试的方式进行。通过 JDBC 或 Session API 的接口实现测试输入的用例即为黑盒测试用例。** 因此,测试用例的输出验证也应该通过 JDBC 或 Session API 的返回结果实现。 +### 集成测试的步骤 +集成测试的步骤主要分为三步,即 (1) 构建测试类和标注测试环境、(2) 设置测试前的准备工作以及测试后的清理工作以及 (3) 实现集成测试逻辑。如果需要测试非默认环境下的 IoTDB,还需要修改 IoTDB 的配置,修改方法对应小结的第4部分。 -- 步骤2. 环境代码重构 + - - 如果测试用例需要在Cluster或者Remote环境下被测试,那么必须对环境相关的代码作相应重构,如果是仅在LocalStandalone环境下测试,则只推荐修改。(不是所有的测试用例可以在Cluster或者Remote环境下被测试,因为受限于部分功能的语句比如本地文件操作,这些代码不能被重构。) +#### 1. 
集成测试类和注解 - | | LocalStandalone | Cluster | Remote | - | :------------------------- | :-------------: | :---------: | :---------: | - | setUp and tearDown | Recommend | Must | Must | - | getConnection | Recommend | Must | Must | - | change config | Recommend | Must | Not support | - | Local file operation | Won't change | Not support | Not support | - | Local descriptor operation | Won't change | Not support | Not support | - | restart operation | Won't change | Not support | Not support | - - - - - `setUp` 和`tearDown` 方法内的重构,在Cluster和Remote环境下是必须更改的 - - ```java - @Category({LocalStandaloneTest.class, ClusterTest.class, RemoteTest.class}) - public class IoTDBAliasIT { - - @BeforeClass - public static void setUp() throws Exception { - // EnvironmentUtils.closeStatMonitor(); // orginal setup code - // EnvironmentUtils.envSetUp(); // orginal setup code - EnvFactory.getEnv().initBeforeClass(); // new initBeforeClass code - - insertData(); - } - - @AfterClass - public static void tearDown() throws Exception { - // EnvironmentUtils.cleanEnv(); // orginal code - EnvFactory.getEnv().cleanAfterClass(); // new cleanAfterClass code - } +构建的集成测试类时,开发者需要在 Apache IoTDB 的 [integration-test](https://github.com/apache/iotdb/tree/master/integration-test) 模块中创建测试类。类名应当能够精简准确地表述该集成测试的目的。除用于服务其他测试用例的类外,含集成测试用例用于测试 Apache IoTDB 功能的类,应当命名为“功能+IT”。例如,用于测试IoTDB自动注册元数据功能的集成测试命名为“IoTDBAutoCreateSchemaIT”。 + +- Category 注解:**在构建集成测试类时,需要显式地通过引入```@Category```注明测试环境** ,测试环境用```LocalStandaloneIT.class```、```ClusterIT.class``` 和 ```RemoteIT.class```来表示,分别与“Apache IoTDB 集成测试的环境”中的本地单机测试环境、本地集群测试环境和远程测试环境对应。标签内是测试环境的集合,可以包含多个元素,表示在多种环境下分别测试。**一般情况下,标签```LocalStandaloneIT.class``` 和 ```ClusterIT.class``` 是必须添加的。** 当某些功能仅支持单机版 IoTDB 时可以只保留```LocalStandaloneIT.class```。 +- RunWith 注解: 每一个集成测试类上都需要添加 ```@RunWith(IoTDBTestRunner.class)``` 标签。 + +```java +// 给 IoTDBAliasIT 测试类加标签,分别在本地单机测试环境、 +// 本地集群测试环境和远程测试环境完成测试。 +@RunWith(IoTDBTestRunner.class) +@Category({LocalStandaloneIT.class, ClusterIT.class, RemoteIT.class}) +public class IoTDBAliasIT { + ... +} + +// 给 IoTDBAlignByDeviceIT 测试类加标签,分别在本地单机 +// 测试环境和本地集群测试环境完成测试。 +@RunWith(IoTDBTestRunner.class) +@Category({LocalStandaloneIT.class, ClusterIT.class}) +public class IoTDBAlignByDeviceIT { + ... +} +``` + +#### 2. 设置测试前的准备工作以及测试后的清理工作 + +测试前的准备工作包括启动 IoTDB(单机或集群)实例和测试用的数据准备。这些逻辑在setUp方法内实现。其中setUp方法前需要添加```@BeforeClass``` 或 ```@Before``` 标签,前者表示该方法为当前集成测试执行的第 1 个方法,并且在集成测试运行时只执行 1 次,后者表示在运行当前集成测试的每 1 个测试方法前,该方法都会被执行 1 次。 +- IoTDB 实例启动通过调用工厂类来实现,即```EnvFactory.getEnv().initBeforeClass()```。 +- 测试用的数据准备包括按测试需要提前注册存储组、注册时间序列、写入时间序列数据等。建议在测试类内实现单独的方法来准备数据,如insertData()。若需要写入多条数据,请使用批量写入的接口(JDBC中的executeBatch接口,或Session API 中的 insertRecords、insertTablets 等接口)。 + +```java +@BeforeClass +public static void setUp() throws Exception { + // 启动 IoTDB 实例 + EnvFactory.getEnv().initBeforeClass(); + ... // 准备数据 +} +``` + +测试后需要清理相关的环境,其中需要断开还没有关闭的连接。这些逻辑在 tearDown 方法内实现。其中 tearDown 方法前需要添加```@AfterClass``` 或 ```@After``` 标签,前者表示该方法为当前集成测试执行的最后一个方法,并且在集成测试运行时只执行 1 次,后者表示在运行当前集成测试的每一个测试方法后,该方法都会被执行 1 次。 +- 如果 IoTDB 连接以测试类成员变量的形式声明,并且在测试后没有断开连接,则需要在 tearDown 方法内显式断开。 +- IoTDB 环境的清理通过调用工厂类来实现,即```EnvFactory.getEnv().cleanAfterClass()```。 + +```java +@AfterClass +public static void tearDown() throws Exception { + ... // 断开连接等 + // 清理 IoTDB 实例的环境 + EnvFactory.getEnv().cleanAfterClass(); +} +``` + +#### 3. 
实现集成测试逻辑 + +Apache IoTDB 的集成测试以黑盒测试的方式进行,测试方法的名称为“测试的功能点+Test”,例如“selectWithAliasTest”。测试通过 JDBC 或 Session API 的接口来完成。 + +1、使用JDBC接口 + +使用JDBC接口时,建议将连接建立在 try 语句内,以这种方式建立的连接无需在 tearDown 方法内关闭。连接需要通过工厂类来建立,即```EnvFactory.getEnv().getConnection()```,不要指定具体的 ip 地址或端口号。示例代码如下所示。 + +```java +@Test +public void someFunctionTest(){ + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + ... // 执行相应语句并做测试 + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); } - ``` - - +} +``` +注意: +- **查询操作必须使用```executeQuery()```方法,返回ResultSet;** 对于**更新数据库等无返回值的操作,必须使用```execute()```方法。** 示例代码如下。 - - `getConnection` 的重构,在Cluster和Remote环境下是必须更改 - - ```java - private static void insertData() throws ClassNotFoundException { - // Class.forName(Config.JDBC_DRIVER_NAME); // orginal connection code - // try (Connection connection = - // DriverManager.getConnection( - // Config.IOTDB_URL_PREFIX + "127.0.0.1:6667/", "root", "root"); - try (Connection connection = EnvFactory.getEnv().getConnection(); // new code - Statement statement = connection.createStatement()) { - - for (String sql : sqls) { - statement.execute(sql); - } - } catch (Exception e) { - e.printStackTrace(); - } +```java +@Test +public void exampleTest() throws Exception { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + // 使用 execute() 方法设置存储组 + statement.execute("set storage group to root.sg"); + // 使用 executeQuery() 方法查询存储组 + try (ResultSet resultSet = statement.executeQuery("show storage group")) { + if (resultSet.next()) { + String storageGroupPath = resultSet.getString("storage group"); + Assert.assertEquals("root.sg", storageGroupPath); + } else { + Assert.fail("This ResultSet is empty."); } - ``` - - - - - 更改配置的方法,在Cluster环境下是必须重构的。(由于目前Remote环境无法远程更改配置,更改配置的测试用例将不支持Remote环境下测试) - - - 在Cluster环境下,由于无法动态更改配置,只有环境init前的配置更改才有效。 - - 重构已包含了大部分的配置更改,通过`ConfigFactory.getConfig()` 的方法可以进行链式更改。 - - ```java - @Category({LocalStandaloneTest.class, ClusterTest.class}) - public class IoTDBCompleteIT { - private int prevVirtualStorageGroupNum; - - @Before - public void setUp() { - prevVirtualStorageGroupNum = - IoTDBDescriptor.getInstance().getConfig().getVirtualStorageGroupNum(); - // IoTDBDescriptor.getInstance().getConfig().setVirtualStorageGroupNum(16); // orginal code - ConfigFactory.getConfig().setVirtualStorageGroupNum(16); // new code - EnvFactory.getEnv().initBeforeClass(); - } - ``` - - - 若配置项尚未在`ConfigFactory.getConfig()` 的方法中包含,需要在BaseConfig.java接口文件中定义,在StandaloneEnvConfig.java和ClusterEnvConfig.java中分别实现,这部分不是很常用,具体方法可以参考已实现的部分,目前暂不列出。 + } + } +} +``` + +2、使用 Session API + +目前暂不支持使用 Session API 来做集成测试。 + +3、测试方法的环境标签 +对于测试方法,开发者也可以指定特定的测试环境,只需要在对应的测试方法前注明环境即可。值得注意的是,有额外测试环境标注的用例,不但会在所指定的环境中进行测试,还会在该用例隶属的测试类所对应的环境中进行测试。示例代码如下。 + + +```java +@RunWith(IoTDBTestRunner.class) +@Category({LocalStandaloneIT.class}) +public class IoTDBExampleIT { + + // 该用例只会在本地单机测试环境中进行测试 + @Test + public void theStandaloneCaseTest() { + ... + } + + // 该用例会在本地单机测试环境、本地集群测试环境和远程测试环境中进行测试 + @Test + @Category({ClusterIT.class, RemoteIT.class}) + public void theAllEnvCaseTest() { + ... + } +} +``` + +#### 4. 
测试中 IoTDB 配置参数的修改 + +有时,为了测试 IoTDB 在特定配置条件下的功能需要更改其配置。由于远程的机器配置无法修改,因此,需要更改配置的测试不支持远程测试环境,只支持本地单机测试环境和本地集群测试环境。配置文件的修改需要在setUp方法中实现,在```EnvFactory.getEnv().initBeforeClass()```之前执行,应当使用 ConfigFactory 提供的方法来实现。在 tearDown 方法内,需要将 IoTDB 的配置恢复到原默认设置,这一步在环境清理(```EnvFactory.getEnv().cleanAfterTest()```)后通过调用ConfigFactory提供的方法来执行。实例代码如下。 + +```java +@RunWith(IoTDBTestRunner.class) +@Category({LocalStandaloneIT.class, ClusterIT.class}) +public class IoTDBAlignedSeriesQueryIT { + + protected static boolean enableSeqSpaceCompaction; + protected static boolean enableUnseqSpaceCompaction; + protected static boolean enableCrossSpaceCompaction; + + @BeforeClass + public static void setUp() throws Exception { + // 获取默认配置 + enableSeqSpaceCompaction = ConfigFactory.getConfig().isEnableSeqSpaceCompaction(); + enableUnseqSpaceCompaction = ConfigFactory.getConfig().isEnableUnseqSpaceCompaction(); + enableCrossSpaceCompaction = ConfigFactory.getConfig().isEnableCrossSpaceCompaction(); + // 更新配置 + ConfigFactory.getConfig().setEnableSeqSpaceCompaction(false); + ConfigFactory.getConfig().setEnableUnseqSpaceCompaction(false); + ConfigFactory.getConfig().setEnableCrossSpaceCompaction(false); + EnvFactory.getEnv().initBeforeClass(); + AlignedWriteUtil.insertData(); + } + + @AfterClass + public static void tearDown() throws Exception { + EnvFactory.getEnv().cleanAfterClass(); + // 恢复为默认配置 + ConfigFactory.getConfig().setEnableSeqSpaceCompaction(enableSeqSpaceCompaction); + ConfigFactory.getConfig().setEnableUnseqSpaceCompaction(enableUnseqSpaceCompaction); + ConfigFactory.getConfig().setEnableCrossSpaceCompaction(enableCrossSpaceCompaction); + } +} +``` + +### 集成测试的启动命令 + +1、在本地集群测试环境下运行集成测试 + +```shell script +mvn clean verify \ + -Dsession.test.skip=true \ + -Diotdb.test.skip=true \ + -Dcluster.test.skip=true \ + -Dtsfile.test.skip=true \ + -Dcommons.test.skip=true \ + -Dconfignode.test.skip=true \ + -Dconsensus.test.skip=true \ + -Djdbc.test.skip=true \ + -Dmetrics.test.skip=true \ + -pl integration-test -am -PClusterIT +``` +2、在本地单机测试环境下运行集成测试 + +```shell script +mvn clean verify \ + -Dsession.test.skip=true \ + -Diotdb.test.skip=true \ + -Dcluster.test.skip=true \ + -Dtsfile.test.skip=true \ + -Dcommons.test.skip=true \ + -Dconfignode.test.skip=true \ + -Dconsensus.test.skip=true \ + -pl integration-test -am +``` + +3、在远程测试环境下运行集成测试 + +```shell script +mvn clean verify -pl integration-test -am -PRemoteIT \ + -DRemoteIp=127.0.0.1 \ + -DRemotePort=6667 +``` + +## Q&A +### CI 出错后查看日志的方法 +1、点击出错的测试对应的 Details + + + +2、查看和下载日志 + + + +也可以点击左上角的 summary 然后查看和下载其他错误日志。 + + + diff --git a/docs/zh/UserGuide/IoTDB-Introduction/Architecture.md b/docs/zh/UserGuide/IoTDB-Introduction/Architecture.md index 101e6ecc4d62..3cca9ceb7644 100644 --- a/docs/zh/UserGuide/IoTDB-Introduction/Architecture.md +++ b/docs/zh/UserGuide/IoTDB-Introduction/Architecture.md @@ -25,7 +25,7 @@ IoTDB 套件由若干个组件构成,共同形成“数据收集-数据写入- 如下图展示了使用 IoTDB 套件全部组件后形成的整体应用架构。下文称所有组件形成 IoTDB 套件,而 IoTDB 特指其中的时间序列数据库组件。 - + 在上图中,用户可以通过 JDBC 将来自设备上传感器采集的时序数据、服务器负载和 CPU 内存等系统状态数据、消息队列中的时序数据、应用程序的时序数据或者其他数据库中的时序数据导入到本地或者远程的 IoTDB 中。用户还可以将上述数据直接写成本地(或位于 HDFS 上)的 TsFile 文件。 diff --git a/docs/zh/UserGuide/Maintenance-Tools/JMX-Tool.md b/docs/zh/UserGuide/Maintenance-Tools/JMX-Tool.md index 927a9e139aa0..b360f6e672e4 100644 --- a/docs/zh/UserGuide/Maintenance-Tools/JMX-Tool.md +++ b/docs/zh/UserGuide/Maintenance-Tools/JMX-Tool.md @@ -34,7 +34,7 @@ Java VisualVM 提供了一个可视化的界面,用于查看 Java 应用程序 若新增用户,编辑`$IOTDB_HOME/conf/jmx.access`,添加新增用户权限 * IoTDB 不在本地 
-编辑`$IOTDB_HOME/conf/iotdb-env.sh` +编辑`$IOTDB_HOME/conf/datanode-env.sh` 修改以下参数: ``` JMX_LOCAL="false" diff --git a/docs/zh/UserGuide/Maintenance-Tools/Maintenance-Command.md b/docs/zh/UserGuide/Maintenance-Tools/Maintenance-Command.md index 70df0c7953c8..33a28e45ed52 100644 --- a/docs/zh/UserGuide/Maintenance-Tools/Maintenance-Command.md +++ b/docs/zh/UserGuide/Maintenance-Tools/Maintenance-Command.md @@ -111,3 +111,66 @@ KILL QUERY | | | | 其中 statement 最大显示长度为 64 字符。对于超过 64 字符的查询语句,将截取部分进行显示。 + +## 集群 Region 分布监控工具 + +集群中以 Region 作为数据复制和数据管理的单元,Region 的状态和分布对于系统运维和测试有很大帮助,如以下场景: + +- 查看集群中各个 Region 被分配到了哪些 DataNode,是否均衡 + +当前 IoTDB 支持使用如下 SQL 查看 Region: + +- `SHOW REGIONS`: 展示所有 Region +- `SHOW SCHEMA REGIONS`: 展示所有 SchemaRegion 分布 +- `SHOW DATA REGIONS`: 展示所有 DataRegion 分布 + +```sql +IoTDB> show regions ++--------+------------+------+-------------+-----+----------+----------+----+ +|RegionId| Type|Status|storage group|Slots|DataNodeId|Host|Port| ++--------+------------+------+-------------+-----+----------+----------+----+ +| 0|SchemaRegion| Up| root.sg| 0| 1| 127.0.0.1|6667| +| 1|SchemaRegion| Up| root.sg| 0| 3| 127.0.0.1|6669| +| 2|SchemaRegion| Up| root.sg| 1| 4| 127.0.0.1|6671| +| 3| DataRegion| Up| root.sg| 0| 3| 127.0.0.1|6669| +| 4| DataRegion| Up| root.sg| 0| 3| 127.0.0.1|6669| +| 5| DataRegion| Up| root.sg| 1| 1| 127.0.0.1|6667| +| 6| DataRegion| Up| root.sg| 0| 4| 127.0.0.1|6671| +| 7| DataRegion| Up| root.sg| 0| 1| 127.0.0.1|6667| +| 8| DataRegion| Up| root.sg| 0| 4| 127.0.0.1|6671| +| 9| DataRegion| Up| root.sg| 0| 1| 127.0.0.1|6667| +| 10| DataRegion| Up| root.sg| 0| 4| 127.0.0.1|6671| +| 11| DataRegion| Up| root.sg| 0| 3| 127.0.0.1|6669| +| 12| DataRegion| Up| root.sg| 0| 1| 127.0.0.1|6667| ++--------+------------+------+-------------+-----+----------+----------+----+ +Total line number = 13 +It costs 0.107s +IoTDB> show schema regions ++--------+------------+------+-------------+-----+----------+----------+----+ +|RegionId| Type|Status|storage group|Slots|DataNodeId|Host|Port| ++--------+------------+------+-------------+-----+----------+----------+----+ +| 0|SchemaRegion| Up| root.sg| 0| 1| 127.0.0.1|6667| +| 1|SchemaRegion| Up| root.sg| 0| 3| 127.0.0.1|6669| +| 2|SchemaRegion| Up| root.sg| 1| 4| 127.0.0.1|6671| ++--------+------------+------+-------------+-----+----------+----------+----+ +Total line number = 3 +It costs 0.009s +IoTDB> show data regions ++--------+----------+------+-------------+-----+----------+----------+----+ +|RegionId| Type|Status|storage group|Slots|DataNodeId|Host|Port| ++--------+----------+------+-------------+-----+----------+----------+----+ +| 3|DataRegion| Up| root.sg| 0| 3| 127.0.0.1|6669| +| 4|DataRegion| Up| root.sg| 0| 3| 127.0.0.1|6669| +| 5|DataRegion| Up| root.sg| 1| 1| 127.0.0.1|6667| +| 6|DataRegion| Up| root.sg| 0| 4| 127.0.0.1|6671| +| 7|DataRegion| Up| root.sg| 0| 1| 127.0.0.1|6667| +| 8|DataRegion| Up| root.sg| 0| 4| 127.0.0.1|6671| +| 9|DataRegion| Up| root.sg| 0| 1| 127.0.0.1|6667| +| 10|DataRegion| Up| root.sg| 0| 4| 127.0.0.1|6671| +| 11|DataRegion| Up| root.sg| 0| 3| 127.0.0.1|6669| +| 12|DataRegion| Up| root.sg| 0| 1| 127.0.0.1|6667| ++--------+----------+------+-------------+-----+----------+----------+----+ +Total line number = 10 +It costs 0.023s +``` + diff --git a/docs/zh/UserGuide/Maintenance-Tools/Metric-Tool.md b/docs/zh/UserGuide/Maintenance-Tools/Metric-Tool.md index 1e989a113a07..fbb8cf2d3761 100644 --- a/docs/zh/UserGuide/Maintenance-Tools/Metric-Tool.md +++ b/docs/zh/UserGuide/Maintenance-Tools/Metric-Tool.md 
@@ -76,119 +76,142 @@ IoTDB对外提供JMX和Prometheus格式的监控指标,对于JMX,可以通 #### 4.3.1. 接入层 -| Metric | Tag | level | 说明 | 示例 | -| ------------------- | --------------- | ------ | ---------------- | -------------------------------------------- | +| Metric | Tag | level | 说明 | 示例 | +| ------------------- | --------------- | --------- | ---------------- | -------------------------------------------- | | entry_seconds_count | name="接口名" | important | 接口累计访问次数 | entry_seconds_count{name="openSession",} 1.0 | | entry_seconds_sum | name="接口名" | important | 接口累计耗时(s) | entry_seconds_sum{name="openSession",} 0.024 | | entry_seconds_max | name="接口名" | important | 接口最大耗时(s) | entry_seconds_max{name="openSession",} 0.024 | | quantity_total | name="pointsIn" | important | 系统累计写入点数 | quantity_total{name="pointsIn",} 1.0 | -#### 4.3.2. 文件 +#### 4.3.2. Task -| Metric | Tag | level | 说明 | 示例 | -| ---------- | -------------------- | ------ | ----------------------------------- | --------------------------- | -| file_size | name="wal/seq/unseq" | important | 当前时间wal/seq/unseq文件大小(byte) | file_size{name="wal",} 67.0 | -| file_count | name="wal/seq/unseq" | important | 当前时间wal/seq/unseq文件个数 | file_count{name="seq",} 1.0 | - -#### 4.3.3. Flush - -| Metric | Tag | level | 说明 | 示例 | -| ----------------------- | ------------------------------------------- | ------ | -------------------------------- | --------------------------------------------------------------------------------------- | -| queue | name="flush",
status="running/waiting" | important | 当前时间flush任务数 | queue{name="flush",status="waiting",} 0.0
queue{name="flush",status="running",} 0.0 | -| cost_task_seconds_count | name="flush" | important | flush累计发生次数 | cost_task_seconds_count{name="flush",} 1.0 | -| cost_task_seconds_max | name="flush" | important | 到目前为止flush耗时(s)最大的一次 | cost_task_seconds_max{name="flush",} 0.363 | -| cost_task_seconds_sum | name="flush" | important | flush累计耗时(s) | cost_task_seconds_sum{name="flush",} 0.363 | - -#### 4.3.4. Compaction - -| Metric | Tag | level | 说明 | 示例 | -|-------------------------|-------------------------------------------------------------------------|--------------------|---------------------------|------------------------------------------------------| -| queue | name="compaction_inner/compaction_cross",
status="running/waiting" | important | 当前时间compaction任务数 | queue{name="compaction_inner",status="waiting",} 0.0 | -| cost_task_seconds_count | name="compaction" | important | compaction累计发生次数 | cost_task_seconds_count{name="compaction",} 1.0 | -| cost_task_seconds_max | name="compaction" | important | 到目前为止compaction耗时(s)最大的一次 | cost_task_seconds_max{name="compaction",} 0.363 | -| cost_task_seconds_sum | name="compaction" | important | compaction累计耗时(s) | cost_task_seconds_sum{name="compaction",} 0.363 | -| data_written | name="compaction",
type="aligned/not-aligned/total" | important | 合并文件时写入量 | data_written{name="compaction",type="total",} 10240 | -| data_read | name="compaction" | important | 合并文件时的读取量 | data_read={name="compaction",} 10240 | +| Metric | Tag | level | 说明 | 示例 | +| ----------------------- | ----------------------------------------------------------------------------- | --------- | ------------------------------- | -------------------------------------------------------------------------------------------------- | +| queue | name="compaction_inner/compaction_cross/flush",
status="running/waiting" | important | 当前时间任务数 | queue{name="flush",status="waiting",} 0.0
queue{name="compaction/flush",status="running",} 0.0 | +| cost_task_seconds_count | name="inner_compaction/cross_compaction/flush" | important | 任务累计发生次数 | cost_task_seconds_count{name="flush",} 1.0 | +| cost_task_seconds_max | name="inner_compaction/cross_compaction/flush" | important | 到目前为止任务耗时(s)最大的一次 | cost_task_seconds_max{name="flush",} 0.363 | +| cost_task_seconds_sum | name="inner_compaction/cross_compaction/flush" | important | 任务累计耗时(s) | cost_task_seconds_sum{name="flush",} 0.363 | +| data_written | name="compaction",
type="aligned/not-aligned/total" | important | 合并文件时写入量 | data_written{name="compaction",type="total",} 10240 | +| data_read | name="compaction" | important | 合并文件时的读取量 | data_read={name="compaction",} 10240 | -#### 4.3.5. 内存占用 +#### 4.3.3. 内存占用 -| Metric | Tag | 说明 | level | 示例 | -| ------ | --------------------------------------- | ------ | -------------------------------------------------- | --------------------------------- | +| Metric | Tag | level | 说明 | 示例 | +| ------ | --------------------------------------- | --------- | -------------------------------------------------- | --------------------------------- | | mem | name="chunkMetaData/storageGroup/mtree" | important | chunkMetaData/storageGroup/mtree占用的内存(byte) | mem{name="chunkMetaData",} 2050.0 | -#### 4.3.6. 缓存命中率 +#### 4.3.4. 缓存 -| Metric | Tag | level | 说明 | 示例 | -| --------- | --------------------------------------- | ------ | ------------------------------------------------ | --------------------------- | -| cache_hit | name="chunk/timeSeriesMeta/bloomFilter" | important | chunk/timeSeriesMeta缓存命中率,bloomFilter拦截率 | cache_hit{name="chunk",} 80 | +| Metric | Tag | level | 说明 | 示例 | +| ----------- | ----------------------------------------------------------------- | --------- | ------------------------------------------------------------ | --------------------------------------------------- | +| cache_hit | name="chunk/timeSeriesMeta/bloomFilter/SchemaCache" | important | chunk/timeSeriesMeta/SchemaCache缓存命中率,bloomFilter拦截率 | cache_hit{name="chunk",} 80 | +| cache_total | name="StorageGroup/SchemaPartition/DataPartition", type="hit/all" | important | StorageGroup/SchemaPartition/DataPartition 的命中/总次数 | cache_total{name="DataPartition",type="all",} 801.0 | -#### 4.3.7. 业务数据 +#### 4.3.5. 业务数据 -| Metric | Tag | level | 说明 | 示例 | -| -------- | ------------------------------------- | ------ | -------------------------------------------- | -------------------------------- | +| Metric | Tag | level | 说明 | 示例 | +| -------- | ------------------------------------- | --------- | -------------------------------------------- | -------------------------------- | | quantity | name="timeSeries/storageGroup/device" | important | 当前时间timeSeries/storageGroup/device的数量 | quantity{name="timeSeries",} 1.0 | -#### 4.3.8. 集群 - -| Metric | Tag | level | 说明 | 示例 | -| ------------------------- | ------------------------------- | ------ | ------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| cluster_node_leader_count | name="{{ip}}" | important | 节点上```dataGroupLeader```的数量,用来观察leader是否分布均匀 | cluster_node_leader_count{name="127.0.0.1",} 2.0 | -| cluster_uncommitted_log | name="{{ip_datagroupHeader}}" | important | 节点```uncommitted_log```的数量 | cluster_uncommitted_log{name="127.0.0.1_Data-127.0.0.1-40010-raftId-0",} 0.0 | -| cluster_node_status | name="{{ip}}" | important | 节点状态,1=online 2=offline | cluster_node_status{name="127.0.0.1",} 1.0 | -| cluster_elect_total | name="{{ip}}",status="fail/win" | important | 节点参与选举的次数及结果 | cluster_elect_total{name="127.0.0.1",status="win",} 1.0 | +#### 4.3.6. 
集群 + +| Metric | Tag | level | 说明 | 示例 | +| ------------------------- | ------------------------------------------------------------------ | --------- | ------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| cluster_node_leader_count | name="{{ip}}" | important | 节点上```dataGroupLeader```的数量,用来观察leader是否分布均匀 | cluster_node_leader_count{name="127.0.0.1",} 2.0 | +| cluster_uncommitted_log | name="{{ip_datagroupHeader}}" | important | 节点```uncommitted_log```的数量 | cluster_uncommitted_log{name="127.0.0.1_Data-127.0.0.1-40010-raftId-0",} 0.0 | +| cluster_node_status | name="{{ip}}" | important | 节点状态,1=online 2=offline | cluster_node_status{name="127.0.0.1",} 1.0 | +| cluster_elect_total | name="{{ip}}",status="fail/win" | important | 节点参与选举的次数及结果 | cluster_elect_total{name="127.0.0.1",status="win",} 1.0 | +| config_node | name="online" | core | 上线confignode的节点数量 | config_node{name="online",} 3.0 | +| data_node | name="online" | core | 上线datanode的节点数量 | data_node{name="online",} 3.0 | +| partition_table | name="number" | core | partition table表的个数 | partition_table{name="number",} 2.0 | +| region | name="total/{{ip}}:{{port}}",type="SchemaRegion/DataRegion" | important | 全部或某个节点的schemaRegion/dataRegion个数 | region{name="127.0.0.1:6671",type="DataRegion",} 10.0 | +| region | name="{{storageGroupName}}",type="SchemaRegion/DataRegion" | normal | 存储组的DataRegion/Schema个数 | region{name="root.schema.sg1",type="DataRegion",} 14.0 | +| slot | name="{{storageGroupName}}",type="schemaSlotNumber/dataSlotNumber" | normal | 存储组的schemaSlot/dataSlot个数 | slot{name="root.schema.sg1",type="schemaSlotNumber",} 2.0 | ### 4.4. IoTDB 预定义指标集 -用户可以在`iotdb-metric.yml`文件中,修改`predefinedMetrics`的值来启用预定义指标集,其中`LOGBACK`在`dropwizard`中不支持。 +用户可以在`iotdb-metric.yml`文件中,修改`predefinedMetrics`的值来启用预定义指标集,目前有`JVM`、`LOGBACK`、`FILE`、`PROCESS`、`SYSTEM`这五种。 #### 4.4.1. JVM ##### 4.4.1.1. 线程 -| Metric | Tag | 说明 | 示例 | -| -------------------------- | ------------------------------------------------------------- | ------------------------ | -------------------------------------------------- | -| jvm_threads_live_threads | 无 | 当前线程数 | jvm_threads_live_threads 25.0 | -| jvm_threads_daemon_threads | 无 | 当前daemon线程数 | jvm_threads_daemon_threads 12.0 | -| jvm_threads_peak_threads | 无 | 峰值线程数 | jvm_threads_peak_threads 28.0 | -| jvm_threads_states_threads | state="runnable/blocked/waiting/timed-waiting/new/terminated" | 当前处于各种状态的线程数 | jvm_threads_states_threads{state="runnable",} 10.0 | +| Metric | Tag | level | 说明 | 示例 | +| -------------------------- | ------------------------------------------------------------- | --------- | ------------------------ | -------------------------------------------------- | +| jvm_threads_live_threads | 无 | important | 当前线程数 | jvm_threads_live_threads 25.0 | +| jvm_threads_daemon_threads | 无 | important | 当前daemon线程数 | jvm_threads_daemon_threads 12.0 | +| jvm_threads_peak_threads | 无 | important | 峰值线程数 | jvm_threads_peak_threads 28.0 | +| jvm_threads_states_threads | state="runnable/blocked/waiting/timed-waiting/new/terminated" | important | 当前处于各种状态的线程数 | jvm_threads_states_threads{state="runnable",} 10.0 | ##### 4.4.1.2. 
垃圾回收 -| Metric | Tag | 说明 | 示例 | -| ----------------------------------- | ------------------------------------------------------ | -------------------------------------------- | --------------------------------------------------------------------------------------- | -| jvm_gc_pause_seconds_count | action="end of major GC/end of minor GC",cause="xxxx" | YGC/FGC发生次数及其原因 | jvm_gc_pause_seconds_count{action="end of major GC",cause="Metadata GC Threshold",} 1.0 | -| jvm_gc_pause_seconds_sum | action="end of major GC/end of minor GC",cause="xxxx" | YGC/FGC累计耗时及其原因 | jvm_gc_pause_seconds_sum{action="end of major GC",cause="Metadata GC Threshold",} 0.03 | -| jvm_gc_pause_seconds_max | action="end of major GC",cause="Metadata GC Threshold" | YGC/FGC最大耗时及其原因 | jvm_gc_pause_seconds_max{action="end of major GC",cause="Metadata GC Threshold",} 0.0 | -| jvm_gc_overhead_percent | 无 | GC消耗cpu的比例 | jvm_gc_overhead_percent 0.0 | -| jvm_gc_memory_promoted_bytes_total | 无 | 从GC之前到GC之后老年代内存池大小正增长的累计 | jvm_gc_memory_promoted_bytes_total 8425512.0 | -| jvm_gc_max_data_size_bytes | 无 | 老年代内存的历史最大值 | jvm_gc_max_data_size_bytes 2.863661056E9 | -| jvm_gc_live_data_size_bytes | 无 | GC后老年代内存的大小 | jvm_gc_live_data_size_bytes 8450088.0 | -| jvm_gc_memory_allocated_bytes_total | 无 | 在一个GC之后到下一个GC之前年轻代增加的内存 | jvm_gc_memory_allocated_bytes_total 4.2979144E7 | +| Metric | Tag | level | 说明 | 示例 | +| ----------------------------------- | ------------------------------------------------------ | --------- | -------------------------------------------- | --------------------------------------------------------------------------------------- | +| jvm_gc_pause_seconds_count | action="end of major GC/end of minor GC",cause="xxxx" | important | YGC/FGC发生次数及其原因 | jvm_gc_pause_seconds_count{action="end of major GC",cause="Metadata GC Threshold",} 1.0 | +| jvm_gc_pause_seconds_sum | action="end of major GC/end of minor GC",cause="xxxx" | important | YGC/FGC累计耗时及其原因 | jvm_gc_pause_seconds_sum{action="end of major GC",cause="Metadata GC Threshold",} 0.03 | +| jvm_gc_pause_seconds_max | action="end of major GC",cause="Metadata GC Threshold" | important | YGC/FGC最大耗时及其原因 | jvm_gc_pause_seconds_max{action="end of major GC",cause="Metadata GC Threshold",} 0.0 | +| jvm_gc_memory_promoted_bytes_total | 无 | important | 从GC之前到GC之后老年代内存池大小正增长的累计 | jvm_gc_memory_promoted_bytes_total 8425512.0 | +| jvm_gc_max_data_size_bytes | 无 | important | 老年代内存的历史最大值 | jvm_gc_max_data_size_bytes 2.863661056E9 | +| jvm_gc_live_data_size_bytes | 无 | important | GC后老年代内存的大小 | jvm_gc_live_data_size_bytes 8450088.0 | +| jvm_gc_memory_allocated_bytes_total | 无 | important | 在一个GC之后到下一个GC之前年轻代增加的内存 | jvm_gc_memory_allocated_bytes_total 4.2979144E7 | ##### 4.4.1.3. 内存 -| Metric | Tag | 说明 | 示例 | -| ------------------------------- | ------------------------------- | ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| jvm_buffer_memory_used_bytes | id="direct/mapped" | 已经使用的缓冲区大小 | jvm_buffer_memory_used_bytes{id="direct",} 3.46728099E8 | -| jvm_buffer_total_capacity_bytes | id="direct/mapped" | 最大缓冲区大小 | jvm_buffer_total_capacity_bytes{id="mapped",} 0.0 | -| jvm_buffer_count_buffers | id="direct/mapped" | 当前缓冲区数量 | jvm_buffer_count_buffers{id="direct",} 183.0 | -| jvm_memory_committed_bytes | {area="heap/nonheap",id="xxx",} | 当前向JVM申请的内存大小 | jvm_memory_committed_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8
jvm_memory_committed_bytes{area="nonheap",id="Metaspace",} 3.9051264E7
| -| jvm_memory_max_bytes | {area="heap/nonheap",id="xxx",} | JVM最大内存 | jvm_memory_max_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8
jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space",} 1.073741824E9 | -| jvm_memory_used_bytes | {area="heap/nonheap",id="xxx",} | JVM已使用内存大小 | jvm_memory_used_bytes{area="heap",id="Par Eden Space",} 1.000128376E9
jvm_memory_used_bytes{area="nonheap",id="Code Cache",} 2.9783808E7
| +| Metric | Tag | level | 说明 | 示例 | +| ------------------------------- | ------------------------------- | --------- | ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| jvm_buffer_memory_used_bytes | id="direct/mapped" | important | 已经使用的缓冲区大小 | jvm_buffer_memory_used_bytes{id="direct",} 3.46728099E8 | +| jvm_buffer_total_capacity_bytes | id="direct/mapped" | important | 最大缓冲区大小 | jvm_buffer_total_capacity_bytes{id="mapped",} 0.0 | +| jvm_buffer_count_buffers | id="direct/mapped" | important | 当前缓冲区数量 | jvm_buffer_count_buffers{id="direct",} 183.0 | +| jvm_memory_committed_bytes | {area="heap/nonheap",id="xxx",} | important | 当前向JVM申请的内存大小 | jvm_memory_committed_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8
jvm_memory_committed_bytes{area="nonheap",id="Metaspace",} 3.9051264E7
| +| jvm_memory_max_bytes | {area="heap/nonheap",id="xxx",} | important | JVM最大内存 | jvm_memory_max_bytes{area="heap",id="Par Survivor Space",} 2.44252672E8
jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space",} 1.073741824E9 | +| jvm_memory_used_bytes | {area="heap/nonheap",id="xxx",} | important | JVM已使用内存大小 | jvm_memory_used_bytes{area="heap",id="Par Eden Space",} 1.000128376E9
jvm_memory_used_bytes{area="nonheap",id="Code Cache",} 2.9783808E7
| ##### 4.4.1.4. Classes -| Metric | Tag | 说明 | 示例 | -| ---------------------------------- | --------------------------------------------- | ---------------------- | ----------------------------------------------------------------------------------- | -| jvm_classes_unloaded_classes_total | 无 | jvm累计卸载的class数量 | jvm_classes_unloaded_classes_total 680.0 | -| jvm_classes_loaded_classes | 无 | jvm累计加载的class数量 | jvm_classes_loaded_classes 5975.0 | -| jvm_compilation_time_ms_total | {compiler="HotSpot 64-Bit Tiered Compilers",} | jvm耗费在编译上的时间 | jvm_compilation_time_ms_total{compiler="HotSpot 64-Bit Tiered Compilers",} 107092.0 | +| Metric | Tag | level | 说明 | 示例 | +| ---------------------------------- | --------------------------------------------- | --------- | ---------------------- | ----------------------------------------------------------------------------------- | +| jvm_classes_unloaded_classes_total | 无 | important | jvm累计卸载的class数量 | jvm_classes_unloaded_classes_total 680.0 | +| jvm_classes_loaded_classes | 无 | important | jvm累计加载的class数量 | jvm_classes_loaded_classes 5975.0 | +| jvm_compilation_time_ms_total | {compiler="HotSpot 64-Bit Tiered Compilers",} | important | jvm耗费在编译上的时间 | jvm_compilation_time_ms_total{compiler="HotSpot 64-Bit Tiered Compilers",} 107092.0 | -#### 4.4.2. 日志(logback) +#### 4.4.2. 文件(File) -| Metric | Tag | 说明 | 示例 | -| -------------------- | -------------------------------------- | --------------------------------------- | --------------------------------------- | -| logback_events_total | {level="trace/debug/info/warn/error",} | trace/debug/info/warn/error日志累计数量 | logback_events_total{level="warn",} 0.0 | +| Metric | Tag | level | 说明 | 示例 | +| ---------- | -------------------- | --------- | ----------------------------------- | --------------------------- | +| file_size | name="wal/seq/unseq" | important | 当前时间wal/seq/unseq文件大小(byte) | file_size{name="wal",} 67.0 | +| file_count | name="wal/seq/unseq" | important | 当前时间wal/seq/unseq文件个数 | file_count{name="seq",} 1.0 | + +#### 4.4.3. 日志(logback) + +| Metric | Tag | level | 说明 | 示例 | +| -------------------- | -------------------------------------- | --------- | --------------------------------------- | --------------------------------------- | +| logback_events_total | {level="trace/debug/info/warn/error",} | important | trace/debug/info/warn/error日志累计数量 | logback_events_total{level="warn",} 0.0 | + +#### 4.4.4. 
进程(Process) +| Metric | Tag | level | 说明 | 示例 | +| --------------------- | -------------- | ----- | ---------------------------------- | ----------------------------------------------- | +| process_cpu_load | name="cpu" | core | process当前CPU占用率(%) | process_cpu_load{name="process",} 5.0 | +| process_cpu_time | name="cpu" | core | process累计占用CPU时间(ns) | process_cpu_time{name="process",} 3.265625E9 | +| process_max_mem | name="memory" | core | JVM最大可用内存 | process_max_mem{name="process",} 3.545759744E9 | +| process_used_mem | name="memory" | core | JVM当前使用内存 | process_used_mem{name="process",} 4.6065456E7 | +| process_total_mem | name="memory" | core | JVM当前已申请内存 | process_total_mem{name="process",} 2.39599616E8 | +| process_free_mem | name="memory" | core | JVM当前剩余可用内存 | process_free_mem{name="process",} 1.94035584E8 | +| process_mem_ratio | name="memory" | core | 进程的内存占用比例 | process_mem_ratio{name="process",} 0.0 | +| process_threads_count | name="process" | core | 当前线程数 | process_threads_count{name="process",} 11.0 | +| process_status | name="process" | core | 进程存活状态,1.0为存活,0.0为终止 | process_status{name="process",} 1.0 | + +#### 4.4.5. 系统(System) +| Metric | Tag | level | 说明 | 示例 | +| ------------------------------ | ------------- | --------- | ------------------------------------------ | -------------------------------------------------------------- | +| sys_cpu_load | name="cpu" | core | system当前CPU占用率(%) | sys_cpu_load{name="system",} 15.0 | +| sys_cpu_cores | name="cpu" | core | jvm可用处理器数 | sys_cpu_cores{name="system",} 16.0 | +| sys_total_physical_memory_size | name="memory" | core | system最大物理内存 | sys_total_physical_memory_size{name="system",} 1.5950999552E10 | +| sys_free_physical_memory_size | name="memory" | core | system当前剩余可用内存 | sys_free_physical_memory_size{name="system",} 4.532396032E9 | +| sys_total_swap_space_size | name="memory" | core | system交换区最大空间 | sys_total_swap_space_size{name="system",} 2.1051273216E10 | +| sys_free_swap_space_size | name="memory" | core | system交换区剩余可用空间 | sys_free_swap_space_size{name="system",} 2.931576832E9 | +| sys_committed_vm_size | name="memory" | important | system保证可用于正在运行的进程的虚拟内存量 | sys_committed_vm_size{name="system",} 5.04344576E8 | +| sys_disk_total_space | name="disk" | core | 磁盘总大小 | sys_disk_total_space{name="system",} 5.10770798592E11 | +| sys_disk_free_space | name="disk" | core | 磁盘可用大小 | sys_disk_free_space{name="system",} 3.63467845632E11 | ### 4.5. 自定义添加埋点 @@ -217,6 +240,9 @@ metric采集默认是关闭的,需要先到conf/iotdb-metric.yml中打开后 # 是否启动监控模块,默认为false enableMetric: false +# 是否启用操作延迟统计 +enablePerformanceStat: false + # 数据提供方式,对外部通过jmx和prometheus协议提供metrics的数据, 可选参数:[JMX, PROMETHEUS, IOTDB],IOTDB是默认关闭的。 metricReporterList: - JMX @@ -228,9 +254,10 @@ monitorType: MICROMETER # 初始化metric的级别,可选参数: [CORE, IMPORTANT, NORMAL, ALL] metricLevel: IMPORTANT -# 预定义的指标集, 可选参数: [JVM, LOGBACK], 其中LOGBACK在dropwizard中不支持 +# 预定义的指标集, 可选参数: [JVM, LOGBACK, FILE, PROCESS, SYSTEM] predefinedMetrics: - JVM + - FILE # Prometheus Reporter 使用的端口 prometheusExporterPort: 9091 diff --git a/docs/zh/UserGuide/Maintenance-Tools/Monitor-and-Log-Tools.md b/docs/zh/UserGuide/Maintenance-Tools/Monitor-and-Log-Tools.md index 3e90ceefbdb7..49eca9d4e89c 100644 --- a/docs/zh/UserGuide/Maintenance-Tools/Monitor-and-Log-Tools.md +++ b/docs/zh/UserGuide/Maintenance-Tools/Monitor-and-Log-Tools.md @@ -49,7 +49,7 @@ #### 配置参数 -配置文件位置:conf/iotdb-engine.properties +配置文件位置:conf/iotdb-datanode.properties
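+
+下面给出一个示意性的操作片段(仅作说明,并非官方固定流程):修改 conf/iotdb-datanode.properties 中的监控相关配置后,若相关参数支持热修改(具体以配置手册中"改后生效方式"一栏为准),可在 CLI 中执行下述 SQL 命令使其立即生效;不支持热修改的参数仍需重启服务。
+
+```sql
+load configuration
+```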
diff --git a/docs/zh/UserGuide/Maintenance-Tools/Sync-Tool.md b/docs/zh/UserGuide/Maintenance-Tools/Sync-Tool.md index b261531660a5..7ca0492ea230 100644 --- a/docs/zh/UserGuide/Maintenance-Tools/Sync-Tool.md +++ b/docs/zh/UserGuide/Maintenance-Tools/Sync-Tool.md @@ -116,7 +116,7 @@ IoTDB> DROP PIPE my_pipe ## 5.配置参数 -所有参数修改均在`$IOTDB_HOME$/conf/iotdb-engine.properties`中,所有修改完成之后执行`load configuration`之后即可立刻生效。 +所有参数修改均在`$IOTDB_HOME$/conf/iotdb-datanode.properties`中,所有修改完成之后执行`load configuration`之后即可立刻生效。 #### 5.1发送端相关 @@ -292,7 +292,7 @@ IoTDB> SHOW PIPESERVER #### **接收端操作** -- `vi conf/iotdb-engine.properties` 配置云端参数,将白名单设置为仅接收来自IP为 192.168.0.1的边端的数据 +- `vi conf/iotdb-datanode.properties` 配置云端参数,将白名单设置为仅接收来自IP为 192.168.0.1的边端的数据 ``` #################### @@ -426,7 +426,7 @@ It costs 0.134s ``` - 原因:接收端未启动或接收端无法连接 - - 解决方案:在接收端执行 `SHOW PIPESERVER` 检查是否启动接收端,若未启动使用 `START PIPESERVER` 启动;检查接收端`iotdb-engine.properties`中的白名单是否包含发送端ip。 + - 解决方案:在接收端执行 `SHOW PIPESERVER` 检查是否启动接收端,若未启动使用 `START PIPESERVER` 启动;检查接收端`iotdb-datanode.properties`中的白名单是否包含发送端ip。 - 执行 diff --git a/docs/zh/UserGuide/Maintenance-Tools/Watermark-Tool.md b/docs/zh/UserGuide/Maintenance-Tools/Watermark-Tool.md index b944545e48c4..e1fbb7bd7b2c 100644 --- a/docs/zh/UserGuide/Maintenance-Tools/Watermark-Tool.md +++ b/docs/zh/UserGuide/Maintenance-Tools/Watermark-Tool.md @@ -25,7 +25,7 @@ under the License. #### 配置 -IoTDB 默认关闭水印嵌入功能。为了使用这个功能,第一步要做的事情是修改配置文件`iotdb-engine.properties`中的以下各项: +IoTDB 默认关闭水印嵌入功能。为了使用这个功能,第一步要做的事情是修改配置文件`iotdb-datanode.properties`中的以下各项: | 名称 | 示例 | 解释 | | ----------------------- | ------------------------------------------------------ | ----------------------------------- | @@ -43,7 +43,7 @@ IoTDB 默认关闭水印嵌入功能。为了使用这个功能,第一步要 - 均是正整数 - `embed_row_cycle`控制了被嵌入水印的行占总行数的比例。`embed_row_cycle`越小,被嵌入水印的行的比例就越大。当`embed_row_cycle`等于 1 的时候,所有的行都将嵌入水印。 - GroupBasedLSBMethod 使用 LSB 嵌入。`embed_lsb_num`控制了允许嵌入水印的最低有效位的数量。`embed_lsb_num`越大,数值的可变化范围就越大。 -- `watermark_secret_key`, `watermark_bit_string`和`watermark_method`都不应该被攻击者获得。您需要自己负责配置文件`iotdb-engine.properties`的安全管理。 +- `watermark_secret_key`, `watermark_bit_string`和`watermark_method`都不应该被攻击者获得。您需要自己负责配置文件`iotdb-datanode.properties`的安全管理。 #### 使用示例 diff --git a/docs/zh/UserGuide/Operate-Metadata/Auto-Create-MetaData.md b/docs/zh/UserGuide/Operate-Metadata/Auto-Create-MetaData.md index 9e2a99c7e319..edd60a006658 100644 --- a/docs/zh/UserGuide/Operate-Metadata/Auto-Create-MetaData.md +++ b/docs/zh/UserGuide/Operate-Metadata/Auto-Create-MetaData.md @@ -82,14 +82,14 @@ ### 类型推断 -| 数据(String) | 字符串格式 | iotdb-engine.properties配置项 | 默认值 | -|:---:|:---|:---|:---| -| true | boolean | boolean\_string\_infer\_type | BOOLEAN | -| 1 | integer | integer\_string\_infer\_type | FLOAT | -| 17000000(大于 2^24 的整数) | integer | long\_string\_infer\_type | DOUBLE | +| 数据(String) | 字符串格式 | iotdb-datanode.properties配置项 | 默认值 | +|:---:|:---|:------------------------------|:---| +| true | boolean | boolean\_string\_infer\_type | BOOLEAN | +| 1 | integer | integer\_string\_infer\_type | FLOAT | +| 17000000(大于 2^24 的整数) | integer | long\_string\_infer\_type | DOUBLE | | 1.2 | floating | floating\_string\_infer\_type | FLOAT | -| NaN | nan | nan\_string\_infer\_type | DOUBLE | -| 'I am text' | text | 无 | 无 | +| NaN | nan | nan\_string\_infer\_type | DOUBLE | +| 'I am text' | text | 无 | 无 | * 可配置的数据类型包括:BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TEXT @@ -97,14 +97,14 @@ ### 编码方式 -| 数据类型 | iotdb-engine.properties配置项 | 默认值 | -|:---|:---|:---| -| BOOLEAN | default\_boolean\_encoding | RLE | -| INT32 | 
default\_int32\_encoding | RLE | -| INT64 | default\_int64\_encoding | RLE | -| FLOAT | default\_float\_encoding | GORILLA | -| DOUBLE | default\_double\_encoding | GORILLA | -| TEXT | default\_text\_encoding | PLAIN | +| 数据类型 | iotdb-datanode.properties配置项 | 默认值 | +|:---|:-----------------------------|:---| +| BOOLEAN | default\_boolean\_encoding | RLE | +| INT32 | default\_int32\_encoding | RLE | +| INT64 | default\_int64\_encoding | RLE | +| FLOAT | default\_float\_encoding | GORILLA | +| DOUBLE | default\_double\_encoding | GORILLA | +| TEXT | default\_text\_encoding | PLAIN | * 可配置的编码方式包括:PLAIN, RLE, TS_2DIFF, GORILLA, DICTIONARY diff --git a/docs/zh/UserGuide/Process-Data/Triggers.md b/docs/zh/UserGuide/Process-Data/Triggers.md index 94a9dc60293d..5413f8176654 100644 --- a/docs/zh/UserGuide/Process-Data/Triggers.md +++ b/docs/zh/UserGuide/Process-Data/Triggers.md @@ -277,7 +277,7 @@ SHOW TRIGGERS * `START_TRIGGER`:具备该权限的用户才被允许启动已被停止的触发器。该权限需要与触发器的路径绑定。 * `STOP_TRIGGER`:具备该权限的用户才被允许停止正在运行的触发器。该权限需要与触发器的路径绑定。 -更多用户权限相关的内容,请参考 [权限管理语句](../Operation%20Manual/Administration.md)。 +更多用户权限相关的内容,请参考 [权限管理语句](../Administration-Management/Administration.md)。 ## 实用工具类 @@ -617,6 +617,115 @@ annotations.put("description", "{{.alertname}}: {{.series}} is {{.value}}"); alertManagerHandler.onEvent(new AlertManagerEvent(alertName, extraLabels, annotations)); ``` +#### ForwardSink + +触发器可以使用ForwardSink通过HTTP和MQTT协议转发写入的数据,其内置了HTTPForwardHandler和MQTTForwardHandler。为提高连接使用效率,所有HTTPForwardHandler共用一个连接池,而host,port和username参数相同的MQTTForwardHandler共用一个连接池。 + +MQTTForwardHandler与MQTTHandler的区别在于,前者使用连接池而后者没有使用连接池,并且消息的格式也不同。 + +使用示例见[ForwardTrigger](#ForwardTrigger)。 + +## ForwardTrigger + +ForwardTrigger是内置的用于实现数据分发的触发器,它使用ForwardSink和消费队列实现了对触发事件的异步批量处理。采用异步的方式进行转发,可以避免因为转发阻塞导致的系统阻塞。而采用ForwardSink中的连接池可使得池中的连接可以得到高效、安全的复用,避免了连接频繁建立、关闭的开销。 + +Forward Queue Consume + +### 触发流程 +1. 触发事件到来。 +2. ForwardTrigger将触发事件放入队列池。 +3. 完成触发事件。 + +### 队列池消费流程 +1. 将触发事件按照Device入队(如没有Device,则轮询)。 +2. 每个队列消费者线程监控当前队列,若超时或达到最大转发批量则调用Handler批量转发。 +3. 
Handler批量序列化触发事件,消息封装完成后调用内置的连接池完成转发。 + +### 消息格式 +目前消息格式仅支持固定模板的JSON格式,模板如下: +``` +[{"device":"%s","measurement":"%s","timestamp":%d,"value":%s}] +``` + +### 使用示例 +#### 创建ForwardTrigger +创建一个使用HTTP协议的forward_http触发器和一个使用MQTT协议的forward_mqtt触发器,两者分别订阅前缀路径`root.http`和`root.mqtt`。 +```sql +CREATE trigger forward_http AFTER INSERT ON root.http +AS 'org.apache.iotdb.db.engine.trigger.builtin.ForwardTrigger' +WITH ('protocol' = 'http', 'endpoint' = 'http://127.0.0.1:8080/forward_receive') + +CREATE trigger forward_mqtt AFTER INSERT ON root.mqtt +AS 'org.apache.iotdb.db.engine.trigger.builtin.ForwardTrigger' +WITH ('protocol' = 'mqtt', 'host' = '127.0.0.1', 'port' = '1883', 'username' = 'root', 'password' = 'root', 'topic' = 'mqtt-test') +``` + +#### 激发触发器 +向两个前缀路径的子路径插入数据,激发触发器。 +```sql +INSERT INTO root.http.d1(timestamp, s1) VALUES (1, 1); +INSERT INTO root.http.d1(timestamp, s2) VALUES (2, true); +INSERT INTO root.mqtt.d1(timestamp, s1) VALUES (1, 1); +INSERT INTO root.mqtt.d1(timestamp, s2) VALUES (2, true); +``` + +#### 接收转发的消息 +触发器激发后,在HTTP接收端会接收到如下格式的JSON数据: +```json +[ + { + "device":"root.http.d1", + "measurement":"s1", + "timestamp":1, + "value":1.0 + }, + { + "device":"root.http.d1", + "measurement":"s2", + "timestamp":2, + "value":true + } +] +``` + +触发器触发后,在MQTT接收端会接收到如下格式的JSON数据: +```json +[ + { + "device":"root.mqtt.d1", + "measurement":"s1", + "timestamp":1, + "value":1.0 + }, + { + "device":"root.mqtt.d1", + "measurement":"s2", + "timestamp":2, + "value":true + } +] +``` + +### ForwardTrigger的配置参数 +| 参数 | 必填 | 默认值 | 上限 | 描述 | +|--------------------|------| ------------ | ---- |--------------------------------------------------------------------------------------------------------------------------------------| +| protocol | true | http | | 转发协议,如HTTP/MQTT | +| queueNumber | | 8 | 8 | 队列数量,与全局参数trigger_forward_max_queue_number比较取小 | +| queueSize | | 2000 | 2000 | 队列大小,与全局参数trigger_forward_max_size_per_queue比较取小 | +| batchSize | | 50 | 50 | 每次最大转发批量,与全局参数trigger_forward_batch_size比较取小 | +| stopIfException | | false | | 出现异常是否终止 | +| endpoint | true | | | 请求端点地址(HTTP协议参数)
说明:HTTP连接池参数取决于全局参数
trigger_forward_http_pool_size=200

trigger_forward_http_pool_max_per_route=20 | +| host | true | | | MQTT Broker主机名(MQTT 协议参数) | +| port | true | | | MQTT Broker端口号(MQTT 协议参数) | +| username | true | | | 用户名(MQTT 协议参数) | +| password | true | | | 密码(MQTT 协议参数) | +| topic | true | | | MQTT消息的主题(MQTT 协议参数) | +| reconnectDelay | | 10ms | | 重连等待时间(MQTT 协议参数) | +| connectAttemptsMax | | 3 | | 最大尝试连接次数(MQTT 协议参数) | +| qos | | exactly_once | | 服务质量保证(MQTT 协议参数),可选exactly_once,at_least_once,at_most_once | +| poolSize | | 4 | 4 | MQTT连接池大小(MQTT 协议参数),与全局参数trigger_forward_mqtt_pool_size比较取小 | +| retain | | false | | Publish后是否让MQTT Broker保持消息(MQTT 协议参数) | + ## 完整的 Maven 示例项目 如果您使用 [Maven](http://search.maven.org/),可以参考我们编写的示例项目 **trigger-example**。 @@ -742,7 +851,7 @@ public class TriggerExample implements Trigger { 您可以按照下面的步骤试用这个触发器: -* 在`iotdb-engine.properties`中启用 MQTT 服务 +* 在`iotdb-datanode.properties`中启用 MQTT 服务 ``` properties # whether to enable the mqtt service. diff --git a/docs/zh/UserGuide/Process-Data/UDF-User-Defined-Function.md b/docs/zh/UserGuide/Process-Data/UDF-User-Defined-Function.md index 90d9215b1ee2..c789b3a597f3 100644 --- a/docs/zh/UserGuide/Process-Data/UDF-User-Defined-Function.md +++ b/docs/zh/UserGuide/Process-Data/UDF-User-Defined-Function.md @@ -41,7 +41,7 @@ IoTDB 支持两种类型的 UDF 函数,如下表所示。 ``` xml org.apache.iotdb - iotdb-server + udf-api 0.14.0-SNAPSHOT provided @@ -49,7 +49,7 @@ IoTDB 支持两种类型的 UDF 函数,如下表所示。 ## UDTF(User Defined Timeseries Generating Function) -编写一个 UDTF 需要继承`org.apache.iotdb.db.query.udf.api.UDTF`类,并至少实现`beforeStart`方法和一种`transform`方法。 +编写一个 UDTF 需要继承`org.apache.iotdb.udf.api.UDTF`类,并至少实现`beforeStart`方法和一种`transform`方法。 下表是所有可供用户实现的接口说明。 @@ -92,7 +92,7 @@ IoTDB 支持两种类型的 UDF 函数,如下表所示。 #### UDFParameters -`UDFParameters`的作用是解析 SQL 语句中的 UDF 参数(SQL 中 UDF 函数名称后括号中的部分)。参数包括路径(及其序列类型)参数和字符串 key-value 对形式输入的属性参数。 +`UDFParameters`的作用是解析 SQL 语句中的 UDF 参数(SQL 中 UDF 函数名称后括号中的部分)。参数包括序列类型参数和字符串 key-value 对形式输入的属性参数。 例子: @@ -104,11 +104,6 @@ SELECT UDF(s1, s2, 'key1'='iotdb', 'key2'='123.45') FROM root.sg.d; ``` java void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) throws Exception { - // parameters - for (PartialPath path : parameters.getPaths()) { - TSDataType dataType = parameters.getDataType(path); - // do something - } String stringValue = parameters.getString("key1"); // iotdb Float floatValue = parameters.getFloat("key2"); // 123.45 Double doubleValue = parameters.getDouble("key3"); // null @@ -134,7 +129,7 @@ void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) th // configurations configurations .setAccessStrategy(new RowByRowAccessStrategy()) - .setOutputDataType(TSDataType.INT32); + .setOutputDataType(Type.INT32); } ``` @@ -190,7 +185,7 @@ void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) th | `FLOAT` | `float` | | `DOUBLE` | `double` | | `BOOLEAN` | `boolean` | -| `TEXT` | `java.lang.String` 和 `org.apache.iotdb.tsfile.utils.Binary` | +| `TEXT` | `java.lang.String` 和 `org.apache.iotdb.udf.api.type.Binary` | UDTF 输出序列的类型是运行时决定的。您可以根据输入序列类型动态决定输出序列类型。 @@ -216,20 +211,20 @@ void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) th 下面是一个实现了`void transform(Row row, PointCollector collector) throws Exception`方法的完整 UDF 示例。它是一个加法器,接收两列时间序列输入,当这两个数据点都不为`null`时,输出这两个数据点的代数和。 ``` java -import org.apache.iotdb.db.query.udf.api.UDTF; -import org.apache.iotdb.db.query.udf.api.access.Row; -import org.apache.iotdb.db.query.udf.api.collector.PointCollector; -import 
org.apache.iotdb.db.query.udf.api.customizer.config.UDTFConfigurations; -import org.apache.iotdb.db.query.udf.api.customizer.parameter.UDFParameters; -import org.apache.iotdb.db.query.udf.api.customizer.strategy.RowByRowAccessStrategy; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.Row; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; public class Adder implements UDTF { @Override public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) { configurations - .setOutputDataType(TSDataType.INT64) + .setOutputDataType(Type.INT64) .setAccessStrategy(new RowByRowAccessStrategy()); } @@ -253,20 +248,20 @@ public class Adder implements UDTF { ```java import java.io.IOException; -import org.apache.iotdb.db.query.udf.api.UDTF; -import org.apache.iotdb.db.query.udf.api.access.RowWindow; -import org.apache.iotdb.db.query.udf.api.collector.PointCollector; -import org.apache.iotdb.db.query.udf.api.customizer.config.UDTFConfigurations; -import org.apache.iotdb.db.query.udf.api.customizer.parameter.UDFParameters; -import org.apache.iotdb.db.query.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.RowWindow; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; public class Counter implements UDTF { @Override public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) { configurations - .setOutputDataType(TSDataType.INT32) + .setOutputDataType(Type.INT32) .setAccessStrategy(new SlidingTimeWindowAccessStrategy( parameters.getLong("time_interval"), parameters.getLong("sliding_step"), @@ -295,13 +290,13 @@ public class Counter implements UDTF { ```java import java.io.IOException; -import org.apache.iotdb.db.query.udf.api.UDTF; -import org.apache.iotdb.db.query.udf.api.access.Row; -import org.apache.iotdb.db.query.udf.api.collector.PointCollector; -import org.apache.iotdb.db.query.udf.api.customizer.config.UDTFConfigurations; -import org.apache.iotdb.db.query.udf.api.customizer.parameter.UDFParameters; -import org.apache.iotdb.db.query.udf.api.customizer.strategy.RowByRowAccessStrategy; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.Row; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; public class Max implements UDTF { @@ -457,7 +452,7 @@ SHOW FUNCTIONS ## 配置项 -在 SQL 
语句中使用自定义函数时,可能提示内存不足。这种情况下,您可以通过更改配置文件`iotdb-engine.properties`中的`udf_initial_byte_array_length_for_memory_control`,`udf_memory_budget_in_mb`和`udf_reader_transformer_collector_memory_proportion`并重启服务来解决此问题。 +在 SQL 语句中使用自定义函数时,可能提示内存不足。这种情况下,您可以通过更改配置文件`iotdb-datanode.properties`中的`udf_initial_byte_array_length_for_memory_control`,`udf_memory_budget_in_mb`和`udf_reader_transformer_collector_memory_proportion`并重启服务来解决此问题。 ## 贡献 UDF diff --git a/docs/zh/UserGuide/Query-Data/Query-Filter.md b/docs/zh/UserGuide/Query-Data/Query-Filter.md index aac9b820ba3c..569bc04b02cc 100644 --- a/docs/zh/UserGuide/Query-Data/Query-Filter.md +++ b/docs/zh/UserGuide/Query-Data/Query-Filter.md @@ -26,8 +26,9 @@ 支持的运算符如下: - 比较运算符:大于(`>`)、大于等于( `>=`)、等于( `=` 或 `==`)、不等于( `!=` 或 `<>`)、小于等于( `<=`)、小于( `<`)。 -- 范围包含运算符:包含( `IN` )。 - 逻辑运算符:与( `AND` 或 `&` 或 `&&`)、或( `OR` 或 `|` 或 `||`)、非( `NOT` 或 `!`)。 +- 范围包含运算符:包含( `IN` )。 +- 字符串匹配运算符:`LIKE`, `REGEXP`。 ## 时间过滤条件 @@ -90,6 +91,20 @@ select code from root.sg1.d1 where code not in ('200', '300', '400', '500'); ``` +## 空值过滤 +使用空值过滤条件可以筛选出值为空或非空的数据 +1. 选择值为空的数据: + + ```sql + select code from root.sg1.d1 where temperature is null; + ```` + +2. 选择值为非空的数据: + + ```sql + select code from root.sg1.d1 where temperature is not null; + ```` + ## 模糊查询 在值过滤条件中,对于 TEXT 类型的数据,支持使用 `Like` 和 `Regexp` 运算符对数据进行模糊匹配 diff --git a/docs/zh/UserGuide/Query-Data/Select-Expression.md b/docs/zh/UserGuide/Query-Data/Select-Expression.md index 5758f601fc63..c41e7de0669b 100644 --- a/docs/zh/UserGuide/Query-Data/Select-Expression.md +++ b/docs/zh/UserGuide/Query-Data/Select-Expression.md @@ -111,6 +111,7 @@ It costs 0.014s ``` ## 逻辑运算查询 +### 运算符 #### 一元逻辑运算符 支持运算符 `!` @@ -126,7 +127,7 @@ It costs 0.014s 输入数据类型: `INT32`, `INT64`, `FLOAT`, `DOUBLE` -会将所有数据转换为`DOUBLE`类型后进行比较。`==`和`!=`可以直接比较两个`BOOLEAN` +注意:会将所有数据转换为`DOUBLE`类型后进行比较。`==`和`!=`可以直接比较两个`BOOLEAN` 返回类型:`BOOLEAN` @@ -140,12 +141,28 @@ It costs 0.014s 注意:当某个时间戳下左操作数和右操作数都为`BOOLEAN`类型时,二元逻辑操作才会有输出结果 +#### IN 运算符 + +支持运算符 `IN` + +输入数据类型:`All Types` + +返回类型 `BOOLEAN` + +#### 字符串匹配运算符 + +支持运算符 `LIKE`, `REGEXP` + +输入数据类型:`TEXT` + +返回类型:`BOOLEAN` + ### 使用示例 -例如: +输入1: ```sql select a, b, a > 10, a <= b, !(a <= b), a > 10 && a > b from root.test; ``` -输出: +输出1: ``` IoTDB> select a, b, a > 10, a <= b, !(a <= b), a > 10 && a > b from root.test; +-----------------------------+-----------+-----------+----------------+--------------------------+---------------------------+------------------------------------------------+ @@ -160,6 +177,21 @@ IoTDB> select a, b, a > 10, a <= b, !(a <= b), a > 10 && a > b from root.test; +-----------------------------+-----------+-----------+----------------+--------------------------+---------------------------+------------------------------------------------+ ``` +输入2: +```sql +select a, b, a in (1, 2), b like '1%', b regexp '[0-2]' from root.test; +``` + +输出2: +``` ++-----------------------------+-----------+-----------+--------------------+-------------------------+--------------------------+ +| Time|root.test.a|root.test.b|root.test.a IN (1,2)|root.test.b LIKE '^1.*?$'|root.test.b REGEXP '[0-2]'| ++-----------------------------+-----------+-----------+--------------------+-------------------------+--------------------------+ +|1970-01-01T08:00:00.001+08:00| 1| 111test111| true| true| true| +|1970-01-01T08:00:00.003+08:00| 3| 333test333| false| false| false| ++-----------------------------+-----------+-----------+--------------------+-------------------------+--------------------------+ +``` + ## 运算符优先级 |优先级 |运算符 |含义 | diff 
--git a/docs/zh/UserGuide/QuickStart/QuickStart.md b/docs/zh/UserGuide/QuickStart/QuickStart.md index 983766d6da71..334741909889 100644 --- a/docs/zh/UserGuide/QuickStart/QuickStart.md +++ b/docs/zh/UserGuide/QuickStart/QuickStart.md @@ -258,6 +258,6 @@ Windows 系统停止命令如下: 配置文件在"conf"文件夹下,包括: - * 环境配置模块 (`iotdb-env.bat`, `iotdb-env.sh`), - * 系统配置模块 (`iotdb-engine.properties`) + * 环境配置模块 (`datanode-env.bat`, `datanode-env.sh`), + * 系统配置模块 (`iotdb-datanode.properties`) * 日志配置模块 (`logback.xml`). diff --git a/docs/zh/UserGuide/QuickStart/ServerFileList.md b/docs/zh/UserGuide/QuickStart/ServerFileList.md index 4c097263eb96..214099f2b5bd 100644 --- a/docs/zh/UserGuide/QuickStart/ServerFileList.md +++ b/docs/zh/UserGuide/QuickStart/ServerFileList.md @@ -27,9 +27,9 @@ ## 配置文件 > conf 目录下 -1. iotdb-engine.properties +1. iotdb-datanode.properties 2. logback.xml -3. iotdb-env.sh +3. datanode-env.sh 4. jmx.access 5. jmx.password 6. iotdb-sync-client.properties diff --git a/docs/zh/UserGuide/Reference/Config-Manual.md b/docs/zh/UserGuide/Reference/Config-Manual.md index 9404d1f91f4b..c283408a3e05 100644 --- a/docs/zh/UserGuide/Reference/Config-Manual.md +++ b/docs/zh/UserGuide/Reference/Config-Manual.md @@ -23,20 +23,20 @@ 为方便 IoTDB Server 的配置与管理,IoTDB Server 为用户提供三种配置项,使得用户可以在启动服务或服务运行时对其进行配置。 -三种配置项的配置文件均位于 IoTDB 安装目录:`$IOTDB_HOME/conf`文件夹下,其中涉及 server 配置的共有 2 个文件,分别为:`iotdb-env.sh`, `iotdb-engine.properties` +三种配置项的配置文件均位于 IoTDB 安装目录:`$IOTDB_HOME/conf`文件夹下,其中涉及 server 配置的共有 2 个文件,分别为:`datanode-env.sh`, `iotdb-datanode.properties` 。用户可以通过更改其中的配置项对系统运行的相关配置项进行配置。 配置文件的说明如下: -* `iotdb-env.sh`:环境配置项的默认配置文件。用户可以在文件中配置 JAVA-JVM 的相关系统配置项。 +* `datanode-env.sh`:环境配置项的默认配置文件。用户可以在文件中配置 JAVA-JVM 的相关系统配置项。 -* `iotdb-engine.properties`:IoTDB 引擎层系统配置项的默认配置文件。用户可以在文件中配置 IoTDB 引擎运行时的相关参数,如 JDBC 服务监听端口 (`rpc_port`)、overflow +* `iotdb-datanode.properties`:IoTDB 引擎层系统配置项的默认配置文件。用户可以在文件中配置 IoTDB 引擎运行时的相关参数,如 JDBC 服务监听端口 (`rpc_port`)、overflow 数据文件存储目录 (`overflow_data_dir`) 等。此外,用户可以在文件中配置 IoTDB 存储时 TsFile 文件的相关信息,如每次将内存中的数据写入到磁盘时的数据大小 (`group_size_in_byte`) ,内存中每个列打一次包的大小 (`page_size_in_byte`) 等。 ## 热修改配置项 -为方便用户使用,IoTDB Server 为用户提供了热修改功能,即在系统运行过程中修改`iotdb-engine.properties`中部分配置参数并即时应用到系统中。下面介绍的参数中,改后 生效方式为`触发生效` +为方便用户使用,IoTDB Server 为用户提供了热修改功能,即在系统运行过程中修改`iotdb-datanode.properties`中部分配置参数并即时应用到系统中。下面介绍的参数中,改后 生效方式为`触发生效` 的均为支持热修改的配置参数。 触发方式:客户端发送```load configuration```命令至 IoTDB @@ -44,8 +44,8 @@ Server,客户端的使用方式详见 [SQL 命令行终端(CLI)](https://i ## 环境配置项 -环境配置项主要用于对 IoTDB Server 运行的 Java 环境相关参数进行配置,如 JVM 相关配置。IoTDB Server 启动时,此部分配置会被传给 JVM。用户可以通过查看 `iotdb-env.sh` -(或`iotdb-env.bat`) 文件查看环境配置项内容。详细配置项说明如下: +环境配置项主要用于对 IoTDB Server 运行的 Java 环境相关参数进行配置,如 JVM 相关配置。IoTDB Server 启动时,此部分配置会被传给 JVM。用户可以通过查看 `datanode-env.sh` +(或`datanode-env.bat`) 文件查看环境配置项内容。详细配置项说明如下: * JMX\_LOCAL @@ -86,7 +86,7 @@ Server,客户端的使用方式详见 [SQL 命令行终端(CLI)](https://i ## 系统配置项 系统配置项是 IoTDB Server 运行的核心配置,它主要用于设置 IoTDB Server 文件层和引擎层的参数,便于用户根据自身需求调整 Server -的相关配置,以达到较好的性能表现。系统配置项可分为两大模块:文件层配置项和引擎层配置项。用户可以通过`iotdb-engine.properties`, 文件查看和修改两种配置项的内容。在 0.7.0 版本中字符串类型的配置项大小写敏感。 +的相关配置,以达到较好的性能表现。系统配置项可分为两大模块:文件层配置项和引擎层配置项。用户可以通过`iotdb-datanode.properties`, 文件查看和修改两种配置项的内容。在 0.7.0 版本中字符串类型的配置项大小写敏感。 ### RPC配置 @@ -534,11 +534,11 @@ Server,客户端的使用方式详见 [SQL 命令行终端(CLI)](https://i * avg\_series\_point\_number\_threshold |名字| avg\_series\_point\_number\_threshold | -|:---:|:---| -|描述| 内存中平均每个时间序列点数最大值,达到触发 flush | -|类型| Int32 | -|默认值| 10000 | -|改后生效方式|重启服务生效| +|:---:|:--------------------------------------| +|描述| 内存中平均每个时间序列点数最大值,达到触发 flush | +|类型| Int32 | +|默认值| 
100000 | +|改后生效方式| 重启服务生效 | * concurrent\_flush\_thread diff --git a/docs/zh/UserGuide/Reference/Keywords.md b/docs/zh/UserGuide/Reference/Keywords.md index 90fab16089e2..92ec09bcc0f5 100644 --- a/docs/zh/UserGuide/Reference/Keywords.md +++ b/docs/zh/UserGuide/Reference/Keywords.md @@ -159,30 +159,6 @@ - WITH - WITHOUT - WRITABLE - -- 数据类型 - - BOOLEAN - - DOUBLE - - FLOAT - - INT32 - - INT64 - - TEXT - -- 编码类型 - - DICTIONARY - - DIFF - - GORILLA - - PLAIN - - REGULAR - - RLE - - TS_2DIFF - -- 压缩类型 - - GZIP - - LZ4 - - SNAPPY - - UNCOMPRESSED - - 权限类型 - SET_STORAGE_GROUP - CREATE_TIMESERIES diff --git a/docs/zh/UserGuide/Reference/Syntax-Conventions.md b/docs/zh/UserGuide/Reference/Syntax-Conventions.md index a02c1fcd3f2b..8337ed3d5797 100644 --- a/docs/zh/UserGuide/Reference/Syntax-Conventions.md +++ b/docs/zh/UserGuide/Reference/Syntax-Conventions.md @@ -21,23 +21,154 @@ # 语法约定 +## 旧语法约定中的问题(0.14 版本不兼容的语法) + +在之前版本的语法约定中,为了保持兼容性,我们引入了一些会引起歧义的规定。为了避免歧义,我们设计了新的语法约定,本章将说明旧语法约定中存在的问题,以及我们做出改动的原因。 + +### 标识符限制增强 + +在0.13及之前版本中,不使用反引号引用的标识符(包括路径结点)允许为纯数字(纯数字路径名在 `SELECT` 子句中需要用反引号括起),且允许包含部分特殊字符,**在0.14版本中,不使用反引号引用的标识符不允许为纯数字,不使用反引号引用的标识符,只允许包含字母、中文字符、下划线。** + +### 路径名使用的相关问题 + +在旧语法约定中,什么时候需要给路径结点名添加引号,用单双引号还是反引号的规则较为复杂,在新的语法约定中我们做了统一,具体可以参考本文档的相关章节。 + +#### 单双引号和反引号的使用时机 + +在之前的语法约定中,路径结点名被定义成标识符,但是当需要在路径结点名中使用路径分隔符 . 时,需要使用单引号或者双引号引用。这与标识符使用反引号引用的规则相悖。 + +```SQL +# 在之前的语法约定中,如果需要创建时间序列 root.sg.`www.baidu.com`,需要使用下述语句: +create root.sg.'www.baidu.com' with datatype=BOOLEAN, encoding=PLAIN + +# 该语句创建的时间序列实际为 root.sg.'www.baidu.com',即引号一并存入,该时间序列的三个结点为{"root","sg","'www.baidu.com'"} + +# 在查询语句中,如果希望查询该时间序列的数据,查询语句如下: +select 'www.baidu.com' from root.sg; +``` + +而在新语法约定中,特殊路径结点名统一使用反引号引用: + +```SQL +# 在现有语法约定中,如果需要创建时间序列 root.sg.`www.baidu.com`,语法如下: +create root.sg.`www.baidu.com` with datatype = BOOLEAN, encoding = PLAIN + +# 查询该时间序列可以通过如下语句: +select `www.baidu.com` from root.sg; +``` + +#### 路径结点名内部使用引号的问题 + +在旧语法约定中,在路径结点名中使用单引号 ' 和 双引号 " 时,需要使用反斜杠 \ 进行转义,且反斜杠会被视为路径结点名的一部分存入,而在使用其它标识符时没有这个限制,造成了不统一。 + +```SQL +# 创建时间序列 root.sg.\"a +create timeseries root.sg.`\"a` with datatype=TEXT,encoding=PLAIN; + +# 查询时间序列 root.sg.\"a +select `\"a` from root.sg; ++-----------------------------+-----------+ +| Time|root.sg.\"a| ++-----------------------------+-----------+ +|1970-01-01T08:00:00.004+08:00| test| ++-----------------------------+-----------+ +``` + +在新语法约定中,特殊路径结点名统一使用反引号进行引用,在路径结点名中使用单双引号无须添加反斜杠转义,使用反引号需要双写,具体可以参考新语法约定路径结点名章节。 + +### Session 接口相关 + +#### Session 接口语法限制 + +在0.13版本中,对于非SQL接口中使用路径结点的限制如下: + +- 经参数传入的路径或路径前缀中的节点: + - 在 SQL 语句中需要使用反引号(`)进行转义的,此处均不需要进行转义。 + - 使用单引号或双引号括起的节点,仍需要使用单引号或双引号括起,并且要针对 JAVA 字符串进行反转义。 + - 对于 `checkTimeseriesExists` 接口,由于内部调用了 IoTDB-SQL 接口,因此需要和 SQL 语法规范保持一致,并且针对 JAVA 字符串进行反转义。 + +**0.14 版本中,对非SQL接口中使用路径结点的限制增强:** + +- **经参数传入的路径或路径前缀中的节点: 在 SQL 语句中需要使用反引号(`)进行转义的,均需要使用反引号进行转义。** + +- **语法说明相关代码示例可以参考:**`example/session/src/main/java/org/apache/iotdb/SyntaxConventionRelatedExample.java` + +#### SQL和Session接口对字符串反转义处理不一致 + +在之前版本中,使用字符串时,SQL 和 Session 接口存在不一致的情况。比如使用 SQL 插入 Text 类型数据时,会对字符串进行反转义处理,而使用 Session 接口时不会进行这样的处理,存在不一致。**在新的语法约定中,我们统一不对字符串做反转义处理,存入什么内容,在查询时就会得到什么内容(字符串内部使用单双引号的规则可以参考本文档字符串常量章节)。** + +下面是旧语法约定中不一致的例子: + +使用 Session 的 insertRecord 方法向时序 root.sg.a 中插入数据 + +```Java +// session 插入 +String deviceId = "root.sg"; +List measurements = new ArrayList<>(); +measurements.add("a"); +String[] values = new String[]{"\\\\", "\\t", "\\\"", "\\u96d5"}; +for(int i = 0; i <= values.length; i++){ + List valueList = new ArrayList<>(); + 
valueList.add(values[i]); + session.insertRecord(deviceId, i + 1, measurements, valueList); + } +``` + +查询 root.sg.a 的数据,可以看到没有做反转义处理: + +```Plain%20Text +// 查询结果 ++-----------------------------+---------+ +| Time|root.sg.a| ++-----------------------------+---------+ +|1970-01-01T08:00:00.001+08:00| \\| +|1970-01-01T08:00:00.002+08:00| \t| +|1970-01-01T08:00:00.003+08:00| \"| +|1970-01-01T08:00:00.004+08:00| \u96d5| ++-----------------------------+---------+ +``` + +而使用 SQL 向 root.sg.a 中插入数据 + +```SQL +# SQL 插入 +insert into root.sg(time, a) values(1, "\\") +insert into root.sg(time, a) values(2, "\t") +insert into root.sg(time, a) values(3, "\"") +insert into root.sg(time, a) values(4, "\u96d5") +``` + +查询 root.sg.a 的数据,可以看到字符串进行了反转义: + +```Plain%20Text +// 查询结果 ++-----------------------------+---------+ +| Time|root.sg.a| ++-----------------------------+---------+ +|1970-01-01T08:00:00.001+08:00| \| +|1970-01-01T08:00:00.002+08:00| | +|1970-01-01T08:00:00.003+08:00| "| +|1970-01-01T08:00:00.004+08:00| 雕| ++-----------------------------+---------+ +``` + ## 字面值常量 该部分对 IoTDB 中支持的字面值常量进行说明,包括字符串常量、数值型常量、时间戳常量、布尔型常量和空值。 ### 字符串常量 -字符串是由单引号(`'`)或双引号(`"`)字符括起来的字符序列。示例如下: +> 我们参照了 MySQL 对 字符串的定义:A string is a sequence of bytes or characters, enclosed within either single quote (`'`) or double quote (`"`) characters. + +MySQL 对字符串的定义可以参考:[MySQL :: MySQL 8.0 Reference Manual :: 9.1.1 String Literals](https://dev.mysql.com/doc/refman/8.0/en/string-literals.html) + +即在 IoTDB 中,字符串是由**单引号(`'`)或双引号(`"`)字符括起来的字符序列**。示例如下: ```Plain%20Text 'a string' "another string" ``` -除文件路径以外,我们会对字符串常量做反转义处理,具体使用可以参考使用场景中的示例。 - -转义字符可以参考链接:[Characters (The Java™ Tutorials > Learning the Java Language > Numbers and Strings)](https://docs.oracle.com/javase/tutorial/java/data/characters.html) - #### 使用场景 - `INSERT` 或者 `SELECT` 中用于表达 `TEXT` 类型数据的场景。 @@ -47,20 +178,19 @@ insert into root.ln.wf02.wt02(timestamp,hardware) values(1, 'v1') insert into root.ln.wf02.wt02(timestamp,hardware) values(2, '\\') - # 查询 root.ln.wf02.wt02的数据,结果如下,可以看到\\被转义为了\ +-----------------------------+--------------------------+ | Time|root.ln.wf02.wt02.hardware| +-----------------------------+--------------------------+ |1970-01-01T08:00:00.001+08:00| v1| +-----------------------------+--------------------------+ - |1970-01-01T08:00:00.002+08:00| \| + |1970-01-01T08:00:00.002+08:00| \\| +-----------------------------+--------------------------+ # select 示例 select code from root.sg1.d1 where code in ('string1', 'string2'); ``` - -- `LOAD` / `REMOVE` / `SETTLE` 指令中的文件路径。由于windows系统使用反斜杠\作为路径分隔符,文件路径我们不会做反转义处理。 + +- `LOAD` / `REMOVE` / `SETTLE` 指令中的文件路径。 ```SQL # load 示例 @@ -76,7 +206,7 @@ - 用户密码。 ```SQL - # 示例,'write_pwd'即为用户密码 + # 示例,write_pwd 即为用户密码 CREATE USER ln_write_user 'write_pwd' ``` @@ -108,73 +238,13 @@ +-----------------------------+-----------|-----+ ``` -- 用于表示键值对,键值对的键可以被定义成字符串或者标识符,键值对的值可以被定义成常量(包括字符串)或者标识符,更推荐将键值对表示为字符串。示例如下: - - 1. 触发器中表示触发器属性的键值对。参考示例语句中 WITH 后的属性键值对。 - - ```SQL - # 示例 - CREATE TRIGGER `alert-listener-sg1d1s1` - AFTER INSERT - ON root.sg1.d1.s1 - AS 'org.apache.iotdb.db.engine.trigger.example.AlertListener' - WITH ( - 'lo' = '0', - 'hi' = '100.0' - ) - ``` - - 2. UDF 中函数输入参数中的属性键值对。参考示例语句中 SELECT 子句中的属性键值对。 - - ```SQL - # 示例 - SELECT example(s1, s2, 'key1'='value1', 'key2'='value2') FROM root.sg.d1; - ``` - - 3. 
时间序列中用于表示标签和属性的键值对。 - - ```SQL - # 创建时间序列时设定标签和属性 - CREATE timeseries root.turbine.d1.s1(temprature) - WITH datatype=FLOAT, encoding=RLE, compression=SNAPPY, 'max_point_number' = '5' - TAGS('tag1' = 'v1', 'tag2'= 'v2') ATTRIBUTES('attr1' = 'v1', 'attr2' = 'v2') - - # 修改时间序列的标签和属性 - ALTER timeseries root.turbine.d1.s1 SET 'newTag1' = 'newV1', 'attr1' = 'newV1' - - # 修改标签名 - ALTER timeseries root.turbine.d1.s1 RENAME 'tag1' TO 'newTag1' - - # 插入别名、标签、属性 - ALTER timeseries root.turbine.d1.s1 UPSERT - ALIAS='newAlias' TAGS('tag2' = 'newV2', tag3=v3) ATTRIBUTES('attr3' ='v3', 'attr4'='v4') - - # 添加新的标签 - ALTER timeseries root.turbine.d1.s1 ADD TAGS 'tag3' = 'v3', 'tag4' = 'v4' - - # 添加新的属性 - ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES 'attr3' = 'v3', 'attr4' = 'v4' - - # 查询符合条件的时间序列信息 - SHOW timeseries root.ln.** WHRER 'unit' = 'c' - ``` - - 4. 创建 Pipe 以及 PipeSink 时表示属性的键值对。 - - ```SQL - # 创建 PipeSink 时表示属性 - CREATE PIPESINK my_iotdb AS IoTDB ('ip' = '输入你的IP') - - # 创建 Pipe 时在 WITH 子句中表示属性 - CREATE PIPE my_pipe TO my_iotdb FROM - (select ** from root WHERE time>=yyyy-mm-dd HH:MM:SS) WITH 'SyncDelOp' = 'true' - ``` +- 用于表示键值对,键值对的键和值可以被定义成常量(包括字符串)或者标识符,具体请参考键值对章节。 #### 如何在字符串内使用引号 - 在单引号引起的字符串内,双引号无需特殊处理。同理,在双引号引起的字符串内,单引号无需特殊处理。 -- 在引号前使用转义符 (\)。 +- 在引号前使用转义符 (\\)。 - 在单引号引起的字符串里,可以通过双写单引号来表示一个单引号,即单引号 ' 可以表示为 ''。 @@ -228,44 +298,43 @@ ### 使用场景 -在 IoTDB 中,触发器名称、UDF函数名、元数据模板名称、用户与角色名、连续查询标识、Pipe、PipeSink、键值对中的键和值、别名等被称为标识符。 +在 IoTDB 中,触发器名称、UDF函数名、元数据模板名称、用户与角色名、连续查询标识、Pipe、PipeSink、键值对中的键和值、别名等可以作为标识符。 ### 约束 请注意,此处约束是标识符的通用约束,具体标识符可能还附带其它约束条件,如用户名限制字符数大于等于4,更严格的约束请参考具体标识符相关的说明文档。 -标识符命名有以下约束: +**标识符命名有以下约束:** - 不使用反引号括起的标识符中,允许出现以下字符: - - - [ 0-9 a-z A-Z _ : @ # $ { } ] (字母,数字,部分特殊字符) - + - [ 0-9 a-z A-Z _ ] (字母,数字,下划线) - ['\u2E80'..'\u9FFF'] (UNICODE 中文字符) - + - 标识符允许使用数字开头、不使用反引号括起的标识符不能全部为数字。 - 标识符是大小写敏感的。 -如果出现如下情况,标识符需要使用反引号进行引用: +- 标识符允许为关键字。 + +**如果出现如下情况,标识符需要使用反引号进行引用:** - 标识符包含不允许的特殊字符。 -- 标识符为系统关键字。 - 标识符为纯数字。 ### 如何在反引号引起的标识符中使用引号 -在反引号引起的标识符中可以直接使用单引号和双引号。 +**在反引号引起的标识符中可以直接使用单引号和双引号。** -在用反引号引用的标识符中,可以通过双写反引号的方式使用反引号,即 ` 可以表示为 ``,示例如下: +**在用反引号引用的标识符中,可以通过双写反引号的方式使用反引号,即 ` 可以表示为 ``**,示例如下: ```SQL -# 创建模板 t1't"t -create schema template `t1't"t` -(temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY) - # 创建模板 t1`t create schema template `t1``t` (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY) + +# 创建模板 t1't"t +create schema template `t1't"t` +(temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY) ``` ### 特殊情况示例 @@ -289,14 +358,14 @@ create schema template `t1``t` - UDF 名称出现上述特殊情况时需使用反引号引用: ```sql - # 创建名为 select 的 UDF,select 为系统关键字,所以需要用反引号引用 - CREATE FUNCTION `select` AS 'org.apache.iotdb.udf.UDTFExample' + # 创建名为 111 的 UDF,111 为纯数字,所以需要用反引号引用。 + CREATE FUNCTION `111` AS 'org.apache.iotdb.udf.UDTFExample' ``` - 元数据模板名称出现上述特殊情况时需使用反引号引用: ```sql - # 创建名为 111 的元数据模板,111 为纯数字,需要用反引号引用 + # 创建名为 111 的元数据模板,111 为纯数字,需要用反引号引用。 create schema template `111` (temperature FLOAT encoding=RLE, status BOOLEAN encoding=PLAIN compression=SNAPPY) ``` @@ -307,8 +376,8 @@ create schema template `t1``t` # 创建用户 special`user. 
CREATE USER `special``user.` 'write_pwd' - # 创建角色 `select` - CREATE ROLE `select` + # 创建角色 111 + CREATE ROLE `111` ``` - 连续查询标识出现上述特殊情况时需使用反引号引用: @@ -340,38 +409,13 @@ create schema template `t1``t` ```sql select s1 as temperature, s2 as speed from root.ln.wf01.wt01; # 表头如下所示 - +-----------------------------+-----------|-----+ + +-----------------------------+-----------+-----+ | Time|temperature|speed| - +-----------------------------+-----------|-----+ + +-----------------------------+-----------+-----+ ``` -- 用于表示键值对,键值对的键可以被定义成字符串或者标识符,键值对的值可以被定义成常量(包括字符串)或者标识符,更推荐将键值对表示为字符串。键值对的使用范围和字符串常量中提到的一致,下面以时间序列中用于表示标签和属性的键值对作为示例: +- 用于表示键值对,键值对的键和值可以被定义成常量(包括字符串)或者标识符,具体请参考键值对章节。 - ```SQL - # 创建时间序列时设定标签和属性 - CREATE timeseries root.turbine.d1.s1(temprature) - WITH datatype=FLOAT, encoding=RLE, compression=SNAPPY, max_point_number = 5 - TAGS(tag1 = v1, tag2= v2) ATTRIBUTES(attr1 = v1, attr2 = v2) - - # 修改时间序列的标签和属性 - ALTER timeseries root.turbine.d1.s1 SET newTag1 = newV1, attr1 = newV1 - - # 修改标签名 - ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 - - # 插入别名、标签、属性 - ALTER timeseries root.turbine.d1.s1 UPSERT - ALIAS = newAlias TAGS(tag2 = newV2, tag3 = v3) ATTRIBUTES(attr3 = v3, attr4 = v4) - - # 添加新的标签 - ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3 = v3, tag4 = v4 - - # 添加新的属性 - ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3 = v3, attr4 = v4 - - # 查询符合条件的时间序列信息 - SHOW timeseries root.ln.** WHRER unit = c - ``` ## 路径结点名 @@ -386,7 +430,7 @@ create schema template `t1``t` 由于通配符 * 在查询表达式中也可以表示乘法符号,下述例子用于帮助您区分两种情况: ```SQL -# 创建时间序列 root.sg.a*b +# 创建时间序列 root.sg.`a*b` create timeseries root.sg.`a*b` with datatype=FLOAT,encoding=PLAIN; # 请注意,如标识符部分所述,a*b包含特殊字符,需要用``括起来使用 # create timeseries root.sg.a*b with datatype=FLOAT,encoding=PLAIN 是错误用法 @@ -397,7 +441,7 @@ create timeseries root.sg.a with datatype=FLOAT,encoding=PLAIN; # 创建时间序列 root.sg.b create timeseries root.sg.b with datatype=FLOAT,encoding=PLAIN; -# 查询时间序列 root.sg.a*b +# 查询时间序列 root.sg.`a*b` select `a*b` from root.sg # 其结果集表头为 |Time|root.sg.a*b| @@ -410,9 +454,7 @@ select a*b from root.sg ### 标识符 -路径结点名不为通配符时,使用方法和标识符一致。 - -使用反引号引起的路径结点名,若其中含有特殊字符 . `,在结果集中展示时会添加反引号,其它情况下会正常展示,具体请参考特殊情况示例中结果集的示例。 +路径结点名不为通配符时,使用方法和标识符一致。**在 SQL 中需要使用反引号引用的路径结点,在结果集中也会用反引号引起。** 需要使用反引号进行引用的部分特殊情况示例: @@ -422,9 +464,6 @@ select a*b from root.sg # 路径结点名中包含特殊字符,时间序列各结点为["root","sg","www.`baidu.com"] create timeseries root.sg.`www.``baidu.com`.a with datatype=FLOAT,encoding=PLAIN; -# 路径结点名为系统关键字 -create timeseries root.sg.`select`.a with datatype=FLOAT,encoding=PLAIN; - # 路径结点名为纯数字 create timeseries root.sg.`111` with datatype=FLOAT,encoding=PLAIN; ``` @@ -435,8 +474,7 @@ create timeseries root.sg.`111` with datatype=FLOAT,encoding=PLAIN; +---------------------------+-----+-------------+--------+--------+-----------+----+----------+ | timeseries|alias|storage group|dataType|encoding|compression|tags|attributes| +---------------------------+-----+-------------+--------+--------+-----------+----+----------+ -| root.sg.select.a| null| root.sg| FLOAT| PLAIN| SNAPPY|null| null| -| root.sg.111.a| null| root.sg| FLOAT| PLAIN| SNAPPY|null| null| +| root.sg.`111`.a| null| root.sg| FLOAT| PLAIN| SNAPPY|null| null| |root.sg.`www.``baidu.com`.a| null| root.sg| FLOAT| PLAIN| SNAPPY|null| null| +---------------------------+-----+-------------+--------+--------+-----------+----+----------+ ``` @@ -444,12 +482,9 @@ create timeseries root.sg.`111` with datatype=FLOAT,encoding=PLAIN; - 插入数据时,如下情况需要使用反引号对特殊节点名进行引用: ```SQL -# 路径结点名中包含特殊字符 . 
和 ` +# 路径结点名中包含特殊字符 insert into root.sg.`www.``baidu.com`(timestamp, a) values(1, 2); -# 路径结点名为系统关键字 -insert into root.sg.`select`(timestamp, a) values (1, 2); - # 路径结点名为纯数字 insert into root.sg(timestamp, `111`) values (1, 2); ``` @@ -457,12 +492,9 @@ insert into root.sg(timestamp, `111`) values (1, 2); - 查询数据时,如下情况需要使用反引号对特殊节点名进行引用: ```SQL -# 路径结点名中包含特殊字符 . 和 ` +# 路径结点名中包含特殊字符 select a from root.sg.`www.``baidu.com`; -# 路径结点名为系统关键字 -select a from root.sg.`select` - # 路径结点名为纯数字 select `111` from root.sg ``` @@ -477,27 +509,218 @@ select `111` from root.sg |1970-01-01T08:00:00.001+08:00| 2.0| +-----------------------------+---------------------------+ -# select a from root.sg.`select` 结果集 -+-----------------------------+----------------+ -| Time|root.sg.select.a| -+-----------------------------+----------------+ -|1970-01-01T08:00:00.001+08:00| 2.0| -+-----------------------------+----------------+ - # select `111` from root.sg 结果集 -+-----------------------------+-----------+ -| Time|root.sg.111| -+-----------------------------+-----------+ -|1970-01-01T08:00:00.001+08:00| 2.0| -+-----------------------------+-----------+ ++-----------------------------+-------------+ +| Time|root.sg.`111`| ++-----------------------------+-------------+ +|1970-01-01T08:00:00.001+08:00| 2.0| ++-----------------------------+-------------+ +``` + +## 键值对 + +**键值对的键和值可以被定义为标识符或者常量。** + +下面将介绍键值对的使用场景。 + +- 触发器中表示触发器属性的键值对。参考示例语句中 WITH 后的属性键值对。 + +```SQL +# 以字符串形式表示键值对 +CREATE TRIGGER `alert-listener-sg1d1s1` +AFTER INSERT +ON root.sg1.d1.s1 +AS 'org.apache.iotdb.db.engine.trigger.example.AlertListener' +WITH ( + 'lo' = '0', + 'hi' = '100.0' +) + +# 以标识符和常量形式表示键值对 +CREATE TRIGGER `alert-listener-sg1d1s1` +AFTER INSERT +ON root.sg1.d1.s1 +AS 'org.apache.iotdb.db.engine.trigger.example.AlertListener' +WITH ( + lo = 0, + hi = 100.0 +) +``` + +- 时间序列中用于表示标签和属性的键值对。 + +```sql +# 创建时间序列时设定标签和属性,用字符串来表示键值对。 +CREATE timeseries root.turbine.d1.s1(temprature) +WITH datatype = FLOAT, encoding = RLE, compression = SNAPPY, 'max_point_number' = '5' +TAGS('tag1' = 'v1', 'tag2'= 'v2') ATTRIBUTES('attr1' = 'v1', 'attr2' = 'v2') + +# 创建时间序列时设定标签和属性,用标识符和常量来表示键值对。 +CREATE timeseries root.turbine.d1.s1(temprature) +WITH datatype = FLOAT, encoding = RLE, compression = SNAPPY, max_point_number = 5 +TAGS(tag1 = v1, tag2 = v2) ATTRIBUTES(attr1 = v1, attr2 = v2) +``` + +```sql +# 修改时间序列的标签和属性 +ALTER timeseries root.turbine.d1.s1 SET 'newTag1' = 'newV1', 'attr1' = 'newV1' + +ALTER timeseries root.turbine.d1.s1 SET newTag1 = newV1, attr1 = newV1 +``` + +```sql +# 修改标签名 +ALTER timeseries root.turbine.d1.s1 RENAME 'tag1' TO 'newTag1' + +ALTER timeseries root.turbine.d1.s1 RENAME tag1 TO newTag1 +``` + +```sql +# 插入别名、标签、属性 +ALTER timeseries root.turbine.d1.s1 UPSERT +ALIAS='newAlias' TAGS('tag2' = 'newV2', 'tag3' = 'v3') ATTRIBUTES('attr3' ='v3', 'attr4'='v4') + +ALTER timeseries root.turbine.d1.s1 UPSERT +ALIAS = newAlias TAGS(tag2 = newV2, tag3 = v3) ATTRIBUTES(attr3 = v3, attr4 = v4) +``` + +```sql +# 添加新的标签 +ALTER timeseries root.turbine.d1.s1 ADD TAGS 'tag3' = 'v3', 'tag4' = 'v4' + +ALTER timeseries root.turbine.d1.s1 ADD TAGS tag3 = v3, tag4 = v4 +``` + +```sql +# 添加新的属性 +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES 'attr3' = 'v3', 'attr4' = 'v4' + +ALTER timeseries root.turbine.d1.s1 ADD ATTRIBUTES attr3 = v3, attr4 = v4 +``` + +```sql +# 查询符合条件的时间序列信息 +SHOW timeseries root.ln.** WHRER 'unit' = 'c' + +SHOW timeseries root.ln.** WHRER unit = c +``` + +- 创建 Pipe 以及 PipeSink 时表示属性的键值对。 + +```SQL +# 创建 PipeSink 时表示属性 +CREATE 
PIPESINK my_iotdb AS IoTDB ('ip' = '输入你的IP') + +# 创建 Pipe 时在 WITH 子句中表示属性 +CREATE PIPE my_pipe TO my_iotdb FROM +(select ** from root WHERE time>=yyyy-mm-dd HH:MM:SS) WITH 'SyncDelOp' = 'true' ``` ## 关键字和保留字 -关键字是在 SQL 具有特定含义的词,不能直接用于标识符,需要使用反引号进行转义。保留字是关键字的一个子集,保留字不能用于标识符(即使进行了转义)。 +关键字是在 SQL 具有特定含义的词,可以作为标识符。保留字是关键字的一个子集,保留字不能用于标识符。 关于 IoTDB 的关键字和保留字列表,可以查看 [关键字和保留字](https://iotdb.apache.org/zh/UserGuide/Master/Reference/Keywords.html) 。 +## Session、TsFile API + +在使用Session、TsFIle API时,如果您调用的方法需要以字符串形式传入物理量(measurement)、设备(device)、存储组(storage group)、路径(path)等参数,**请保证所传入字符串与使用 SQL 语句时的写法一致**,下面是一些帮助您理解的例子。具体代码示例可以参考:`example/session/src/main/java/org/apache/iotdb/SyntaxConventionRelatedExample.java` + +1. 以创建时间序列 createTimeseries 为例: + +```Java +public void createTimeseries( + String path, + TSDataType dataType, + TSEncoding encoding, + CompressionType compressor) + throws IoTDBConnectionException, StatementExecutionException; +``` + +如果您希望创建时间序列 root.sg.a,root.sg.\`a.\`\`"b\`,root.sg.\`111\`,您使用的 SQL 语句应该如下所示: + +```SQL +create timeseries root.sg.a with datatype=FLOAT,encoding=PLAIN,compressor=SNAPPY; + +# 路径结点名中包含特殊字符,时间序列各结点为["root","sg","a.`\"b"] +create timeseries root.sg.`a.``"b` with datatype=FLOAT,encoding=PLAIN,compressor=SNAPPY; + +# 路径结点名为纯数字 +create timeseries root.sg.`111` with datatype=FLOAT,encoding=PLAIN,compressor=SNAPPY; +``` + +您在调用 createTimeseries 方法时,应该按照如下方法赋值 path 字符串,保证 path 字符串内容与使用 SQL 时一致: + +```Java +// 时间序列 root.sg.a +String path = "root.sg.a"; + +// 时间序列 root.sg.`a``"b` +String path = "root.sg.`a``\"b`"; + +// 时间序列 root.sg.`111` +String path = "root.sg.`111`"; +``` + +2. 以插入数据 insertRecord 为例: + +```Java +public void insertRecord( + String deviceId, + long time, + List measurements, + List types, + Object... values) + throws IoTDBConnectionException, StatementExecutionException; +``` + +如果您希望向时间序列 root.sg.a,root.sg.\`a.\`\`"b\`,root.sg.\`111\`中插入数据,您使用的 SQL 语句应该如下所示: + +```SQL +insert into root.sg(timestamp, a, `a.``"b`, `111`) values (1, 2, 2, 2); +``` + +您在调用 insertRecord 方法时,应该按照如下方法赋值 deviceId 和 measurements: + +```Java +// deviceId 为 root.sg +String deviceId = "root.sg"; + +// measurements +String[] measurements = new String[]{"a", "`a.``\"b`", "`111`"}; +List measurementList = Arrays.asList(measurements); +``` + +3. 
以查询数据 executeRawDataQuery 为例: + +```Java +public SessionDataSet executeRawDataQuery( + List paths, + long startTime, + long endTime) + throws StatementExecutionException, IoTDBConnectionException; +``` + +如果您希望查询时间序列 root.sg.a,root.sg.\`a.\`\`"b\`,root.sg.\`111\`的数据,您使用的 SQL 语句应该如下所示: + +```SQL +select a from root.sg + +# 路径结点名中包含特殊字符 +select `a.``"b` from root.sg; + +# 路径结点名为纯数字 +select `111` from root.sg +``` + +您在调用 executeRawDataQuery 方法时,应该按照如下方法赋值 paths: + +```Java +// paths +String[] paths = new String[]{"root.sg.a", "root.sg.`a.``\"b`", "root.sg.`111`"}; +List pathList = Arrays.asList(paths); +``` + ## 了解更多 请阅读代码仓库中的词法和语法描述文件: diff --git a/docs/zh/UserGuide/Reference/TSDB-Comparison.md b/docs/zh/UserGuide/Reference/TSDB-Comparison.md index 75c1928bae1c..6bee5662f489 100644 --- a/docs/zh/UserGuide/Reference/TSDB-Comparison.md +++ b/docs/zh/UserGuide/Reference/TSDB-Comparison.md @@ -102,8 +102,8 @@ Prometheus 和 Druid 也因为时间序列数据管理而闻名,但是 Prometh - *SQL-like*: - - IoTDB 和 InfluxDB 支持 SQL-like 语言。另外,IoTDB 和 Calcite 的集成几乎完成(PR 已经提交),这意味着 IoTDB 很快就能支持标准 SQL。 - - OpenTSDB 和 KairosDB 只支持 Rest API。IoTDB 也支持 Rest API(PR 已经提交)。 + - IoTDB 和 InfluxDB 支持 SQL-like 语言。 + - OpenTSDB 和 KairosDB 只支持 Rest API。IoTDB 也支持 Rest API。 - TimescaleDB 使用的是和 PostgreSQL 一样的 SQL。 - *Schema*: @@ -233,9 +233,6 @@ Prometheus 和 Druid 也因为时间序列数据管理而闻名,但是 Prometh 现在只有 IoTDB 支持了 JDBC driver(虽然不是所有接口都实现),这使得 IoTDB 可以整合许多其它的基于 JDBC driver 的软件。 -- *Standard SQL*: - - 正如之前提到的,IoTDB 和 Calcite 的集成几乎完成(PR 已经提交),这意味着 IoTDB 很快就能支持标准 SQL。 - *Spark and Hive integration*: diff --git a/docs/zh/UserGuide/UDF-Library/String-Processing.md b/docs/zh/UserGuide/UDF-Library/String-Processing.md index 2b5fec4e8306..4ff0d7c2d687 100644 --- a/docs/zh/UserGuide/UDF-Library/String-Processing.md +++ b/docs/zh/UserGuide/UDF-Library/String-Processing.md @@ -21,6 +21,580 @@ # 字符串处理 +## Length + +### 函数简介 + +本函数用于获取输入序列的长度。 + +**函数名:** LENGTH + +**输入序列:** 仅支持单个输入序列,类型为 TEXT。 + +**输出序列:** 输出单个序列,类型为 INT32。 + +**提示:** 如果输入是NULL,返回NULL。 + +### 使用示例 + +输入序列: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s1| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| +|1970-01-01T08:00:00.002+08:00| 22test22| ++-----------------------------+--------------+ +``` + +用于查询的 SQL 语句: + +```sql +select s1, length(s1) from root.sg1.d1 +``` + +输出序列: + +``` ++-----------------------------+--------------+----------------------+ +| Time|root.sg1.d1.s1|length(root.sg1.d1.s1)| ++-----------------------------+--------------+----------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| 6| +|1970-01-01T08:00:00.002+08:00| 22test22| 8| ++-----------------------------+--------------+----------------------+ +``` + +## StrLocate + +### 函数简介 + +本函数用于获取`target`子串第一次出现在输入序列的位置,如果输入序列中不包含`target`则返回 -1 。 + +**函数名:** LENGTH + +**输入序列:** 仅支持单个输入序列,类型为 TEXT。 + +**参数:** + ++ `target`: 需要被定位的子串。 ++ `reverse`: 指定是否需要倒序定位,默认值为`false`, 即从左至右定位。 + +**输出序列:** 输出单个序列,类型为INT32。 + +**提示:** 下标从 0 开始。 + +### 使用示例 + +输入序列: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s1| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| +|1970-01-01T08:00:00.002+08:00| 22test22| ++-----------------------------+--------------+ +``` + +用于查询的 SQL 语句: + +```sql +select s1, locate(s1, "target"="1") from root.sg1.d1 +``` + +输出序列: + +``` ++-----------------------------+--------------+------------------------------------+ +| Time|root.sg1.d1.s1|locate(root.sg1.d1.s1, "target"="1")| 
++-----------------------------+--------------+------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| 0| +|1970-01-01T08:00:00.002+08:00| 22test22| -1| ++-----------------------------+--------------+------------------------------------+ +``` + +另一个用于查询的 SQL 语句: + +```sql +select s1, locate(s1, "target"="1", "reverse"="true") from root.sg1.d1 +``` + +输出序列: + +``` ++-----------------------------+--------------+------------------------------------------------------+ +| Time|root.sg1.d1.s1|locate(root.sg1.d1.s1, "target"="1", "reverse"="true")| ++-----------------------------+--------------+------------------------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| 5| +|1970-01-01T08:00:00.002+08:00| 22test22| -1| ++-----------------------------+--------------+------------------------------------------------------+ +``` + +## StartsWith + +### 函数简介 + +本函数用于判断输入序列是否有指定前缀。 + +**函数名:** STARTSWITH + +**输入序列:** 仅支持单个输入序列,类型为 TEXT。 + +**参数:** ++ `target`: 需要匹配的前缀。 + +**输出序列:** 输出单个序列,类型为 BOOLEAN。 + +**提示:** 如果输入是NULL,返回NULL。 + +### 使用示例 + +输入序列: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s1| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| +|1970-01-01T08:00:00.002+08:00| 22test22| ++-----------------------------+--------------+ +``` + +用于查询的 SQL 语句: + +```sql +select s1, startswith(s1, "target"="1") from root.sg1.d1 +``` + +输出序列: + +``` ++-----------------------------+--------------+----------------------------------------+ +| Time|root.sg1.d1.s1|startswith(root.sg1.d1.s1, "target"="1")| ++-----------------------------+--------------+----------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| true| +|1970-01-01T08:00:00.002+08:00| 22test22| false| ++-----------------------------+--------------+----------------------------------------+ +``` + +## EndsWith + +### 函数简介 + +本函数用于判断输入序列是否有指定后缀。 + +**函数名:** ENDSWITH + +**输入序列:** 仅支持单个输入序列,类型为 TEXT。 + +**参数:** ++ `target`: 需要匹配的后缀。 + +**输出序列:** 输出单个序列,类型为 BOOLEAN。 + +**提示:** 如果输入是NULL,返回NULL。 + +### 使用示例 + +输入序列: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s1| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| +|1970-01-01T08:00:00.002+08:00| 22test22| ++-----------------------------+--------------+ +``` + +用于查询的 SQL 语句: + +```sql +select s1, endswith(s1, "target"="1") from root.sg1.d1 +``` + +输出序列: + +``` ++-----------------------------+--------------+--------------------------------------+ +| Time|root.sg1.d1.s1|endswith(root.sg1.d1.s1, "target"="1")| ++-----------------------------+--------------+--------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| true| +|1970-01-01T08:00:00.002+08:00| 22test22| false| ++-----------------------------+--------------+--------------------------------------+ +``` + +## Concat + +### 函数简介 + +本函数用于拼接输入序列和`target`字串。 + +**函数名:** CONCAT + +**输入序列:** 至少一个输入序列,类型为 TEXT。 + +**参数:** ++ `targets`: 一系列 K-V, key需要以`target`为前缀且不重复, value是待拼接的字符串。 ++ `series_behind`: 指定拼接时时间序列是否在后面,默认为`false`。 + +**输出序列:** 输出单个序列,类型为 TEXT。 + +**提示:** ++ 如果输入序列是NULL, 跳过该序列的拼接。 ++ 函数只能将输入序列和`targets`区分开各自拼接。`concat(s1, "target1"="IoT", s2, "target2"="DB")`和 + `concat(s1, s2, "target1"="IoT", "target2"="DB")`得到的结果是一样的。 + +### 使用示例 + +输入序列: + +``` ++-----------------------------+--------------+--------------+ +| Time|root.sg1.d1.s1|root.sg1.d1.s2| ++-----------------------------+--------------+--------------+ 
+|1970-01-01T08:00:00.001+08:00| 1test1| null| +|1970-01-01T08:00:00.002+08:00| 22test22| 2222test| ++-----------------------------+--------------+--------------+ +``` + +用于查询的 SQL 语句: + +```sql +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB") from root.sg1.d1 +``` + +输出序列: + +``` ++-----------------------------+--------------+--------------+-----------------------------------------------------------------------+ +| Time|root.sg1.d1.s1|root.sg1.d1.s2|concat(root.sg1.d1.s1, root.sg1.d1.s2, "target1"="IoT", "target2"="DB")| ++-----------------------------+--------------+--------------+-----------------------------------------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| null| 1test1IoTDB| +|1970-01-01T08:00:00.002+08:00| 22test22| 2222test| 22test222222testIoTDB| ++-----------------------------+--------------+--------------+-----------------------------------------------------------------------+ +``` + +另一个用于查询的 SQL 语句: + +```sql +select s1, s2, concat(s1, s2, "target1"="IoT", "target2"="DB", "series_behind"="true") from root.sg1.d1 +``` + +输出序列: + +``` ++-----------------------------+--------------+--------------+-----------------------------------------------------------------------------------------------+ +| Time|root.sg1.d1.s1|root.sg1.d1.s2|concat(root.sg1.d1.s1, root.sg1.d1.s2, "target1"="IoT", "target2"="DB", "series_behind"="true")| ++-----------------------------+--------------+--------------+-----------------------------------------------------------------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| null| IoTDB1test1| +|1970-01-01T08:00:00.002+08:00| 22test22| 2222test| IoTDB22test222222test| ++-----------------------------+--------------+--------------+-----------------------------------------------------------------------------------------------+ +``` + +## Substr + +### 函数简介 + +本函数用于获取下标从`start`到`end - 1`的子串 + +**函数名:** SUBSTR + +**输入序列:** 仅支持单个输入序列,类型为TEXT。 + +**参数:** ++ `start`: 指定子串开始下标。 ++ `end`: 指定子串结束下标。 + +**输出序列:** 输出单个序列,类型为 TEXT。 + +**提示:** 如果输入是NULL,返回NULL。 + +### 使用示例 + +输入序列: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s1| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| +|1970-01-01T08:00:00.002+08:00| 22test22| ++-----------------------------+--------------+ +``` + +用于查询的 SQL 语句: + +```sql +select s1, substr(s1, "start"="0", "end"="2") from root.sg1.d1 +``` + +输出序列: + +``` ++-----------------------------+--------------+----------------------------------------------+ +| Time|root.sg1.d1.s1|substr(root.sg1.d1.s1, "start"="0", "end"="2")| ++-----------------------------+--------------+----------------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| 1t| +|1970-01-01T08:00:00.002+08:00| 22test22| 22| ++-----------------------------+--------------+----------------------------------------------+ +``` + +## Upper + +### 函数简介 + +本函数用于将输入序列转化为大写。 + +**函数名:** UPPER + +**输入序列:** 仅支持单个输入序列,类型为TEXT。 + +**输出序列:** 输出单个序列,类型为 TEXT。 + +**提示:** 如果输入是NULL,返回NULL。 + +### 使用示例 + +输入序列: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s1| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| +|1970-01-01T08:00:00.002+08:00| 22test22| ++-----------------------------+--------------+ +``` + +用于查询的 SQL 语句: + +```sql +select s1, upper(s1) from root.sg1.d1 +``` + +输出序列: + +``` ++-----------------------------+--------------+---------------------+ +| 
Time|root.sg1.d1.s1|upper(root.sg1.d1.s1)| ++-----------------------------+--------------+---------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| 1TEST1| +|1970-01-01T08:00:00.002+08:00| 22test22| 22TEST22| ++-----------------------------+--------------+---------------------+ +``` + +## Lower + +### 函数简介 + +本函数用于将输入序列转换为小写。 + +**函数名:** LOWER + +**输入序列:** 仅支持单个输入序列,类型为TEXT。 + +**输出序列:** 输出单个序列,类型为 TEXT。 + +**提示:** 如果输入是NULL,返回NULL。 + +### 使用示例 + +输入序列: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s1| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1TEST1| +|1970-01-01T08:00:00.002+08:00| 22TEST22| ++-----------------------------+--------------+ +``` + +用于查询的 SQL 语句: + +```sql +select s1, lower(s1) from root.sg1.d1 +``` + +输出序列: + +``` ++-----------------------------+--------------+---------------------+ +| Time|root.sg1.d1.s1|lower(root.sg1.d1.s1)| ++-----------------------------+--------------+---------------------+ +|1970-01-01T08:00:00.001+08:00| 1TEST1| 1test1| +|1970-01-01T08:00:00.002+08:00| 22TEST22| 22test22| ++-----------------------------+--------------+---------------------+ +``` + +## Trim + +### 函数简介 + +本函数用于移除输入序列前后的空格。 + +**函数名:** TRIM + +**输入序列:** 仅支持单个输入序列,类型为TEXT。 + +**输出序列:** 输出单个序列,类型为 TEXT。 + +**提示:** 如果输入是NULL,返回NULL。 + +### 使用示例 + +输入序列: + +``` ++-----------------------------+--------------+ +| Time|root.sg1.d1.s3| ++-----------------------------+--------------+ +|1970-01-01T08:00:00.002+08:00| 3querytest3| +|1970-01-01T08:00:00.003+08:00| 3querytest3 | ++-----------------------------+--------------+ +``` + +用于查询的 SQL 语句: + +```sql +select s3, trim(s3) from root.sg1.d1 +``` + +输出序列: + +``` ++-----------------------------+--------------+--------------------+ +| Time|root.sg1.d1.s3|trim(root.sg1.d1.s3)| ++-----------------------------+--------------+--------------------+ +|1970-01-01T08:00:00.002+08:00| 3querytest3| 3querytest3| +|1970-01-01T08:00:00.003+08:00| 3querytest3 | 3querytest3| ++-----------------------------+--------------+--------------------+ +``` + +## StrCmp + +### 函数简介 + +本函数用于比较两个输入序列。 如果值相同返回 `0` , 序列1的值小于序列2的值返回一个`负数`,序列1的值大于序列2的值返回一个`正数`。 + +**函数名:** StrCmp + +**输入序列:** 输入两个序列,类型均为 TEXT。 + +**输出序列:** 输出单个序列,类型为 TEXT。 + +**提示:** 如果任何一个输入是NULL,返回NULL。 + +### 使用示例 + +输入序列: + +``` ++-----------------------------+--------------+--------------+ +| Time|root.sg1.d1.s1|root.sg1.d1.s2| ++-----------------------------+--------------+--------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| null| +|1970-01-01T08:00:00.002+08:00| 22test22| 2222test| ++-----------------------------+--------------+--------------+ +``` + +用于查询的 SQL 语句: + +```sql +select s1, s2, strcmp(s1, s2) from root.sg1.d1 +``` + +输出序列: + +``` ++-----------------------------+--------------+--------------+--------------------------------------+ +| Time|root.sg1.d1.s1|root.sg1.d1.s2|strcmp(root.sg1.d1.s1, root.sg1.d1.s2)| ++-----------------------------+--------------+--------------+--------------------------------------+ +|1970-01-01T08:00:00.001+08:00| 1test1| null| null| +|1970-01-01T08:00:00.002+08:00| 22test22| 2222test| 66| ++-----------------------------+--------------+--------------+--------------------------------------+ +``` + +## StrReplace + +### 函数简介 + +本函数用于将文本中的子串替换为指定的字符串。 + +**函数名:** STRREPLACE + +**输入序列:** 仅支持单个输入序列,类型为 TEXT。 + +**参数:** + ++ `target`: 需要替换的字符子串 ++ `replace`: 替换后的字符串。 ++ `limit`: 替换次数,大于等于 -1 的整数,默认为 -1 表示所有匹配的子串都会被替换。 ++ `offset`: 需要跳过的匹配次数,即前`offset`次匹配到的字符子串并不会被替换,默认为 0。 ++ `reverse`: 
是否需要反向计数,默认为 false 即按照从左向右的次序。 + +**输出序列:** 输出单个序列,类型为 TEXT。 + +### 使用示例 + +输入序列: + +``` ++-----------------------------+---------------+ +| Time|root.test.d1.s1| ++-----------------------------+---------------+ +|2021-01-01T00:00:01.000+08:00| A,B,A+,B-| +|2021-01-01T00:00:02.000+08:00| A,A+,A,B+| +|2021-01-01T00:00:03.000+08:00| B+,B,B| +|2021-01-01T00:00:04.000+08:00| A+,A,A+,A| +|2021-01-01T00:00:05.000+08:00| A,B-,B,B| ++-----------------------------+---------------+ +``` + +用于查询的 SQL 语句: + +```sql +select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1 +``` + +输出序列: + +``` ++-----------------------------+-----------------------------------------+ +| Time|strreplace(root.test.d1.s1, "target"=",",| +| | "replace"="/", "limit"="2")| ++-----------------------------+-----------------------------------------+ +|2021-01-01T00:00:01.000+08:00| A/B/A+,B-| +|2021-01-01T00:00:02.000+08:00| A/A+/A,B+| +|2021-01-01T00:00:03.000+08:00| B+/B/B| +|2021-01-01T00:00:04.000+08:00| A+/A/A+,A| +|2021-01-01T00:00:05.000+08:00| A/B-/B,B| ++-----------------------------+-----------------------------------------+ +``` + +另一个用于查询的 SQL 语句: + +```sql +select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1 +``` + +输出序列: + +``` ++-----------------------------+-----------------------------------------------------+ +| Time|strreplace(root.test.d1.s1, "target"=",", "replace"= | +| | "|", "limit"="1", "offset"="1", "reverse"="true")| ++-----------------------------+-----------------------------------------------------+ +|2021-01-01T00:00:01.000+08:00| A,B/A+,B-| +|2021-01-01T00:00:02.000+08:00| A,A+/A,B+| +|2021-01-01T00:00:03.000+08:00| B+/B,B| +|2021-01-01T00:00:04.000+08:00| A+,A/A+,A| +|2021-01-01T00:00:05.000+08:00| A,B-/B,B| ++-----------------------------+-----------------------------------------------------+ +``` + ## RegexMatch ### 函数简介 @@ -209,82 +783,4 @@ select regexsplit(s1, "regex"=",", "index"="3") from root.test.d1 |2021-01-01T00:00:04.000+08:00| A| |2021-01-01T00:00:05.000+08:00| B| +-----------------------------+-----------------------------------------------------+ -``` - -## StrReplace - -### 函数简介 - -本函数用于将文本中的子串替换为指定的字符串。 - -**函数名:** STRREPLACE - -**输入序列:** 仅支持单个输入序列,类型为 TEXT。 - -**参数:** - -+ `target`: 需要替换的字符子串 -+ `replace`: 替换后的字符串。 -+ `limit`: 替换次数,大于等于 -1 的整数,默认为 -1 表示所有匹配的子串都会被替换。 -+ `offset`: 需要跳过的匹配次数,即前`offset`次匹配到的字符子串并不会被替换,默认为 0。 -+ `reverse`: 是否需要反向计数,默认为 false 即按照从左向右的次序。 - -**输出序列:** 输出单个序列,类型为 TEXT。 - -### 使用示例 - -输入序列: - -``` -+-----------------------------+---------------+ -| Time|root.test.d1.s1| -+-----------------------------+---------------+ -|2021-01-01T00:00:01.000+08:00| A,B,A+,B-| -|2021-01-01T00:00:02.000+08:00| A,A+,A,B+| -|2021-01-01T00:00:03.000+08:00| B+,B,B| -|2021-01-01T00:00:04.000+08:00| A+,A,A+,A| -|2021-01-01T00:00:05.000+08:00| A,B-,B,B| -+-----------------------------+---------------+ -``` - -用于查询的 SQL 语句: - -```sql -select strreplace(s1, "target"=",", "replace"="/", "limit"="2") from root.test.d1 -``` - -输出序列: - -``` -+-----------------------------+-----------------------------------------+ -| Time|strreplace(root.test.d1.s1, "target"=",",| -| | "replace"="/", "limit"="2")| -+-----------------------------+-----------------------------------------+ -|2021-01-01T00:00:01.000+08:00| A/B/A+,B-| -|2021-01-01T00:00:02.000+08:00| A/A+/A,B+| -|2021-01-01T00:00:03.000+08:00| B+/B/B| -|2021-01-01T00:00:04.000+08:00| A+/A/A+,A| -|2021-01-01T00:00:05.000+08:00| A/B-/B,B| 
-+-----------------------------+-----------------------------------------+ -``` - -另一个用于查询的 SQL 语句: - -```sql -select strreplace(s1, "target"=",", "replace"="/", "limit"="1", "offset"="1", "reverse"="true") from root.test.d1 -``` - -输出序列: - -``` -+-----------------------------+-----------------------------------------------------+ -| Time|strreplace(root.test.d1.s1, "target"=",", "replace"= | -| | "|", "limit"="1", "offset"="1", "reverse"="true")| -+-----------------------------+-----------------------------------------------------+ -|2021-01-01T00:00:01.000+08:00| A,B/A+,B-| -|2021-01-01T00:00:02.000+08:00| A,A+/A,B+| -|2021-01-01T00:00:03.000+08:00| B+/B,B| -|2021-01-01T00:00:04.000+08:00| A+,A/A+,A| -|2021-01-01T00:00:05.000+08:00| A,B-/B,B| -+-----------------------------+-----------------------------------------------------+ ``` \ No newline at end of file diff --git a/docs/zh/UserGuide/Write-And-Delete-Data/Load-External-Tsfile.md b/docs/zh/UserGuide/Write-And-Delete-Data/Load-External-Tsfile.md index 3bf30d8b0e10..34a7e0f1f7f5 100644 --- a/docs/zh/UserGuide/Write-And-Delete-Data/Load-External-Tsfile.md +++ b/docs/zh/UserGuide/Write-And-Delete-Data/Load-External-Tsfile.md @@ -39,7 +39,7 @@ AUTOREGISTER 选项表示当待加载的 tsfile 文件中时间序列对应的元数据不存在时,用户可以选择是否自动创建 schema ,参数为 true 表示自动创建 schema,相反 false 表示不创建,缺省时默认创建 schema。 -SGLEVEL 选项,当 tsfile 对应的存储组不存在时,用户可以通过 sglevel 参数的值来制定存储组的级别,默认为`iotdb-engine.properties`中设置的级别。例如当设置 level 参数为1时表明此 tsfile 中所有时间序列中层级为1的前缀路径是存储组,即若存在设备 root.sg.d1.s1,此时 root.sg 被指定为存储组。 +SGLEVEL 选项,当 tsfile 对应的存储组不存在时,用户可以通过 sglevel 参数的值来制定存储组的级别,默认为`iotdb-datanode.properties`中设置的级别。例如当设置 level 参数为1时表明此 tsfile 中所有时间序列中层级为1的前缀路径是存储组,即若存在设备 root.sg.d1.s1,此时 root.sg 被指定为存储组。 VERIFY 选项表示是否对载入的 tsfile 中的所有时间序列进行元数据检查,默认为 true。开启时,若载入的 tsfile 中的时间序列在当前 iotdb 中也存在,则会比较该时间序列的所有 Measurement 的数据类型是否一致,如果出现不一致将会导致载入失败,关闭该选项会跳过检查,载入更快。 diff --git a/example/ext-pipe-plugin-example/README.md b/example/ext-pipe-plugin-example/README.md new file mode 100644 index 000000000000..ac40cbe98a80 --- /dev/null +++ b/example/ext-pipe-plugin-example/README.md @@ -0,0 +1,60 @@ + + +# How to develop 1 ext-pipe plugin? + +## 1. Create 1 new java project, add below maven dependency. + +```xml + + + external-pipe-api + org.apache.iotdb + 0.14.0-SNAPSHOT + + +``` + +## 2. Develop 2 class to implement below 2 java Interface. + +```java +IExternalPipeSinkWriterFactory +IExternalPipeSinkWriter +``` + +**Note:** Please refer to example codes in **example/ext-pipe-plugin-example** . + + +## 3. build project and get plugin's xxx.jar file + +```shell +xxx-jar-with-dependencies.jar +``` + + +## 4. 
install plugin's xxx.jar file to IoTDB + +```shell +mkdir -p ext/extPipe +cp xxx-jar-with-dependencies.jar ext/extPipe +nohup ./A/sbin/start-server.sh >/dev/null 2>&1 +``` + diff --git a/example/ext-pipe-plugin-example/pom.xml b/example/ext-pipe-plugin-example/pom.xml new file mode 100644 index 000000000000..a480eba04fd8 --- /dev/null +++ b/example/ext-pipe-plugin-example/pom.xml @@ -0,0 +1,59 @@ + + + + 4.0.0 + iotdb-ext-pipe-example + IoTDB-Ext-Pipe Examples + + 8 + 8 + + + + external-pipe-api + org.apache.iotdb + 0.14.0-SNAPSHOT + + + + + + maven-assembly-plugin + + + jar-with-dependencies + + + + + + single + + prepare-package + + + org.apache.maven.plugins + 3.1.0 + + + + diff --git a/example/ext-pipe-plugin-example/src/main/java/org/apache/iotdb/extpipe/ExtPipeSinkWriterFactory.java b/example/ext-pipe-plugin-example/src/main/java/org/apache/iotdb/extpipe/ExtPipeSinkWriterFactory.java new file mode 100644 index 000000000000..a78d0ae8072e --- /dev/null +++ b/example/ext-pipe-plugin-example/src/main/java/org/apache/iotdb/extpipe/ExtPipeSinkWriterFactory.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.extpipe; + +import org.apache.iotdb.pipe.external.api.IExternalPipeSinkWriter; +import org.apache.iotdb.pipe.external.api.IExternalPipeSinkWriterFactory; + +import java.util.Map; + +//== Command Format: +// 1) CREATE PIPESINK ${extSinkName} AS ${extSinkType} ( ${parameters} ) +// 2) CREATE PIPE ${pipeName} TO ${extSinkName} +// +//== Command Example: +// CREATE PIPESINK mySink1 AS mySink (address='http://129.168.1.1/abc', user='admin', passwd='admin123', project='project1', table='table1', thread_num='5', batch_size='1000'); +// CREATE PIPE pipe2mySink TO mySink1; +// +//== About Parameters in Command: Below 4 parameter keys are reserved and used by IotDB, +// thread_num : the number of IExternalPipeSinkWriter, default value 1. +// batch_size : the number of operations to get from pipe source each time for 1 thread, default value 100_000. +// attempt_times : the number of attempt times when 1 operation fails, default value 3. +// retry_interval : waiting interval(ms) before retry when operation fail, default value 1_000. +//== Other parameters keys can be customer-defined and used by ext-pipe plugin. +// such as address, user, project, table etc. 
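+//== Note (illustrative, not part of the original example): the customer-defined keys are read back by
+//   the plugin itself from the sinkParams map, e.g. `String address = sinkParams.get(PARAM_ADDRESS);`
+//   inside initialize() or open(); see the PARAM_* constants defined in the class below.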
+ +public class ExtPipeSinkWriterFactory implements IExternalPipeSinkWriterFactory { + + private static final String PARAM_ADDRESS = "address"; + private static final String PARAM_USER = "user"; + private static final String PARAM_PASSWD = "passwd"; + private static final String PARAM_PROJECT = "project"; + private static final String PARAM_TABLE = "table"; + + private Map sinkParams; + + /** + * Return the provider info of current exe-pipe plugin. + * In current IoTDB, this information is not important. + * @return + */ + @Override + public String getProviderName() { + return "Company_ABC"; + } + + /** + * Get the External PIPE's type name. For example: If customer self-defined getExternalPipeType() + * return "mySink", corresponding input CMD should be "CREATE PIPESINK mySink1 AS mySink (...)". + * Otherwise, the CMD will be refused. * + * + * @return External PIPE s type name + */ + @Override + public String getExternalPipeType() { + return "mySink"; + } + + /** + * This method is used to validate the parameters in client CMD. For example: When customer + * input CMD: "CREATE PIPESINK mySink1 AS mySink (p1='111', p2='abc')", The parameters (p1=111, + * p2=abc) will be saved in sinkParams and then send it to validateSinkParams(sinkParams) for + * validation. If validateSinkParams() does not return Exception, the CMD will be processed. + * Otherwise, the CMD will be refused with prompt info that is from Exception.getMessage(); + * + * @param sinkParams Contains the parameters in CMD "CREATE PIPESINK ..." + * @return true means successful + * @throws Exception + */ + @Override + public void validateSinkParams(Map sinkParams) throws Exception { + //== Check whether mandatory parameters are enough + if (!sinkParams.containsKey(PARAM_ADDRESS)) { + throw new Exception("Need attribute: " + PARAM_ADDRESS); + } else if (!sinkParams.containsKey(PARAM_USER)) { + throw new Exception("Need attribute: " + PARAM_USER); + } else if (!sinkParams.containsKey(PARAM_PASSWD)) { + throw new Exception("Need attribute: " + PARAM_PASSWD); + } else if (!sinkParams.containsKey(PARAM_PROJECT)) { + throw new Exception("Need attribute: " + PARAM_PROJECT); + } else if (!sinkParams.containsKey(PARAM_TABLE)) { + throw new Exception("Need attribute: " + PARAM_TABLE); + } + + //== Here, you may add other checking. Such as, checking whether remote address can be accessed etc. + //... + } + + /** + * After IoTDB create IExternalPipeSinkWriterFactory instance, + * IoTDB will call this method to let IExternalPipeSinkWriterFactory finish some init work. + * + * @param sinkParams Contains the parameters in CMD "CREATE PIPESINK ..." + */ + @Override + public void initialize(Map sinkParams) throws Exception { + this.sinkParams = sinkParams; + + try { + //== If need, may check input parameters again. + validateSinkParams(sinkParams); + //== Here, do init work of IExternalPipeSinkWriterFactory instance. + // ... + } catch (Exception e) { + //LOGGER.error("Failed to init extPipeSink ..." , e); + throw e; + } + } + + /** + * Get 1 IExternalPipeSinkWriter instance who will occupy 1 thread to run. 
+ * @return + */ + @Override + public IExternalPipeSinkWriter get() { + return new ExtPipeSinkWriterImpl(sinkParams); + } +} diff --git a/example/ext-pipe-plugin-example/src/main/java/org/apache/iotdb/extpipe/ExtPipeSinkWriterImpl.java b/example/ext-pipe-plugin-example/src/main/java/org/apache/iotdb/extpipe/ExtPipeSinkWriterImpl.java new file mode 100644 index 000000000000..431a585a1ad8 --- /dev/null +++ b/example/ext-pipe-plugin-example/src/main/java/org/apache/iotdb/extpipe/ExtPipeSinkWriterImpl.java @@ -0,0 +1,176 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.extpipe; + +import org.apache.iotdb.pipe.external.api.DataType; +import org.apache.iotdb.pipe.external.api.ExternalPipeSinkWriterStatus; +import org.apache.iotdb.pipe.external.api.IExternalPipeSinkWriter; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class ExtPipeSinkWriterImpl implements IExternalPipeSinkWriter { + private Map sinkParams; + //private ExtSession extSession; //maintain the connect to ext DB system + private long startTime; + + public ExtPipeSinkWriterImpl(Map sinkParams) { + this.sinkParams= sinkParams; + } + + /** + * IoTDB call this method to initialize 1 IExternalPipeSinkWriter instance. + */ + @Override + public void open() { + //== Use the parameters in sinkParams to start 1 session to ext DB system + //extSession = new ExtSession(sinkParams); + + //== Record the start-time of current external session. + startTime = System.currentTimeMillis(); + } + + @Override + public synchronized void insertBoolean(String[] path, long timestamp, boolean value) + throws IOException { + //== Here, handle inserted Boolean type data from IoTDB. + //extSession.insertBoolean(...); + //... + } + + @Override + public synchronized void insertInt32(String[] path, long timestamp, int value) + throws IOException { + //== Here, handle inserted Int32 type data from IoTDB. + //extSession.insertInt32(...); + //... + } + + @Override + public synchronized void insertInt64(String[] path, long timestamp, long value) + throws IOException { + //== Here, handle inserted Int64 type data from IoTDB. + //... + } + + @Override + public synchronized void insertFloat(String[] path, long timestamp, float value) + throws IOException { + //== Here, handle inserted float type data from IoTDB. + //extSession.insertFloat(...); + //... + } + + @Override + public synchronized void insertDouble(String[] path, long timestamp, double value) + throws IOException { + //== Here, handle inserted double type data from IoTDB. + //extSession.insertDouble(...); + //... 
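+    //== Illustrative sketch only (buffer/batchSize/Point are hypothetical names, and `path` is assumed
+    //   to hold the nodes of the series path): a typical writer buffers the point here and relies on
+    //   flush() below to push it to the external system, e.g.
+    //     buffer.add(new Point(String.join(".", path), timestamp, value));
+    //     if (buffer.size() >= batchSize) { flush(); }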
+ } + + @Override + public synchronized void insertText(String[] path, long timestamp, String value) + throws IOException { + //== Here, handle inserted Text type data from IoTDB. + //extSession.insertText(...); + //.. + } + + @Override + public synchronized void insertVector(String[] path, DataType[] dataTypes, long timestamp, + Object[] values) + throws IOException { + //== Here, handle inserted Vector type data from IoTDB. + //extSession.insertVector(...); + //... + } + + @Override + public synchronized void delete(String[] s, long l) { + //== Here, handle delete operation. + //extSession.delete(...); + //... + } + + @Override + public synchronized void createTimeSeries(String[] path, DataType dataType) { + //== Here, handle create TimeSeries operation. + //extSession.createTable(...); + //... + } + + @Override + public synchronized void deleteTimeSeries(String[] path) { + //== Here, handle delete TimeSeries operation. + //extSession.deleteTable(...); + //... + } + + /** + * IoTDB call this method to flush data in plugin buf to external DB system, if data buf exist. + * @throws IOException + */ + @Override + public synchronized void flush() throws IOException { + //extSession.flush(...); + //... + } + + /** + * When run CMD "stop pipe ..." or "drop pipe ..." , + * IoTDB will call this method to close connection to external DB system. + * @throws IOException + */ + @Override + public synchronized void close() throws IOException { + flush(); + + //== Close connection to external DB system. + //extSession.close(...); + } + + /** + /** + * IoTDB use this method to collect statistic info of 1 ExternalPipeSinkWriter. + * When run CMD "show pipes", the statistic information will be show. + * @return + */ + @Override + public ExternalPipeSinkWriterStatus getStatus() { + ExternalPipeSinkWriterStatus status = new ExternalPipeSinkWriterStatus(); + + //== set basic statistic info + status.setStartTime(startTime); //ExternalPipeSinkWriter's beginning time + //status.setNumOfRecordsTransmitted(extSession.getNumOffRecords()); + //status.setNumOfBytesTransmitted(extSession.getNumOfBytes()); + + //== Here, customer may define & add other information. + Map extendedFields = new HashMap<>(); + //extendedFields.put("AverageSpeed", Long.toString(extSession.getAvgSpeed()); + //extendedFields.put("Speed", Long.toString(extSession.getSpeed())); + //extendedFields.put("SessionId", extSession == null ? "N/A" : extSession.getId()); + status.setExtendedFields(extendedFields); + + return status; + } + +} diff --git a/example/jdbc/src/main/java/org/apache/iotdb/SyntaxConventionRelatedExample.java b/example/jdbc/src/main/java/org/apache/iotdb/SyntaxConventionRelatedExample.java new file mode 100644 index 000000000000..5858b4955403 --- /dev/null +++ b/example/jdbc/src/main/java/org/apache/iotdb/SyntaxConventionRelatedExample.java @@ -0,0 +1,161 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb; + +import org.apache.iotdb.jdbc.IoTDBSQLException; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; + +public class SyntaxConventionRelatedExample { + /** + * if you want to create a time series named root.sg1.select, a possible SQL statement would be + * like: create timeseries root.sg1.select with datatype=FLOAT, encoding=RLE As described before, + * when using session API, path is represented using String. The path should be written as + * "root.sg1.select". + */ + private static final String ROOT_SG1_KEYWORD_EXAMPLE = "root.sg1.select"; + + /** + * if you want to create a time series named root.sg1.111, a possible SQL statement would be like: + * create timeseries root.sg1.`111` with datatype=FLOAT, encoding=RLE The path should be written + * as "root.sg1.`111`". + */ + private static final String ROOT_SG1_DIGITS_EXAMPLE = "root.sg1.`111`"; + + /** + * if you want to create a time series named root.sg1.`a"b'c``, a possible SQL statement would be + * like: create timeseries root.sg1.`a"b'c``` with datatype=FLOAT, encoding=RLE The path should be + * written as "root.sg1.`a"b`c```". + */ + private static final String ROOT_SG1_SPECIAL_CHARACTER_EXAMPLE = "root.sg1.`a\"b'c```"; + + /** + * if you want to create a time series named root.sg1.a, a possible SQL statement would be like: + * create timeseries root.sg1.a with datatype=FLOAT, encoding=RLE The path should be written as + * "root.sg1.a". 
+ */ + private static final String ROOT_SG1_NORMAL_NODE_EXAMPLE = "root.sg1.a"; + + private static final String DEVICE = "root.sg1"; + + public static void main(String[] args) throws ClassNotFoundException, SQLException { + Class.forName("org.apache.iotdb.jdbc.IoTDBDriver"); + try (Connection connection = + DriverManager.getConnection( + "jdbc:iotdb://127.0.0.1:6667?version=V_0_13", "root", "root"); + Statement statement = connection.createStatement()) { + + // set JDBC fetchSize + statement.setFetchSize(10000); + + // create time series + try { + statement.execute(String.format("SET STORAGE GROUP TO %s", DEVICE)); + statement.execute( + String.format( + "CREATE TIMESERIES %s WITH DATATYPE=INT64, ENCODING=RLE, COMPRESSOR=SNAPPY", + ROOT_SG1_DIGITS_EXAMPLE)); + statement.execute( + String.format( + "CREATE TIMESERIES %s WITH DATATYPE=INT64, ENCODING=RLE, COMPRESSOR=SNAPPY", + ROOT_SG1_KEYWORD_EXAMPLE)); + statement.execute( + String.format( + "CREATE TIMESERIES %s WITH DATATYPE=INT64, ENCODING=RLE, COMPRESSOR=SNAPPY", + ROOT_SG1_NORMAL_NODE_EXAMPLE)); + statement.execute( + String.format( + "CREATE TIMESERIES %s WITH DATATYPE=INT64, ENCODING=RLE, COMPRESSOR=SNAPPY", + ROOT_SG1_SPECIAL_CHARACTER_EXAMPLE)); + } catch (IoTDBSQLException e) { + System.out.println(e.getMessage()); + } + + // show timeseries + ResultSet resultSet = statement.executeQuery("show timeseries root.sg1.*"); + List timeseriesList = new ArrayList<>(); + while (resultSet.next()) { + timeseriesList.add(resultSet.getString("timeseries")); + } + for (String path : timeseriesList) { + for (int i = 0; i <= 10; i++) { + statement.addBatch(prepareInsertStatement(i, path)); + } + } + statement.executeBatch(); + statement.clearBatch(); + + resultSet = statement.executeQuery("select ** from root.sg1 where time <= 10"); + outputResult(resultSet); + for (String path : timeseriesList) { + // For example, for timeseires root.sg1.`111`, sensor is 111, as described in syntax + // convention, it should be written as `111` in SQL + // in resultSet of "show timeseries", result is root.sg1.`111`, which means you need not to + // worry about dealing with backquotes yourself. 
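+        // removeDevice(path) only strips the "root.sg1." prefix (see removeDevice below), so a quoted
+        // sensor name such as `111` keeps its backquotes and can be placed into the SELECT clause as-is.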
+ resultSet = + statement.executeQuery(String.format("select %s from root.sg1", removeDevice(path))); + outputResult(resultSet); + } + } catch (IoTDBSQLException e) { + System.out.println(e.getMessage()); + } + } + + private static void outputResult(ResultSet resultSet) throws SQLException { + if (resultSet != null) { + System.out.println("--------------------------"); + final ResultSetMetaData metaData = resultSet.getMetaData(); + final int columnCount = metaData.getColumnCount(); + for (int i = 0; i < columnCount; i++) { + System.out.print(metaData.getColumnLabel(i + 1) + " "); + } + System.out.println(); + while (resultSet.next()) { + for (int i = 1; ; i++) { + System.out.print(resultSet.getString(i)); + if (i < columnCount) { + System.out.print(", "); + } else { + System.out.println(); + break; + } + } + } + System.out.println("--------------------------\n"); + } + } + + private static String prepareInsertStatement(int time, String path) { + // remove device root.sg1 + path = removeDevice(path); + return String.format( + "insert into root.sg1(timestamp, %s) values(" + time + "," + 1 + ")", path); + } + + private static String removeDevice(String path) { + return path.substring(DEVICE.length() + 1); + } +} diff --git a/example/mqtt-customize/README.md b/example/mqtt-customize/README.md index 58f90558714b..9fa224c4dab4 100644 --- a/example/mqtt-customize/README.md +++ b/example/mqtt-customize/README.md @@ -35,8 +35,8 @@ The example is to show how to customize your MQTT message format Then, in your server: * Create ${IOTDB_HOME}/ext/mqtt/ folder, and put the jar into this folder. -* Update configuration to enable MQTT service. (`enable_mqtt_service=true` in iotdb-engine.properties) -* Set the value of `mqtt_payload_formatter` in `conf/iotdb-engine.properties` as the value of getName() in your implementation +* Update configuration to enable MQTT service. (`enable_mqtt_service=true` in iotdb-datanode.properties) +* Set the value of `mqtt_payload_formatter` in `conf/iotdb-datanode.properties` as the value of getName() in your implementation * Launch the IoTDB server. * Now IoTDB will use your implementation to parse the MQTT message. diff --git a/example/mqtt-customize/src/main/java/org/apache/iotdb/mqtt/server/CustomizedJsonPayloadFormatter.java b/example/mqtt-customize/src/main/java/org/apache/iotdb/mqtt/server/CustomizedJsonPayloadFormatter.java index 27fd9ce224be..3332dba096a1 100644 --- a/example/mqtt-customize/src/main/java/org/apache/iotdb/mqtt/server/CustomizedJsonPayloadFormatter.java +++ b/example/mqtt-customize/src/main/java/org/apache/iotdb/mqtt/server/CustomizedJsonPayloadFormatter.java @@ -56,7 +56,7 @@ public List format(ByteBuf payload) { @Override public String getName() { - // set the value of mqtt_payload_formatter in iotdb-engine.properties as the following string: + // set the value of mqtt_payload_formatter in iotdb-datanode.properties as the following string: return "CustomizedJson"; } } diff --git a/example/mqtt/README.md b/example/mqtt/README.md index 9f8c8a5ab539..9aa6288a7ba6 100644 --- a/example/mqtt/README.md +++ b/example/mqtt/README.md @@ -27,7 +27,7 @@ The example is to show how to send data to IoTDB from a mqtt client. ## Usage -* Update configuration to enable MQTT service. (`enable_mqtt_service=true` in iotdb-engine.properties) +* Update configuration to enable MQTT service. (`enable_mqtt_service=true` in iotdb-datanode.properties) * Launch the IoTDB server. 
* Setup storage group `SET STORAGE GROUP TO root.sg` and create time timeseries `CREATE TIMESERIES root.sg.d1.s1 WITH DATATYPE=DOUBLE, ENCODING=PLAIN`. * Run `org.apache.iotdb.mqtt.MQTTClient` to run the mqtt client and send events to server. diff --git a/example/mqtt/src/main/java/org/apache/iotdb/mqtt/MQTTClient.java b/example/mqtt/src/main/java/org/apache/iotdb/mqtt/MQTTClient.java index bd23fde6e80f..050316067ba2 100644 --- a/example/mqtt/src/main/java/org/apache/iotdb/mqtt/MQTTClient.java +++ b/example/mqtt/src/main/java/org/apache/iotdb/mqtt/MQTTClient.java @@ -36,6 +36,7 @@ public static void main(String[] args) throws Exception { connection.connect(); Random random = new Random(); + StringBuilder sb = new StringBuilder(); for (int i = 0; i < 10; i++) { String payload = String.format( @@ -46,10 +47,16 @@ public static void main(String[] args) throws Exception { + "\"values\":[%f]\n" + "}", System.currentTimeMillis(), random.nextDouble()); + sb.append(payload).append(","); + // publish a json object Thread.sleep(1); connection.publish("root.sg.d1.s1", payload.getBytes(), QoS.AT_LEAST_ONCE, false); } + // publish a json array + sb.insert(0, "["); + sb.replace(sb.lastIndexOf(","), sb.length(), "]"); + connection.publish("root.sg.d1.s1", sb.toString().getBytes(), QoS.AT_LEAST_ONCE, false); connection.disconnect(); } diff --git a/example/rest-java-example/pom.xml b/example/rest-java-example/pom.xml new file mode 100644 index 000000000000..499ca85a9ec4 --- /dev/null +++ b/example/rest-java-example/pom.xml @@ -0,0 +1,49 @@ + + + + + iotdb-examples + org.apache.iotdb + 0.14.0-SNAPSHOT + + 4.0.0 + rest-java-example + + 1.8 + 1.8 + + + + org.apache.httpcomponents + httpclient + compile + + + com.google.code.gson + gson + + + com.fasterxml.jackson.core + jackson-databind + + + diff --git a/example/rest-java-example/src/main/java/org/apache/iotdb/HttpExample.java b/example/rest-java-example/src/main/java/org/apache/iotdb/HttpExample.java new file mode 100644 index 000000000000..532a26659469 --- /dev/null +++ b/example/rest-java-example/src/main/java/org/apache/iotdb/HttpExample.java @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; +import org.apache.http.HttpEntity; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.util.EntityUtils; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Map; + +public class HttpExample { + private String getAuthorization(String username, String password) { + return Base64.getEncoder() + .encodeToString((username + ":" + password).getBytes(StandardCharsets.UTF_8)); + } + + public static void main(String[] args) { + HttpExample httpExample = new HttpExample(); + httpExample.ping(); + httpExample.insertTablet(); + httpExample.query(); + } + + public void ping() { + CloseableHttpClient httpClient = SSLClient.getInstance().getHttpClient(); + HttpGet httpGet = new HttpGet("http://127.0.0.1:18080/ping"); + CloseableHttpResponse response = null; + try { + response = httpClient.execute(httpGet); + HttpEntity responseEntity = response.getEntity(); + String message = EntityUtils.toString(responseEntity, "utf-8"); + JsonObject result = JsonParser.parseString(message).getAsJsonObject(); + System.out.println(result); + } catch (IOException e) { + e.printStackTrace(); + + } finally { + try { + if (httpClient != null) { + httpClient.close(); + } + if (response != null) { + response.close(); + } + } catch (IOException e) { + e.printStackTrace(); + } + } + } + + + private HttpPost getHttpPost(String url) { + HttpPost httpPost = new HttpPost(url); + httpPost.addHeader("Content-type", "application/json; charset=utf-8"); + httpPost.setHeader("Accept", "application/json"); + String authorization = getAuthorization("root", "root"); + httpPost.setHeader("Authorization", authorization); + return httpPost; + } + + public void insertTablet() { + CloseableHttpClient httpClient = SSLClient.getInstance().getHttpClient(); + CloseableHttpResponse response = null; + try { + HttpPost httpPost = getHttpPost("http://127.0.0.1:18080/rest/v1/insertTablet"); + String json = + "{\"timestamps\":[1635232143960,1635232153960],\"measurements\":[\"s3\",\"s4\",\"s5\",\"s6\",\"s7\",\"s8\"],\"dataTypes\":[\"TEXT\",\"INT32\",\"INT64\",\"FLOAT\",\"BOOLEAN\",\"DOUBLE\"],\"values\":[[\"2aa\",\"\"],[11,2],[1635000012345555,1635000012345556],[1.41,null],[null,false],[null,3.5555]],\"isAligned\":false,\"deviceId\":\"root.sg25\"}"; + httpPost.setEntity(new StringEntity(json, Charset.defaultCharset())); + response = httpClient.execute(httpPost); + HttpEntity responseEntity = response.getEntity(); + String message = EntityUtils.toString(responseEntity, "utf-8"); + JsonObject result = JsonParser.parseString(message).getAsJsonObject(); + System.out.println(result); + } catch (IOException e) { + e.printStackTrace(); + + } finally { + try { + if (response != null) { + response.close(); + } + } catch (IOException e) { + e.printStackTrace(); + } + } + } + + public void query() { + CloseableHttpClient httpClient = SSLClient.getInstance().getHttpClient(); + CloseableHttpResponse response = null; + try { + HttpPost httpPost = getHttpPost("http://127.0.0.1:18080/rest/v1/query"); + String sql = "{\"sql\":\"select *,s4+1,s4+1 from root.sg25\"}"; + 
httpPost.setEntity(new StringEntity(sql, Charset.defaultCharset())); + response = httpClient.execute(httpPost); + HttpEntity responseEntity = response.getEntity(); + String message = EntityUtils.toString(responseEntity, "utf-8"); + ObjectMapper mapper = new ObjectMapper(); + Map map = mapper.readValue(message, Map.class); + System.out.println(map); + } catch (IOException e) { + e.printStackTrace(); + } finally { + try { + if (response != null) { + response.close(); + } + } catch (IOException e) { + e.printStackTrace(); + } + } + } +} diff --git a/example/rest-java-example/src/main/java/org/apache/iotdb/HttpsExample.java b/example/rest-java-example/src/main/java/org/apache/iotdb/HttpsExample.java new file mode 100644 index 000000000000..d9932dc9e49d --- /dev/null +++ b/example/rest-java-example/src/main/java/org/apache/iotdb/HttpsExample.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.gson.JsonObject; +import com.google.gson.JsonParser; +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Map; +import org.apache.http.HttpEntity; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.util.EntityUtils; + +public class HttpsExample { + + private String getAuthorization(String username, String password) { + return Base64.getEncoder() + .encodeToString((username + ":" + password).getBytes(StandardCharsets.UTF_8)); + } + + public static void main(String[] args) throws Exception { + HttpsExample httpsExample=new HttpsExample(); + httpsExample.pingHttps(); + httpsExample.insertTablet(); + httpsExample.query(); + + } + + public void pingHttps() throws Exception { + CloseableHttpClient httpClient = SSLClient.getInstance().getHttpClient(); + HttpGet httpGet = new HttpGet("https://127.0.0.1:18080/ping"); + CloseableHttpResponse response = null; + try { + response = httpClient.execute(httpGet); + HttpEntity responseEntity = response.getEntity(); + String message = EntityUtils.toString(responseEntity, "utf-8"); + JsonObject result = JsonParser.parseString(message).getAsJsonObject(); + System.out.println(result); + + } catch (IOException e) { + e.printStackTrace(); + + } finally { + try { + if (httpClient != null) { + httpClient.close(); + } + if (response != null) { + response.close(); + } + } catch (IOException e) { + e.printStackTrace(); + + } + } + } + + private HttpPost getHttpPost(String url) { + 
HttpPost httpPost = new HttpPost(url); + httpPost.addHeader("Content-type", "application/json; charset=utf-8"); + httpPost.setHeader("Accept", "application/json"); + String authorization = getAuthorization("root", "root"); + httpPost.setHeader("Authorization", authorization); + return httpPost; + } + + public void insertTablet() { + CloseableHttpClient httpClient = SSLClient.getInstance().getHttpClient(); + CloseableHttpResponse response = null; + try { + HttpPost httpPost = getHttpPost("https://127.0.0.1:18080/rest/v1/insertTablet"); + String json = + "{\"timestamps\":[1635232143960,1635232153960],\"measurements\":[\"s3\",\"s4\",\"s5\",\"s6\",\"s7\",\"s8\"],\"dataTypes\":[\"TEXT\",\"INT32\",\"INT64\",\"FLOAT\",\"BOOLEAN\",\"DOUBLE\"],\"values\":[[\"2aa\",\"\"],[11,2],[1635000012345555,1635000012345556],[1.41,null],[null,false],[null,3.5555]],\"isAligned\":false,\"deviceId\":\"root.sg25\"}"; + httpPost.setEntity(new StringEntity(json, Charset.defaultCharset())); + response = httpClient.execute(httpPost); + HttpEntity responseEntity = response.getEntity(); + String message = EntityUtils.toString(responseEntity, "utf-8"); + JsonObject result = JsonParser.parseString(message).getAsJsonObject(); + System.out.println(result); + } catch (IOException e) { + e.printStackTrace(); + + } finally { + try { + if (response != null) { + response.close(); + } + } catch (IOException e) { + e.printStackTrace(); + } + } + } + + public void query() { + CloseableHttpClient httpClient = SSLClient.getInstance().getHttpClient(); + CloseableHttpResponse response = null; + try { + HttpPost httpPost = getHttpPost("https://127.0.0.1:18080/rest/v1/query"); + String sql = "{\"sql\":\"select *,s4+1,s4+1 from root.sg25\"}"; + httpPost.setEntity(new StringEntity(sql, Charset.defaultCharset())); + response = httpClient.execute(httpPost); + HttpEntity responseEntity = response.getEntity(); + String message = EntityUtils.toString(responseEntity, "utf-8"); + ObjectMapper mapper = new ObjectMapper(); + Map map = mapper.readValue(message, Map.class); + System.out.println(map); + } catch (IOException e) { + e.printStackTrace(); + } finally { + try { + if (response != null) { + response.close(); + } + } catch (IOException e) { + e.printStackTrace(); + } + } + } + +} diff --git a/example/rest-java-example/src/main/java/org/apache/iotdb/SSLClient.java b/example/rest-java-example/src/main/java/org/apache/iotdb/SSLClient.java new file mode 100644 index 000000000000..faad2866cc53 --- /dev/null +++ b/example/rest-java-example/src/main/java/org/apache/iotdb/SSLClient.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb; + +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import org.apache.http.config.Registry; +import org.apache.http.config.RegistryBuilder; +import org.apache.http.conn.socket.ConnectionSocketFactory; +import org.apache.http.conn.socket.PlainConnectionSocketFactory; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.conn.ssl.TrustStrategy; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; +import org.apache.http.ssl.SSLContextBuilder; + +public class SSLClient { + + private static SSLConnectionSocketFactory sslConnectionSocketFactory = null; + private static PoolingHttpClientConnectionManager poolingHttpClientConnectionManager = null; + private static SSLContextBuilder sslContextBuilder = null; + private static ConnectionSocketFactory plainsf=null; + + private static class SSLClientInstance { + private static final SSLClient instance = new SSLClient(); + } + + public static SSLClient getInstance() { + return SSLClientInstance.instance; + } + private SSLClient() { + try { + sslContextBuilder = new SSLContextBuilder().loadTrustMaterial(null, new TrustStrategy() { + @Override + public boolean isTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException { + return true; + } + }); + plainsf = PlainConnectionSocketFactory + .getSocketFactory(); + sslConnectionSocketFactory = new SSLConnectionSocketFactory(sslContextBuilder.build(), new String[]{"TLSv1.3"}, null, NoopHostnameVerifier.INSTANCE); + Registry registryBuilder = RegistryBuilder.create() + .register("http", plainsf) + .register("https", sslConnectionSocketFactory) + .build(); + poolingHttpClientConnectionManager = new PoolingHttpClientConnectionManager(registryBuilder); + poolingHttpClientConnectionManager.setMaxTotal(10); + } catch (NoSuchAlgorithmException|KeyStoreException|KeyManagementException e) { + e.printStackTrace(); + } + } + + + public CloseableHttpClient getHttpClient() { + CloseableHttpClient httpClient = HttpClients.custom().setSSLSocketFactory(sslConnectionSocketFactory) + .setConnectionManager(poolingHttpClientConnectionManager) + .setConnectionManagerShared(true) + .build(); + return httpClient; + } +} diff --git a/example/session/src/main/java/org/apache/iotdb/SyntaxConventionRelatedExample.java b/example/session/src/main/java/org/apache/iotdb/SyntaxConventionRelatedExample.java new file mode 100644 index 000000000000..362702f76a8c --- /dev/null +++ b/example/session/src/main/java/org/apache/iotdb/SyntaxConventionRelatedExample.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb; + +import org.apache.iotdb.rpc.IoTDBConnectionException; +import org.apache.iotdb.rpc.StatementExecutionException; +import org.apache.iotdb.rpc.TSStatusCode; +import org.apache.iotdb.session.Session; +import org.apache.iotdb.session.SessionDataSet; +import org.apache.iotdb.session.util.Version; +import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType; +import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; +import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding; + +import java.util.ArrayList; +import java.util.List; + +/** + * When using session API, measurement, device, storage group and path are represented by String. + * The content of the String should be the same as what you would write in a SQL statement. This + * class is an example to help you understand better. + */ +public class SyntaxConventionRelatedExample { + private static Session session; + private static final String LOCAL_HOST = "127.0.0.1"; + /** + * if you want to create a time series named root.sg1.select, a possible SQL statement would be + * like: create timeseries root.sg1.select with datatype=FLOAT, encoding=RLE As described before, + * when using session API, path is represented using String. The path should be written as + * "root.sg1.select". + */ + private static final String ROOT_SG1_KEYWORD_EXAMPLE = "root.sg1.select"; + + /** + * if you want to create a time series named root.sg1.111, a possible SQL statement would be like: + * create timeseries root.sg1.`111` with datatype=FLOAT, encoding=RLE The path should be written + * as "root.sg1.`111`". + */ + private static final String ROOT_SG1_DIGITS_EXAMPLE = "root.sg1.`111`"; + + /** + * if you want to create a time series named root.sg1.`a"b'c``, a possible SQL statement would be + * like: create timeseries root.sg1.`a"b'c``` with datatype=FLOAT, encoding=RLE The path should be + * written as "root.sg1.`a"b`c```". + */ + private static final String ROOT_SG1_SPECIAL_CHARACTER_EXAMPLE = "root.sg1.`a\"b'c```"; + + /** + * if you want to create a time series named root.sg1.a, a possible SQL statement would be like: + * create timeseries root.sg1.a with datatype=FLOAT, encoding=RLE The path should be written as + * "root.sg1.a". 
+ */ + private static final String ROOT_SG1_NORMAL_NODE_EXAMPLE = "root.sg1.a"; + + public static void main(String[] args) + throws IoTDBConnectionException, StatementExecutionException { + session = + new Session.Builder() + .host(LOCAL_HOST) + .port(6667) + .username("root") + .password("root") + .version(Version.V_0_13) + .build(); + session.open(false); + + // set session fetchSize + session.setFetchSize(10000); + + try { + session.setStorageGroup("root.sg1"); + } catch (StatementExecutionException e) { + if (e.getStatusCode() != TSStatusCode.PATH_ALREADY_EXIST_ERROR.getStatusCode()) { + throw e; + } + } + + // createTimeSeries + createTimeSeries(); + SessionDataSet dataSet = session.executeQueryStatement("show timeseries root.sg1.*"); + // the expected paths would be: + // [root.sg1.select, root.sg1.`111`, root.sg1.`a"b'c```, root.sg1.a] + // You could see that time series in dataSet are exactly the same as + // the initial String you used as path. Node names consist of digits or contain special + // characters are quoted with ``, both in SQL statement and in header of result dataset. + // It's convenient that you can use the result of show timeseries as input parameter directly + // for other + // session APIs such as insertRecord or executeRawDataQuery. + List paths = new ArrayList<>(); + while (dataSet.hasNext()) { + paths.add(dataSet.next().getFields().get(0).toString()); + } + + long startTime = 1L; + long endTime = 100L; + + try (SessionDataSet dataSet1 = session.executeRawDataQuery(paths, startTime, endTime)) { + + System.out.println(dataSet1.getColumnNames()); + dataSet1.setFetchSize(1024); + while (dataSet1.hasNext()) { + System.out.println(dataSet1.next()); + } + } + } + + private static void createTimeSeries() + throws IoTDBConnectionException, StatementExecutionException { + if (!session.checkTimeseriesExists(ROOT_SG1_KEYWORD_EXAMPLE)) { + session.createTimeseries( + ROOT_SG1_KEYWORD_EXAMPLE, TSDataType.FLOAT, TSEncoding.RLE, CompressionType.SNAPPY); + } + if (!session.checkTimeseriesExists(ROOT_SG1_DIGITS_EXAMPLE)) { + session.createTimeseries( + ROOT_SG1_DIGITS_EXAMPLE, TSDataType.FLOAT, TSEncoding.RLE, CompressionType.SNAPPY); + } + if (!session.checkTimeseriesExists(ROOT_SG1_SPECIAL_CHARACTER_EXAMPLE)) { + session.createTimeseries( + ROOT_SG1_SPECIAL_CHARACTER_EXAMPLE, + TSDataType.FLOAT, + TSEncoding.RLE, + CompressionType.SNAPPY); + } + if (!session.checkTimeseriesExists(ROOT_SG1_NORMAL_NODE_EXAMPLE)) { + session.createTimeseries( + ROOT_SG1_NORMAL_NODE_EXAMPLE, TSDataType.FLOAT, TSEncoding.RLE, CompressionType.SNAPPY); + } + } +} diff --git a/example/trigger/src/main/java/org/apache/iotdb/trigger/AlertingExample.java b/example/trigger/src/main/java/org/apache/iotdb/trigger/AlertingExample.java index 500caf2b92f9..48975a1dd88a 100644 --- a/example/trigger/src/main/java/org/apache/iotdb/trigger/AlertingExample.java +++ b/example/trigger/src/main/java/org/apache/iotdb/trigger/AlertingExample.java @@ -19,6 +19,7 @@ package org.apache.iotdb.trigger; +import org.apache.iotdb.commons.path.PartialPath; import org.apache.iotdb.db.engine.trigger.api.Trigger; import org.apache.iotdb.db.engine.trigger.api.TriggerAttributes; import org.apache.iotdb.db.engine.trigger.sink.alertmanager.AlertManagerConfiguration; @@ -71,7 +72,7 @@ public void onStop() throws Exception { } @Override - public Double fire(long timestamp, Double value) throws Exception { + public Double fire(long timestamp, Double value, PartialPath path) throws Exception { if (value > 100.0) { labels.put("value", 
String.valueOf(value)); labels.put("severity", "critical"); @@ -88,7 +89,7 @@ public Double fire(long timestamp, Double value) throws Exception { } @Override - public double[] fire(long[] timestamps, double[] values) throws Exception { + public double[] fire(long[] timestamps, double[] values, PartialPath path) throws Exception { for (double value : values) { if (value > 100.0) { labels.put("value", String.valueOf(value)); diff --git a/example/trigger/src/main/java/org/apache/iotdb/trigger/TriggerExample.java b/example/trigger/src/main/java/org/apache/iotdb/trigger/TriggerExample.java index c6565172d5a3..7e1a6155a7ff 100644 --- a/example/trigger/src/main/java/org/apache/iotdb/trigger/TriggerExample.java +++ b/example/trigger/src/main/java/org/apache/iotdb/trigger/TriggerExample.java @@ -99,14 +99,14 @@ public void onStop() throws Exception { } @Override - public Double fire(long timestamp, Double value) throws Exception { + public Double fire(long timestamp, Double value, PartialPath path) throws Exception { tryOpenSinksFirstOnFire(); windowEvaluationHandler.collect(timestamp, value); return value; } @Override - public double[] fire(long[] timestamps, double[] values) throws Exception { + public double[] fire(long[] timestamps, double[] values, PartialPath path) throws Exception { tryOpenSinksFirstOnFire(); for (int i = 0; i < timestamps.length; ++i) { windowEvaluationHandler.collect(timestamps[i], values[i]); diff --git a/example/tsfile/src/main/java/org/apache/iotdb/tsfile/TsFileSequenceRead.java b/example/tsfile/src/main/java/org/apache/iotdb/tsfile/TsFileSequenceRead.java index 6d2c2ff89178..aa946f67b7d5 100644 --- a/example/tsfile/src/main/java/org/apache/iotdb/tsfile/TsFileSequenceRead.java +++ b/example/tsfile/src/main/java/org/apache/iotdb/tsfile/TsFileSequenceRead.java @@ -63,7 +63,7 @@ public static void main(String[] args) throws IOException { System.out.println("file magic head: " + reader.readHeadMagic()); System.out.println("file magic tail: " + reader.readTailMagic()); System.out.println("Level 1 metadata position: " + reader.getFileMetadataPos()); - System.out.println("Level 1 metadata size: " + reader.getFileMetadataSize()); + System.out.println("Level 1 metadata size: " + reader.getTsFileMetadataSize()); // Sequential reading of one ChunkGroup now follows this order: // first the CHUNK_GROUP_HEADER, then SeriesChunks (headers and data) in one ChunkGroup // Because we do not know how many chunks a ChunkGroup may have, we should read one byte (the diff --git a/example/udf/src/main/java/org/apache/iotdb/udf/UDTFExample.java b/example/udf/src/main/java/org/apache/iotdb/udf/UDTFExample.java index 66a07482d5c6..ddafd2984e43 100644 --- a/example/udf/src/main/java/org/apache/iotdb/udf/UDTFExample.java +++ b/example/udf/src/main/java/org/apache/iotdb/udf/UDTFExample.java @@ -19,14 +19,14 @@ package org.apache.iotdb.udf; -import org.apache.iotdb.db.query.udf.api.UDTF; -import org.apache.iotdb.db.query.udf.api.access.Row; -import org.apache.iotdb.db.query.udf.api.collector.PointCollector; -import org.apache.iotdb.db.query.udf.api.customizer.config.UDTFConfigurations; -import org.apache.iotdb.db.query.udf.api.customizer.parameter.UDFParameterValidator; -import org.apache.iotdb.db.query.udf.api.customizer.parameter.UDFParameters; -import org.apache.iotdb.db.query.udf.api.customizer.strategy.RowByRowAccessStrategy; -import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType; +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.Row; +import 
org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameterValidator; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; import java.io.IOException; import java.util.Map; @@ -51,7 +51,7 @@ public void validate(UDFParameterValidator validator) throws Exception { // this udf only accepts 1 time series .validateInputSeriesNumber(1) // the data type of the first input time series should be INT32 - .validateInputSeriesDataType(0, TSDataType.INT32) + .validateInputSeriesDataType(0, Type.INT32) // this udf doesn't accept any extra parameters // the validation rule is not required because extra parameters will be ignored .validate( @@ -62,9 +62,7 @@ public void validate(UDFParameterValidator validator) throws Exception { @Override public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) { - configurations - .setAccessStrategy(new RowByRowAccessStrategy()) - .setOutputDataType(TSDataType.INT32); + configurations.setAccessStrategy(new RowByRowAccessStrategy()).setOutputDataType(Type.INT32); } @Override diff --git a/external-pipe-api/pom.xml b/external-pipe-api/pom.xml new file mode 100644 index 000000000000..1afbb0f57418 --- /dev/null +++ b/external-pipe-api/pom.xml @@ -0,0 +1,35 @@ + + + + + iotdb-parent + org.apache.iotdb + 0.14.0-SNAPSHOT + ../pom.xml + + 4.0.0 + external-pipe-api + + 8 + 8 + + diff --git a/external-pipe-api/src/main/java/org/apache/iotdb/pipe/external/api/DataType.java b/external-pipe-api/src/main/java/org/apache/iotdb/pipe/external/api/DataType.java new file mode 100644 index 000000000000..32bf3ec27216 --- /dev/null +++ b/external-pipe-api/src/main/java/org/apache/iotdb/pipe/external/api/DataType.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
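For orientation, a sketch of what a complete UDTF looks like against the relocated org.apache.iotdb.udf.api package shown in the UDTFExample hunks above. Only the validate and beforeStart calls are taken from the patch; the class name, the transform signature and the Row/PointCollector accessors are assumptions for illustration.

  import org.apache.iotdb.udf.api.UDTF;
  import org.apache.iotdb.udf.api.access.Row;
  import org.apache.iotdb.udf.api.collector.PointCollector;
  import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations;
  import org.apache.iotdb.udf.api.customizer.parameter.UDFParameterValidator;
  import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters;
  import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy;
  import org.apache.iotdb.udf.api.type.Type;

  /** Hypothetical UDTF that forwards its single INT32 input series unchanged. */
  public class PassThroughUDTF implements UDTF {

    @Override
    public void validate(UDFParameterValidator validator) throws Exception {
      // Same constraints as UDTFExample: exactly one input series, typed INT32.
      validator.validateInputSeriesNumber(1).validateInputSeriesDataType(0, Type.INT32);
    }

    @Override
    public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) {
      // Taken verbatim from the patch: row-by-row access, INT32 output.
      configurations.setAccessStrategy(new RowByRowAccessStrategy()).setOutputDataType(Type.INT32);
    }

    @Override
    public void transform(Row row, PointCollector collector) throws Exception {
      // Assumed accessors: emit each input point as-is.
      collector.putInt(row.getTime(), row.getInt(0));
    }
  }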
+ */ + +package org.apache.iotdb.pipe.external.api; + +public enum DataType { + BOOLEAN, + INT32, + INT64, + FLOAT, + DOUBLE, + TEXT, + VECTOR; + + public static DataType fromTsDataType(byte tsDataType) { + switch (tsDataType) { + case 0: + return BOOLEAN; + case 1: + return INT32; + case 2: + return INT64; + case 3: + return FLOAT; + case 4: + return DOUBLE; + case 5: + return TEXT; + case 6: + return VECTOR; + default: + throw new IllegalArgumentException("Unrecognized TSDataType: " + tsDataType); + } + } +} diff --git a/external-pipe-api/src/main/java/org/apache/iotdb/pipe/external/api/ExternalPipeSinkWriterStatus.java b/external-pipe-api/src/main/java/org/apache/iotdb/pipe/external/api/ExternalPipeSinkWriterStatus.java new file mode 100644 index 000000000000..b9f93e23875b --- /dev/null +++ b/external-pipe-api/src/main/java/org/apache/iotdb/pipe/external/api/ExternalPipeSinkWriterStatus.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.pipe.external.api; + +import java.util.Map; + +/** Represents the status of an external pipe sink writer. 
*/ +public class ExternalPipeSinkWriterStatus { + + private Long startTime; + private Long numOfBytesTransmitted; + private Long numOfRecordsTransmitted; + private Map extendedFields; + + public void setStartTime(Long startTime) { + this.startTime = startTime; + } + + public Long getStartTime() { + return startTime; + } + + public void setNumOfBytesTransmitted(Long numOfBytesTransmitted) { + this.numOfBytesTransmitted = numOfBytesTransmitted; + } + + public Long getNumOfBytesTransmitted() { + return numOfBytesTransmitted; + } + + public void setNumOfRecordsTransmitted(Long numOfRecordsTransmitted) { + this.numOfRecordsTransmitted = numOfRecordsTransmitted; + } + + public Long getNumOfRecordsTransmitted() { + return numOfRecordsTransmitted; + } + + public void setExtendedFields(Map extendedFields) { + this.extendedFields = extendedFields; + } + + public Map getExtendedFields() { + return extendedFields; + } + + @Override + public String toString() { + return "ExternalPipeSinkWriterStatus{" + + "startTime=" + + startTime + + ", numOfBytesTransmitted=" + + numOfBytesTransmitted + + ", numOfRecordsTransmitted=" + + numOfRecordsTransmitted + + ", extendedFields=" + + extendedFields + + '}'; + } +} diff --git a/external-pipe-api/src/main/java/org/apache/iotdb/pipe/external/api/IExternalPipeSinkWriter.java b/external-pipe-api/src/main/java/org/apache/iotdb/pipe/external/api/IExternalPipeSinkWriter.java new file mode 100644 index 000000000000..c91ff4abd764 --- /dev/null +++ b/external-pipe-api/src/main/java/org/apache/iotdb/pipe/external/api/IExternalPipeSinkWriter.java @@ -0,0 +1,161 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.pipe.external.api; + +import java.io.IOException; + +/** Responsible for forwarding the operations to the sink. */ +public interface IExternalPipeSinkWriter extends AutoCloseable { + + /** Initialize the writer. */ + void open() throws IOException; + + /** + * Insert a boolean data point to the sink. + * + *
<p>
The framework will retry if this method throws an {@link IOException}. + * + * @param path The parts of a path separated by '.'. For example, for a path root.a.b.c, the input + * argument would be ["root", "a", "b", "c"]. + * @param time Timestamp of the data point. + * @param value Value of the data point. + */ + void insertBoolean(String[] path, long time, boolean value) throws IOException; + + /** + * Insert a 32-bit integer data point to the sink. + * + *
<p>
The framework will retry if this method throws an {@link IOException}. + * + * @param path The parts of a path separated by '.'. For example, for a path root.a.b.c, the input + * argument would be ["root", "a", "b", "c"]. + * @param time Timestamp of the data point. + * @param value Value of the data point. + */ + void insertInt32(String[] path, long time, int value) throws IOException; + + /** + * Insert a 64-bit integer data point to the sink. + * + *
<p>
The framework will retry if this method throws an {@link IOException}. + * + * @param path The parts of a path separated by '.'. For example, for a path root.a.b.c, the input + * argument would be ["root", "a", "b", "c"]. + * @param time Timestamp of the data point. + * @param value Value of the data point. + */ + void insertInt64(String[] path, long time, long value) throws IOException; + + /** + * Insert a float data point to the sink. + * + * @param path The parts of a path separated by '.'. For example, for a path root.a.b.c, the input + * argument would be ["root", "a", "b", "c"]. + * @param time Timestamp of the data point. + * @param value Value of the data point. + */ + void insertFloat(String[] path, long time, float value) throws IOException; + + /** + * Insert a double data point to the sink. + * + *
<p>
The framework will retry if this method throws an {@link IOException}. + * + * @param path The parts of a path separated by '.'. For example, for a path root.a.b.c, the input + * argument would be ["root", "a", "b", "c"]. + * @param time Timestamp of the data point. + * @param value Value of the data point. + */ + void insertDouble(String[] path, long time, double value) throws IOException; + + /** + * Insert a text data point to the sink. + * + *
<p>
The framework will retry if this method throws an {@link IOException}. + * + * @param path The parts of a path separated by '.'. For example, for a path root.a.b.c, the input + * argument would be ["root", "a", "b", "c"]. + * @param time Timestamp of the data point. + * @param value Value of the data point. + */ + void insertText(String[] path, long time, String value) throws IOException; + + /** + * Insert a vector data point to the sink. + * + *
<p>
The framework will retry if this method throws an {@link IOException}. + * + * @param path The parts of a path separated by '.'. For example, for a path root.a.b.c, the input + * argument would be ["root", "a", "b", "c"]. + * @param dataTypes Datatype of each element in the vector. + * @param time Timestamp of the data point. + * @param values Value of each element in the vector. + */ + void insertVector(String[] path, DataType[] dataTypes, long time, Object[] values) + throws IOException; + + /** + * Delete a data point from the sink. + * + * @param path The parts of a path separated by '.'. For example, for a path root.a.b.c, the input + * argument would be ["root", "a", "b", "c"]. + * @param time Timestamp of the data point. + */ + void delete(String[] path, long time) throws IOException; + + /** + * Handle the creation of a timeseries. + * + *
<p>
The framework will retry if this method throws an {@link IOException}. + * + * @param path The parts of a path separated by '.'. For example, for a path root.a.b.c, the input + * argument would be ["root", "a", "b", "c"]. + * @param dataType Datatype of the timeseries. + */ + void createTimeSeries(String[] path, DataType dataType) throws IOException; + + /** + * Handle the deletion of a timeseries. + * + *
<p>
The framework will retry if this method throws an {@link IOException}. + * + * @param path The parts of a path separated by '.'. For example, for a path root.a.b.c, the input + * argument would be ["root", "a", "b", "c"]. + */ + void deleteTimeSeries(String[] path) throws IOException; + + /** + * Flush the data and metadata changes to the sink. + * + *
<p>
The framework will retry if this method throws an {@link IOException}. + */ + void flush() throws IOException; + + /** Get the status of this writer. This method should NOT throw any exception. */ + ExternalPipeSinkWriterStatus getStatus(); + + /** + * Close the writer. + * + *
<p>
The framework will NOT retry if this method throws an {@link IOException}. + */ + @Override + void close() throws IOException; +} diff --git a/external-pipe-api/src/main/java/org/apache/iotdb/pipe/external/api/IExternalPipeSinkWriterFactory.java b/external-pipe-api/src/main/java/org/apache/iotdb/pipe/external/api/IExternalPipeSinkWriterFactory.java new file mode 100644 index 000000000000..b158b77eede4 --- /dev/null +++ b/external-pipe-api/src/main/java/org/apache/iotdb/pipe/external/api/IExternalPipeSinkWriterFactory.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.pipe.external.api; + +import java.util.Map; +import java.util.function.Supplier; + +/** Responsible for creating {@link IExternalPipeSinkWriter} with the sink configuration. */ +public interface IExternalPipeSinkWriterFactory extends Supplier { + /** + * Get the External PIPE's provider info. + * + * @return External PIPE s provider name + */ + String getProviderName(); + + /** + * Get the External PIPE's type name. For example: If customer self-defined getExternalPipeType() + * return "mySink", corresponding input CMD should be "CREATE PIPESINK mySink1 AS mySink (...)". + * Otherwise, the CMD will be refused. * + * + * @return External PIPE s type name + */ + String getExternalPipeType(); + + /** + * This function is used to validate the parameters in client CMD. For example: When customer + * input CMD: "CREATE PIPESINK mySink1 AS mySink (p1='111', p2='abc')", The parameters (p1=111, + * p2=abc) will be saved in sinkParams and then send it to validateSinkParams(sinkParams) for + * validation. If validateSinkParams() does not return Exception, the CMD will be processed. + * Otherwise, the CMD will be refused with prompt info that is from Exception.getMessage(); + * + * @param sinkParams Contains the parameters in CMD "CREATE PIPESINK ..." + * @return true means successful + * @throws Exception + */ + void validateSinkParams(Map sinkParams) throws Exception; + + /** + * Initialize with the configuration of the corresponding sink, which may contain information to + * set up a connection to the third-party system. + * + * @param sinkParams Parameters of the corresponding sink. 
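Reading the two interfaces above together, a possible provider shape emerges: the factory is registered for one external pipe type, validates the key/value pairs given in the CREATE PIPESINK statement, and then acts as a Supplier of writers. The following sketch only restates that contract; the class names, the Map<String, String> type arguments (the generics are elided in this rendering of the interfaces) and the separate MySinkWriter class are assumptions, not part of the patch.

  package org.apache.iotdb.pipe.external.mysink; // hypothetical package

  import org.apache.iotdb.pipe.external.api.IExternalPipeSinkWriter;
  import org.apache.iotdb.pipe.external.api.IExternalPipeSinkWriterFactory;

  import java.util.Map;

  /** Hypothetical factory for "CREATE PIPESINK mySink1 AS mySink (address='...')". */
  public class MySinkWriterFactory implements IExternalPipeSinkWriterFactory {

    private Map<String, String> sinkParams; // assumed value type, see lead-in

    @Override
    public String getProviderName() {
      return "example-provider"; // free-form provider info
    }

    @Override
    public String getExternalPipeType() {
      // Must match the type name used after AS in the CREATE PIPESINK statement.
      return "mySink";
    }

    @Override
    public void validateSinkParams(Map<String, String> sinkParams) throws Exception {
      // Throwing here makes the CREATE PIPESINK command fail with this message.
      if (!sinkParams.containsKey("address")) {
        throw new IllegalArgumentException("Parameter 'address' is required");
      }
    }

    @Override
    public void initialize(Map<String, String> sinkParams) throws Exception {
      this.sinkParams = sinkParams; // e.g. connection info for the third-party system
    }

    @Override
    public IExternalPipeSinkWriter get() {
      // Supplier#get from the factory's super-interface: hand out a new writer per call.
      return new MySinkWriter(sinkParams); // MySinkWriter is a hypothetical IExternalPipeSinkWriter
    }
  }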
+ */ + void initialize(Map sinkParams) throws Exception; +} diff --git a/flink-iotdb-connector/src/main/java/org/apache/iotdb/flink/IoTDBSink.java b/flink-iotdb-connector/src/main/java/org/apache/iotdb/flink/IoTDBSink.java index 8ab06c09759e..1ccf73795647 100644 --- a/flink-iotdb-connector/src/main/java/org/apache/iotdb/flink/IoTDBSink.java +++ b/flink-iotdb-connector/src/main/java/org/apache/iotdb/flink/IoTDBSink.java @@ -86,6 +86,7 @@ void initSession() { sessionPoolSize); } + @SuppressWarnings("unsafeThreadSchedule") void initScheduler() { if (batchSize > 0) { scheduledExecutor = Executors.newSingleThreadScheduledExecutor(); diff --git a/grafana-connector/src/main/java/org/apache/iotdb/web/grafana/controller/DatabaseConnectController.java b/grafana-connector/src/main/java/org/apache/iotdb/web/grafana/controller/DatabaseConnectController.java index 788e983b0950..176a1549f8ba 100644 --- a/grafana-connector/src/main/java/org/apache/iotdb/web/grafana/controller/DatabaseConnectController.java +++ b/grafana-connector/src/main/java/org/apache/iotdb/web/grafana/controller/DatabaseConnectController.java @@ -111,6 +111,9 @@ public String query(@RequestBody String json) { continue; } String target = object.get(targetStr).getAsString(); + if (target.contains(";")) { + throw new Exception("Only one SQL statement is supported"); + } JsonObject obj = new JsonObject(); obj.addProperty("target", target); String type = getJsonType(object); @@ -124,7 +127,7 @@ public String query(@RequestBody String json) { logger.info("query finished"); return result.toString(); } catch (Exception e) { - logger.error("/query failed, request body is {}", json, e); + logger.error("/query failed, request body is {}", json.replaceAll("[\n\r\t]", "_"), e); } return null; } diff --git a/grafana-connector/src/main/java/org/apache/iotdb/web/grafana/dao/impl/BasicDaoImpl.java b/grafana-connector/src/main/java/org/apache/iotdb/web/grafana/dao/impl/BasicDaoImpl.java index dc68618d4bba..99f619565451 100644 --- a/grafana-connector/src/main/java/org/apache/iotdb/web/grafana/dao/impl/BasicDaoImpl.java +++ b/grafana-connector/src/main/java/org/apache/iotdb/web/grafana/dao/impl/BasicDaoImpl.java @@ -101,7 +101,7 @@ public static void setTimestampRadioX(String timestampPrecision) { default: timestampRadioX = 1L; } - logger.info("Use timestamp precision {}", timestampPrecision); + logger.info("Use timestamp precision {}", timestampPrecision.replaceAll("[\n\r\t]", "_")); } /** diff --git a/grafana-metrics-example/cluster/Apache IoTDB ConfigNode Dashboard v0.14.0.json b/grafana-metrics-example/cluster/Apache IoTDB ConfigNode Dashboard v0.14.0.json new file mode 100644 index 000000000000..484fb19b8a10 --- /dev/null +++ b/grafana-metrics-example/cluster/Apache IoTDB ConfigNode Dashboard v0.14.0.json @@ -0,0 +1,1175 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "8.4.2" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 
1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "iteration": 1655363164080, + "links": [], + "liveNow": false, + "panels": [ + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 28, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "8.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "config_node{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "Online ConfigNode", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "data_node{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "Online DataNode", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "storage_group{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "Storage Group", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(region{instance=~\"$instance\", name=\"total\"})", + "hide": false, + "interval": "", + "legendFormat": "Total Region", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "region{instance=~\"$instance\", name=\"total\"}", + "hide": false, + "interval": "", + "legendFormat": "{{type}}", + "refId": "D" + } + ], + "title": "Overview", + "type": "stat" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 38, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(region{instance=~\"${instance}\", name=~\"EndPoint.*\"}) by (name)", + "interval": "", + "legendFormat": "{{name}}", + "refId": "A" + } + ], + "title": "Total Region on Node", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + 
"color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 32, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "region{instance=~\"${instance}\", name=~\"EndPoint.*\"}", + "interval": "", + "legendFormat": "{{name}}-{{type}}", + "refId": "A" + } + ], + "title": "Region on Node", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 35, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "region{instance=~\"${instance}\", name!=\"total\", name!~\"EndPoint.*\"}", + "interval": "", + "legendFormat": "{{name}}-{{type}}", + "refId": "A" + } + ], + "title": "Region on StorageGroup", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 37, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": 
"single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "slot{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{name}}-{{type}}", + "refId": "A" + } + ], + "title": "Slot In Storage Group", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 16, + "panels": [], + "title": "System", + "type": "row" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 25 + }, + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(jvm_gc_pause_seconds_count{instance=~\"$instance\", action=\"end of minor GC\"}[1m]))*60", + "interval": "1m", + "legendFormat": "Young GC number", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(jvm_gc_pause_seconds_count{instance=~\"$instance\", action=\"end of major GC\"}[1m]))*60", + "hide": false, + "interval": "1m", + "legendFormat": "Full GC number", + "refId": "B" + } + ], + "title": "The number of GC (per minute)", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 25 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(jvm_gc_pause_seconds_sum{instance=~\"$instance\", action=\"end of minor GC\"}[1m]))*60", + "interval": "1m", + "legendFormat": "The time of young GC", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": 
"sum(rate(jvm_gc_pause_seconds_sum{instance=~\"$instance\", action=\"end of major GC\"}[1m]))*60", + "hide": false, + "interval": "1m", + "legendFormat": "The time of full GC", + "refId": "B" + } + ], + "title": "The time consumed of GC(per minute)", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 33 + }, + "id": 6, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(jvm_memory_max_bytes{instance=~\"$instance\",area=\"heap\"})", + "interval": "1m", + "legendFormat": "Maximum heap memory", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(jvm_memory_used_bytes{instance=~\"$instance\",area=\"heap\"})", + "hide": false, + "interval": "1m", + "legendFormat": "Used heap memory", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "jvm_memory_used_bytes{instance=~\"$instance\",area=\"heap\",id=\"PS Old Gen\"}", + "hide": false, + "interval": "1m", + "legendFormat": "Old area", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "jvm_memory_used_bytes{instance=~\"$instance\",area=\"heap\",id=\"PS Eden Space\"}", + "hide": false, + "interval": "1m", + "legendFormat": "Eden area", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "jvm_memory_used_bytes{instance=~\"$instance\",area=\"heap\",id=\"PS Survivor Space\"}", + "hide": false, + "interval": "1m", + "legendFormat": "Survivor area", + "refId": "E" + } + ], + "title": "Heap Memory", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 33 + }, + "id": 4, + "options": { + 
"legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(jvm_buffer_memory_used_bytes{instance=~\"$instance\"})", + "interval": "1m", + "legendFormat": "off-heap memory", + "refId": "A" + } + ], + "title": "Off-heap Memory", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 41 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "jvm_threads_live_threads{instance=~\"$instance\"}", + "interval": "1m", + "legendFormat": "The total number of jvm thread", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "jvm_threads_states_threads{instance=~\"$instance\"}", + "hide": false, + "interval": "1m", + "legendFormat": "{{state}}", + "refId": "B" + } + ], + "title": "The number of Java thread", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 41 + }, + "id": 26, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "avg(rate(entry_seconds_sum{}[1m])/rate(entry_seconds_count{}[1m])) by (name,instance)", + "interval": "1m", + "legendFormat": "{{instance}}-{{name}}", + "refId": "A" + } + ], + "title": "The time consumed of interface", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 35, + "style": "dark", + "tags": [ + "Apache-IoTDB", + "ConfigNode", + "0.14.0" + ], + "templating": { + 
"list": [ + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(job)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "job", + "options": [], + "query": { + "query": "label_values(job)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(up{job=~\"$job\"},instance)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [], + "query": { + "query": "label_values(up{job=~\"$job\"},instance)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "15s", + "30s", + "1m", + "5m", + "15m", + "30m" + ] + }, + "timezone": "browser", + "title": "Apache IoTDB ConfigNode Dashboard", + "uid": "TbEVYRw7g", + "version": 16, + "weekStart": "" +} \ No newline at end of file diff --git a/grafana-metrics-example/cluster/Apache IoTDB DataNode Dashboard v0.14.0.json b/grafana-metrics-example/cluster/Apache IoTDB DataNode Dashboard v0.14.0.json new file mode 100644 index 000000000000..e64d0376c309 --- /dev/null +++ b/grafana-metrics-example/cluster/Apache IoTDB DataNode Dashboard v0.14.0.json @@ -0,0 +1,1544 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "8.4.2" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "iteration": 1655343436644, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 38, + "panels": [], + "title": "Overview", + "type": "row" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 36, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", 
+ "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "quantity{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{name}} number", + "refId": "A" + } + ], + "title": "The number of entity", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 34, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(quantity_total{instance=~\"$instance\"}[1m])", + "interval": "1m", + "legendFormat": "write point per minute", + "refId": "A" + } + ], + "title": "write point per minute", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 32, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "mem{instance=~\"$instance\"}", + "interval": "1m", + "legendFormat": "{{name}}", + "refId": "A" + } + ], + "title": "storage group used memory", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 30, + "panels": [], + "title": "Interface", + "type": "row" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + 
"showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 18 + }, + "id": 24, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(entry_seconds_count{instance=~\"$instance\"}[1m])", + "interval": "1m", + "legendFormat": "{{instance}}-{{name}}", + "refId": "A" + } + ], + "title": "The QPS of interface", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 18 + }, + "id": 26, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "avg(rate(entry_seconds_sum{}[1m])/rate(entry_seconds_count{}[1m])) by (name,instance)", + "interval": "1m", + "legendFormat": "{{instance}}-{{name}}", + "refId": "A" + } + ], + "title": "The time consumed of interface", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 22, + "panels": [], + "title": "Engine", + "type": "row" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 27 + }, + "id": 18, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": 
"queue{instance=~\"$instance\"}", + "interval": "1m", + "legendFormat": "{{name}}-{{status}}", + "refId": "A" + } + ], + "title": "Task number(pending and active)", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 27 + }, + "id": 20, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(cost_task_seconds_sum{instance=~\"$instance\"}[10m])/rate(cost_task_seconds_count{instance=~\"$instance\"}[10m])", + "interval": "1m", + "legendFormat": "{{instance}}-{{name}}", + "refId": "A" + } + ], + "title": "The time consumed of task(pending and active)", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 35 + }, + "id": 40, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "8.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "avg(cache_total{instance=~\"$instance\", type=\"hit\"}) by (name) / avg(cache_total{instance=~\"$instance\", type=\"all\"}) by (name)", + "format": "time_series", + "interval": "", + "legendFormat": "{{name}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "cache_hit{instance=~\"$instance\"} / 100", + "hide": false, + "interval": "", + "legendFormat": "{{name}}", + "refId": "B" + } + ], + "title": "Cache hit rate", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 43 + }, + "id": 16, + "panels": [], + "title": "System", + "type": "row" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", 
+ "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 44 + }, + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "file_size{instance=~\"$instance\"}", + "interval": "1m", + "legendFormat": "{{instance}}-{{name}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(file_size{instance=~\"$instance\"})", + "hide": false, + "interval": "1m", + "legendFormat": "total size of file", + "refId": "B" + } + ], + "title": "The size of file", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 44 + }, + "id": 14, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "file_count{instance=~\"$instance\"}", + "interval": "", + "legendFormat": "{{name}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(file_count{instance=~\"$instance\"})", + "hide": false, + "interval": "", + "legendFormat": "total number of file", + "refId": "B" + } + ], + "title": "The number of file", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": 
"absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 52 + }, + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(jvm_gc_pause_seconds_count{instance=~\"$instance\", action=\"end of minor GC\"}[1m]))*60", + "interval": "1m", + "legendFormat": "Young GC number", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(jvm_gc_pause_seconds_count{instance=~\"$instance\", action=\"end of major GC\"}[1m]))*60", + "hide": false, + "interval": "1m", + "legendFormat": "Full GC number", + "refId": "B" + } + ], + "title": "The number of GC (per minute)", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 52 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(jvm_gc_pause_seconds_sum{instance=~\"$instance\", action=\"end of minor GC\"}[1m]))*60", + "interval": "1m", + "legendFormat": "The time of young GC", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(jvm_gc_pause_seconds_sum{instance=~\"$instance\", action=\"end of major GC\"}[1m]))*60", + "hide": false, + "interval": "1m", + "legendFormat": "The time of full GC", + "refId": "B" + } + ], + "title": "The time consumed of GC(per minute)", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 60 + }, + "id": 6, + "options": { + "legend": { + "calcs": [], + 
"displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(jvm_memory_max_bytes{instance=~\"$instance\",area=\"heap\"})", + "interval": "1m", + "legendFormat": "Maximum heap memory", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(jvm_memory_used_bytes{instance=~\"$instance\",area=\"heap\"})", + "hide": false, + "interval": "1m", + "legendFormat": "Used heap memory", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "jvm_memory_used_bytes{instance=~\"$instance\",area=\"heap\",id=\"PS Old Gen\"}", + "hide": false, + "interval": "1m", + "legendFormat": "Old area", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "jvm_memory_used_bytes{instance=~\"$instance\",area=\"heap\",id=\"PS Eden Space\"}", + "hide": false, + "interval": "1m", + "legendFormat": "Eden area", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "jvm_memory_used_bytes{instance=~\"$instance\",area=\"heap\",id=\"PS Survivor Space\"}", + "hide": false, + "interval": "1m", + "legendFormat": "Survivor area", + "refId": "E" + } + ], + "title": "Heap Memory", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 60 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(jvm_buffer_memory_used_bytes{instance=~\"$instance\"})", + "interval": "1m", + "legendFormat": "off-heap memory", + "refId": "A" + } + ], + "title": "Off-heap Memory", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 
null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 68 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "jvm_threads_live_threads{instance=~\"$instance\"}", + "interval": "1m", + "legendFormat": "The total number of jvm thread", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "jvm_threads_states_threads{instance=~\"$instance\"}", + "hide": false, + "interval": "1m", + "legendFormat": "{{state}}", + "refId": "B" + } + ], + "title": "The number of Java thread", + "type": "timeseries" + } + ], + "refresh": false, + "schemaVersion": 35, + "style": "dark", + "tags": [ + "Apache-IoTDB", + "DataNode", + "0.14.0" + ], + "templating": { + "list": [ + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(job)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "job", + "options": [], + "query": { + "query": "label_values(job)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(up{job=~\"$job\"},instance)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [], + "query": { + "query": "label_values(up{job=~\"$job\"},instance)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "15s", + "30s", + "1m", + "5m", + "15m", + "30m" + ] + }, + "timezone": "browser", + "title": "Apache IoTDB DataNode Dashboard", + "uid": "TbEVYRw7m", + "version": 13, + "weekStart": "" +} \ No newline at end of file diff --git a/grafana-metrics-example/Apache IoTDB Dashboard v0.13.1.json b/grafana-metrics-example/standalone/Apache IoTDB Dashboard v0.13.1.json similarity index 100% rename from grafana-metrics-example/Apache IoTDB Dashboard v0.13.1.json rename to grafana-metrics-example/standalone/Apache IoTDB Dashboard v0.13.1.json diff --git a/grafana-metrics-example/Apache IoTDB Dashboard v0.14.0.json b/grafana-metrics-example/standalone/Apache IoTDB Dashboard v0.14.0.json similarity index 100% rename from grafana-metrics-example/Apache IoTDB Dashboard v0.14.0.json rename to grafana-metrics-example/standalone/Apache IoTDB Dashboard v0.14.0.json diff --git a/integration-test/README.md b/integration-test/README.md new file mode 100644 index 000000000000..f2c8526b2db4 --- /dev/null +++ b/integration-test/README.md @@ -0,0 +1,88 @@ + + +Integration Test For the MPP Architecture +=================== + +Integration test for the mpp architecture are in this module. + +Now integration testing supports the Cluster mode and the Local Standalone mode. + +Integration Testing with Cluster Mode +------------------- + +You can run the integration test in cluster mode. At present, we have implemented a pseudo cluster with 1 config nodes and 3 data nodes. 
+(As the test cases and the test environment are decoupled, we can easily implement other pseudo clusters or even a docker-based cluster later.) + +The maven command is: +``` +mvn clean verify -DskipUTs -pl integration-test -am -PClusterIT +``` +Notice that the above maven command only runs the ITs. + +------- + +Running the ITs in IntelliJ in cluster mode is easy: +- Step 0. Optionally, when you run the test for the first time, or when you change the code of the module that the integration test module depends on, you may need to use the following command to generate `integration-test/target/template-node` for the nodes of the pseudo cluster. +``` +mvn clean package -DskipTests -pl integration-test -am -PClusterIT +``` + +- Step 1. Run(Menu) -> Edit Configurations... + ![Run(Menu)](https://github.com/apache/iotdb-bin-resources/blob/main/integration-test/pic/Run(Menu).png?raw=true) + + +- Step 2. Add New Configuration -> JUnit + ![Add New Configuration](https://github.com/apache/iotdb-bin-resources/blob/main/integration-test/pic/Add_New_Configuration.png?raw=true) + + +- Step 3. Input some fields as the following picture + ![ClusterIT Category](https://github.com/apache/iotdb-bin-resources/blob/main/integration-test/pic/ClusterIT_Category.png?raw=true) + +Integration Testing with Local Standalone Mode +------------------- + +Integration testing with local standalone mode can be run with both maven and IDEs like IntelliJ. + +The maven command is: +``` +mvn clean verify -DskipUTs -pl integration-test -am -PLocalStandaloneOnMppIT +``` + +------- +If you want to run the ITs in an IDE like IntelliJ, you need to reproduce the effect of the `LocalStandaloneOnMppIT` maven profile. Follow Steps 0-3 below to achieve it. + +- Step 0. Optionally, when you run the test for the first time, or when you change the code of the module that the integration test module depends on, you may need to use the following command to generate `integration-test/target/template-node` for the node of the local standalone mode. +It has the same effect as Step 0 of the cluster mode; the two commands generate the same content. +``` +mvn clean package -DskipTests -pl integration-test -am -PLocalStandaloneOnMppIT +``` + +- Step 1. Run(Menu) -> Edit Configurations... + ![Run(Menu)](https://github.com/apache/iotdb-bin-resources/blob/main/integration-test/pic/Run(Menu).png?raw=true) + + +- Step 2. Add New Configuration -> JUnit + ![Add New Configuration](https://github.com/apache/iotdb-bin-resources/blob/main/integration-test/pic/Add_New_Configuration.png?raw=true) + + +- Step 3.
Input some fields as the following picture + ![StandaloneOnMppIT Category](https://github.com/apache/iotdb-bin-resources/blob/main/integration-test/pic/StandaloneOnMppIT_Category.png?raw=true) diff --git a/integration-test/checkstyle.xml b/integration-test/checkstyle.xml new file mode 100644 index 000000000000..271761cfd649 --- /dev/null +++ b/integration-test/checkstyle.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/integration-test/import-control.xml b/integration-test/import-control.xml new file mode 100644 index 000000000000..5e5a8b5ecdc9 --- /dev/null +++ b/integration-test/import-control.xml @@ -0,0 +1,32 @@ + + + + + + + + + + + + + + + diff --git a/integration-test/pom.xml b/integration-test/pom.xml new file mode 100644 index 000000000000..fccb4f53096d --- /dev/null +++ b/integration-test/pom.xml @@ -0,0 +1,289 @@ + + + + 4.0.0 + + iotdb-parent + org.apache.iotdb + 0.14.0-SNAPSHOT + ../pom.xml + + integration-test + + 1 + true + true + true + none + 200m + 200m + + + + + + + org.apache.iotdb + iotdb-server + ${project.version} + + + org.apache.iotdb + iotdb-server + ${project.version} + test-jar + test + + + org.apache.iotdb + iotdb-session + ${project.version} + + + org.apache.iotdb + iotdb-jdbc + ${project.version} + + + org.apache.iotdb + iotdb-confignode + ${project.version} + + + junit + junit + + + + + + org.apache.maven.plugins + maven-surefire-plugin + 3.0.0-M5 + + + org.apache.maven.plugins + maven-failsafe-plugin + 3.0.0-M5 + + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + default-test + + true + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + ${maven.assembly.version} + + ${integrationTest.launchNodeInSameJVM} + + + + + cluster-test-assembly + package + + single + + + + src/assembly/mpp-test.xml + + template-node + false + + + + cluster-test-assembly-share + package + + single + + + + src/assembly/mpp-share.xml + + template-node-share + false + + + + + + + com.google.code.maven-replacer-plugin + replacer + 1.5.3 + + ${integrationTest.launchNodeInSameJVM} + + + + package + + replace + + + ${project.build.directory}/template-node + true + false + + datanode/conf/datanode-env.sh + confignode/conf/confignode-env.sh + datanode/conf/datanode-env.bat + confignode/conf/confignode-env.bat + + + + #HEAP_NEWSIZE=.* + HEAP_NEWSIZE="${integrationTest.nodeNewHeapSize}" + + + #MAX_HEAP_SIZE=.* + MAX_HEAP_SIZE="${integrationTest.nodeMaxHeapSize}" + + + @REM set HEAP_NEWSIZE.* + set HEAP_NEWSIZE="${integrationTest.nodeNewHeapSize}" + + + @REM set MAX_HEAP_SIZE=.* + set MAX_HEAP_SIZE="${integrationTest.nodeMaxHeapSize}" + + + + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + integration-test + + integration-test + + + false + ${integrationTest.includedGroups} + ${integrationTest.excludedGroups} + false + ${integrationTest.parallelMode} + 1 + ${integrationTest.forkCount} + false + + ${integrationTest.testEnv} + ${integrationTest.randomSelectWriteNode} + ${integrationTest.readAndVerifyWithMultiNode} + + target/failsafe-reports/failsafe-summary-IT.xml + + + + verify + + verify + + + false + + target/failsafe-reports/failsafe-summary-IT.xml + + + + + + + + + + LocalStandaloneIT + + org.apache.iotdb.itbase.category.LocalStandaloneIT + + Standalone + + + true + + + + + RemoteIT + + org.apache.iotdb.itbase.category.RemoteIT + + Remote + + + false + + + + ClusterIT + + org.apache.iotdb.itbase.category.ClusterIT + + Cluster1 + false + true + true + classes + + + false + + + + LocalStandaloneOnMppIT + + 
org.apache.iotdb.itbase.category.LocalStandaloneIT + + LocalStandaloneOnMpp + false + false + false + classes + + + false + + + + diff --git a/integration-test/src/assembly/mpp-share.xml b/integration-test/src/assembly/mpp-share.xml new file mode 100644 index 000000000000..01fce3555def --- /dev/null +++ b/integration-test/src/assembly/mpp-share.xml @@ -0,0 +1,33 @@ + + + + mpp-share + + dir + + false + + + lib + + + diff --git a/integration-test/src/assembly/mpp-test.xml b/integration-test/src/assembly/mpp-test.xml new file mode 100644 index 000000000000..76f62c4925f4 --- /dev/null +++ b/integration-test/src/assembly/mpp-test.xml @@ -0,0 +1,74 @@ + + + + mpp-test + + dir + + false + + + datanode/conf + ${maven.multiModuleProjectDirectory}/server/src/assembly/resources/conf + + + confignode/conf + ${maven.multiModuleProjectDirectory}/confignode/src/assembly/resources/conf + + + datanode/conf + ${maven.multiModuleProjectDirectory}/metrics/interface/src/main/assembly/resources/conf + + + datanode/sbin + ${maven.multiModuleProjectDirectory}/server/src/assembly/resources/sbin + 0755 + + + confignode/sbin + ${maven.multiModuleProjectDirectory}/confignode/src/assembly/resources/sbin + 0755 + + + tools + ${maven.multiModuleProjectDirectory}/server/src/assembly/resources/tools + 0755 + + + datanode/sbin + ${maven.multiModuleProjectDirectory}/cli/src/assembly/resources/sbin + 0755 + + + tools + ${maven.multiModuleProjectDirectory}/cli/src/assembly/resources/tools + 0755 + + + + + ${maven.multiModuleProjectDirectory}/server/src/assembly/resources/conf/datanode-env.sh + datanode/conf/datanode-env.sh + 0755 + + + diff --git a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Accumulator.java b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Accumulator.java new file mode 100644 index 000000000000..ff1e949f0e54 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Accumulator.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.query.udf.example; + +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.Row; +import org.apache.iotdb.udf.api.access.RowIterator; +import org.apache.iotdb.udf.api.access.RowWindow; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameterValidator; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy; +import org.apache.iotdb.udf.api.customizer.strategy.SlidingSizeWindowAccessStrategy; +import org.apache.iotdb.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +public class Accumulator implements UDTF { + + private static final Logger logger = LoggerFactory.getLogger(Accumulator.class); + + @Override + public void validate(UDFParameterValidator validator) throws Exception { + validator.validateInputSeriesDataType(0, Type.INT32, Type.DOUBLE); + } + + @Override + public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) { + logger.debug("Accumulator#beforeStart"); + configurations.setOutputDataType(Type.INT32); + switch (parameters.getStringOrDefault( + ExampleUDFConstant.ACCESS_STRATEGY_KEY, ExampleUDFConstant.ACCESS_STRATEGY_ROW_BY_ROW)) { + case ExampleUDFConstant.ACCESS_STRATEGY_SLIDING_SIZE: + configurations.setAccessStrategy( + new SlidingSizeWindowAccessStrategy( + parameters.getInt(ExampleUDFConstant.WINDOW_SIZE_KEY))); + break; + case ExampleUDFConstant.ACCESS_STRATEGY_SLIDING_TIME: + configurations.setAccessStrategy( + new SlidingTimeWindowAccessStrategy( + parameters.getLong(ExampleUDFConstant.TIME_INTERVAL_KEY), + parameters.getLong(ExampleUDFConstant.SLIDING_STEP_KEY), + parameters.getLong(ExampleUDFConstant.DISPLAY_WINDOW_BEGIN_KEY), + parameters.getLong(ExampleUDFConstant.DISPLAY_WINDOW_END_KEY))); + break; + case ExampleUDFConstant.ACCESS_STRATEGY_ROW_BY_ROW: + default: + configurations.setAccessStrategy(new RowByRowAccessStrategy()); + } + } + + @Override + public void transform(Row row, PointCollector collector) throws IOException { + collector.putInt(row.getTime(), row.getInt(0)); + } + + @Override + public void transform(RowWindow rowWindow, PointCollector collector) throws IOException { + int accumulator = 0; + RowIterator rowIterator = rowWindow.getRowIterator(); + while (rowIterator.hasNextRow()) { + switch (rowWindow.getDataType(0)) { + case INT32: + accumulator += rowIterator.next().getInt(0); + break; + case DOUBLE: + accumulator += (int) rowIterator.next().getDouble(0); + break; + } + } + if (rowWindow.windowSize() != 0) { + collector.putInt(rowWindow.getRow(0).getTime(), accumulator); + } + } + + @Override + public void beforeDestroy() { + logger.debug("Accumulator#beforeDestroy"); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Adder.java b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Adder.java new file mode 100644 index 000000000000..bf43997aa749 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Adder.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.query.udf.example; + +import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException; +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.Row; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameterValidator; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +public class Adder implements UDTF { + + private static final Logger logger = LoggerFactory.getLogger(Adder.class); + + private double addend; + + @Override + public void validate(UDFParameterValidator validator) throws Exception { + validator + .validateInputSeriesNumber(2) + .validateInputSeriesDataType(0, Type.INT32, Type.INT64, Type.FLOAT, Type.DOUBLE) + .validateInputSeriesDataType(1, Type.INT32, Type.INT64, Type.FLOAT, Type.DOUBLE); + } + + @Override + public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) { + logger.debug("Adder#beforeStart"); + addend = parameters.getFloatOrDefault("addend", 0); + configurations.setOutputDataType(Type.INT64).setAccessStrategy(new RowByRowAccessStrategy()); + } + + @Override + public void transform(Row row, PointCollector collector) throws Exception { + if (row.isNull(0) || row.isNull(1)) { + return; + } + collector.putLong( + row.getTime(), (long) (extractDoubleValue(row, 0) + extractDoubleValue(row, 1) + addend)); + } + + private double extractDoubleValue(Row row, int index) throws IOException { + double value; + switch (row.getDataType(index)) { + case INT32: + value = row.getInt(index); + break; + case INT64: + value = (double) row.getLong(index); + break; + case FLOAT: + value = row.getFloat(index); + break; + case DOUBLE: + value = row.getDouble(index); + break; + default: + throw new UnSupportedDataTypeException(row.getDataType(index).toString()); + } + return value; + } + + @Override + public void beforeDestroy() { + logger.debug("Adder#beforeDestroy"); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Counter.java b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Counter.java new file mode 100644 index 000000000000..2408c1b575fa --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Counter.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.query.udf.example; + +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.Row; +import org.apache.iotdb.udf.api.access.RowWindow; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy; +import org.apache.iotdb.udf.api.customizer.strategy.SlidingSizeWindowAccessStrategy; +import org.apache.iotdb.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +public class Counter implements UDTF { + + private static final Logger logger = LoggerFactory.getLogger(Counter.class); + + @Override + public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) { + logger.debug("Counter#beforeStart"); + configurations.setOutputDataType(Type.INT32); + switch (parameters.getStringOrDefault( + ExampleUDFConstant.ACCESS_STRATEGY_KEY, ExampleUDFConstant.ACCESS_STRATEGY_ROW_BY_ROW)) { + case ExampleUDFConstant.ACCESS_STRATEGY_SLIDING_SIZE: + configurations.setAccessStrategy( + new SlidingSizeWindowAccessStrategy( + parameters.getInt(ExampleUDFConstant.WINDOW_SIZE_KEY))); + break; + case ExampleUDFConstant.ACCESS_STRATEGY_SLIDING_TIME: + configurations.setAccessStrategy( + parameters.hasAttribute(ExampleUDFConstant.SLIDING_STEP_KEY) + && parameters.hasAttribute(ExampleUDFConstant.DISPLAY_WINDOW_BEGIN_KEY) + && parameters.hasAttribute(ExampleUDFConstant.DISPLAY_WINDOW_END_KEY) + ? 
new SlidingTimeWindowAccessStrategy( + parameters.getLong(ExampleUDFConstant.TIME_INTERVAL_KEY), + parameters.getLong(ExampleUDFConstant.SLIDING_STEP_KEY), + parameters.getLong(ExampleUDFConstant.DISPLAY_WINDOW_BEGIN_KEY), + parameters.getLong(ExampleUDFConstant.DISPLAY_WINDOW_END_KEY)) + : new SlidingTimeWindowAccessStrategy( + parameters.getLong(ExampleUDFConstant.TIME_INTERVAL_KEY))); + break; + case ExampleUDFConstant.ACCESS_STRATEGY_ROW_BY_ROW: + default: + configurations.setAccessStrategy(new RowByRowAccessStrategy()); + } + } + + @Override + public void transform(Row row, PointCollector collector) throws Exception { + collector.putInt(row.getTime(), 1); + } + + @Override + public void transform(RowWindow rowWindow, PointCollector collector) throws IOException { + if (rowWindow.windowSize() != 0) { + collector.putInt(rowWindow.getRow(0).getTime(), rowWindow.windowSize()); + } + } + + @Override + public void beforeDestroy() { + logger.debug("Counter#beforeDestroy"); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/ExampleUDFConstant.java b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/ExampleUDFConstant.java new file mode 100644 index 000000000000..87984d2db870 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/ExampleUDFConstant.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.db.query.udf.example; + +public class ExampleUDFConstant { + public static final String ACCESS_STRATEGY_KEY = "access"; + public static final String ACCESS_STRATEGY_ROW_BY_ROW = "row-by-row"; + public static final String ACCESS_STRATEGY_SLIDING_SIZE = "size"; + public static final String ACCESS_STRATEGY_SLIDING_TIME = "time"; + public static final String WINDOW_SIZE_KEY = "windowSize"; + public static final String TIME_INTERVAL_KEY = "timeInterval"; + public static final String SLIDING_STEP_KEY = "slidingStep"; + public static final String DISPLAY_WINDOW_BEGIN_KEY = "displayWindowBegin"; + public static final String DISPLAY_WINDOW_END_KEY = "displayWindowEnd"; +} diff --git a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Max.java b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Max.java new file mode 100644 index 000000000000..47a44d6e0211 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Max.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.query.udf.example; + +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.Row; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameterValidator; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +public class Max implements UDTF { + + private static final Logger logger = LoggerFactory.getLogger(Max.class); + + private Long time; + private int value; + + @Override + public void validate(UDFParameterValidator validator) throws Exception { + validator.validateInputSeriesNumber(1).validateInputSeriesDataType(0, Type.INT32); + } + + @Override + public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) { + logger.debug("Max#beforeStart"); + configurations.setOutputDataType(Type.INT32).setAccessStrategy(new RowByRowAccessStrategy()); + } + + @Override + public void transform(Row row, PointCollector collector) throws IOException { + int candidateValue = row.getInt(0); + if (time == null || value < candidateValue) { + time = row.getTime(); + value = candidateValue; + } + } + + @Override + public void terminate(PointCollector collector) throws IOException { + if (time != null) { + collector.putInt(time, value); + } + } + + @Override + public void beforeDestroy() { + logger.debug("Max#beforeDestroy"); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Multiplier.java b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Multiplier.java new file mode 100644 index 000000000000..2e9b95fc3ab9 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/Multiplier.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.query.udf.example; + +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.Row; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameterValidator; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class Multiplier implements UDTF { + + private static final Logger logger = LoggerFactory.getLogger(Multiplier.class); + + private long a; + private long b; + + @Override + public void validate(UDFParameterValidator validator) throws Exception { + validator.validateInputSeriesNumber(1).validateInputSeriesDataType(0, Type.INT64); + } + + @Override + public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) { + logger.debug("Multiplier#beforeStart"); + a = parameters.getLongOrDefault("a", 0); + b = parameters.getLongOrDefault("b", 0); + configurations.setOutputDataType(Type.INT64).setAccessStrategy(new RowByRowAccessStrategy()); + } + + @Override + public void transform(Row row, PointCollector collector) throws Exception { + collector.putLong(row.getTime(), row.getLong(0) * a * b); + } + + @Override + public void beforeDestroy() { + logger.debug("Multiplier#beforeDestroy"); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/SlidingSizeWindowConstructorTester0.java b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/SlidingSizeWindowConstructorTester0.java new file mode 100644 index 000000000000..677d2060fa48 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/SlidingSizeWindowConstructorTester0.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.query.udf.example; + +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.RowWindow; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.SlidingSizeWindowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SlidingSizeWindowConstructorTester0 implements UDTF { + + private static final Logger logger = + LoggerFactory.getLogger(SlidingSizeWindowConstructorTester0.class); + + @Override + public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) { + logger.debug("SlidingSizeWindowConstructorTester0#beforeStart"); + int windowSize = parameters.getInt("windowSize"); + int slidingStep = parameters.getInt("slidingStep"); + configurations + .setOutputDataType(Type.INT32) + .setAccessStrategy(new SlidingSizeWindowAccessStrategy(windowSize, slidingStep)); + } + + @Override + public void transform(RowWindow rowWindow, PointCollector collector) throws Exception { + if (rowWindow.windowSize() != 0) { + collector.putInt(rowWindow.getRow(0).getTime(), rowWindow.windowSize()); + } + } + + @Override + public void beforeDestroy() { + logger.debug("SlidingSizeWindowConstructorTester0#beforeDestroy"); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/SlidingSizeWindowConstructorTester1.java b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/SlidingSizeWindowConstructorTester1.java new file mode 100644 index 000000000000..defeaf7bb84f --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/SlidingSizeWindowConstructorTester1.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.query.udf.example; + +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.Row; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameterValidator; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SlidingSizeWindowConstructorTester1 implements UDTF { + + private static final Logger logger = + LoggerFactory.getLogger(SlidingSizeWindowConstructorTester1.class); + + private int consumptionPoint; + + @Override + public void validate(UDFParameterValidator validator) throws Exception { + validator.validateInputSeriesNumber(1).validateInputSeriesDataType(0, Type.INT32); + } + + @Override + public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) { + logger.debug("SlidingSizeWindowConstructorTester1#beforeStart"); + consumptionPoint = parameters.getInt("consumptionPoint"); + configurations.setOutputDataType(Type.INT32).setAccessStrategy(new RowByRowAccessStrategy()); + } + + @Override + public void transform(Row row, PointCollector collector) throws Exception { + if (row.getTime() == consumptionPoint) { + collector.putInt(row.getTime(), row.getInt(0)); + } + } + + @Override + public void beforeDestroy() { + logger.debug("SlidingSizeWindowConstructorTester1#beforeDestroy"); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/SlidingTimeWindowConstructionTester.java b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/SlidingTimeWindowConstructionTester.java new file mode 100644 index 000000000000..6d71877b20b6 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/SlidingTimeWindowConstructionTester.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.query.udf.example; + +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.RowIterator; +import org.apache.iotdb.udf.api.access.RowWindow; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameterValidator; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +public class SlidingTimeWindowConstructionTester implements UDTF { + + private static final Logger logger = + LoggerFactory.getLogger(SlidingTimeWindowConstructionTester.class); + + @Override + public void validate(UDFParameterValidator validator) throws Exception { + validator.validateInputSeriesNumber(1).validateInputSeriesDataType(0, Type.INT32); + } + + @Override + public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) { + logger.debug("SlidingTimeWindowConstructionTester#beforeStart"); + long timeInterval = parameters.getLong(ExampleUDFConstant.TIME_INTERVAL_KEY); + configurations + .setOutputDataType(Type.INT32) + .setAccessStrategy(new SlidingTimeWindowAccessStrategy(timeInterval)); + } + + @Override + public void transform(RowWindow rowWindow, PointCollector collector) throws IOException { + int accumulator = 0; + RowIterator rowIterator = rowWindow.getRowIterator(); + while (rowIterator.hasNextRow()) { + accumulator += rowIterator.next().getInt(0); + } + if (rowWindow.windowSize() != 0) { + collector.putInt(rowWindow.getRow(0).getTime(), accumulator); + } + } + + @Override + public void beforeDestroy() { + logger.debug("SlidingTimeWindowConstructionTester#beforeDestroy"); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/TerminateTester.java b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/TerminateTester.java new file mode 100644 index 000000000000..06e90cce04a2 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/TerminateTester.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.query.udf.example; + +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.Row; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TerminateTester implements UDTF { + + private static final Logger logger = LoggerFactory.getLogger(TerminateTester.class); + + private Long maxTime; + private int count; + + @Override + public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) { + logger.debug("TerminateTester#beforeStart"); + configurations.setOutputDataType(Type.INT32).setAccessStrategy(new RowByRowAccessStrategy()); + maxTime = null; + count = 0; + } + + @Override + public void transform(Row row, PointCollector collector) throws Exception { + maxTime = row.getTime(); + ++count; + + collector.putInt(maxTime, 1); + } + + @Override + public void terminate(PointCollector collector) throws Exception { + if (maxTime != null) { + collector.putInt(maxTime + 1, count); + } + } + + @Override + public void beforeDestroy() { + logger.debug("TerminateTester#beforeDestroy"); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/ValidateTester.java b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/ValidateTester.java new file mode 100644 index 000000000000..e3248c077825 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/ValidateTester.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.query.udf.example; + +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameterValidator; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.RowByRowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; + +public class ValidateTester implements UDTF { + + @Override + public void validate(UDFParameterValidator validator) throws Exception { + validator + .validateRequiredAttribute("k") + .validateInputSeriesNumber(2) + .validateInputSeriesDataType(0, Type.INT32, Type.INT64) + .validateInputSeriesDataType(1, Type.INT32, Type.INT64); + } + + @Override + public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) { + configurations.setAccessStrategy(new RowByRowAccessStrategy()).setOutputDataType(Type.INT32); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/WindowStartEnd.java b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/WindowStartEnd.java new file mode 100644 index 000000000000..fb1f66ec4b06 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/db/query/udf/example/WindowStartEnd.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iotdb.db.query.udf.example; + +import org.apache.iotdb.udf.api.UDTF; +import org.apache.iotdb.udf.api.access.RowWindow; +import org.apache.iotdb.udf.api.collector.PointCollector; +import org.apache.iotdb.udf.api.customizer.config.UDTFConfigurations; +import org.apache.iotdb.udf.api.customizer.parameter.UDFParameters; +import org.apache.iotdb.udf.api.customizer.strategy.SlidingSizeWindowAccessStrategy; +import org.apache.iotdb.udf.api.customizer.strategy.SlidingTimeWindowAccessStrategy; +import org.apache.iotdb.udf.api.type.Type; + +import java.io.IOException; + +public class WindowStartEnd implements UDTF { + + @Override + public void beforeStart(UDFParameters parameters, UDTFConfigurations configurations) { + configurations.setOutputDataType(Type.INT64); + if (ExampleUDFConstant.ACCESS_STRATEGY_SLIDING_SIZE.equals( + parameters.getString(ExampleUDFConstant.ACCESS_STRATEGY_KEY))) { + configurations.setAccessStrategy( + parameters.hasAttribute(ExampleUDFConstant.SLIDING_STEP_KEY) + ? 
new SlidingSizeWindowAccessStrategy( + parameters.getInt(ExampleUDFConstant.WINDOW_SIZE_KEY), + parameters.getInt(ExampleUDFConstant.SLIDING_STEP_KEY)) + : new SlidingSizeWindowAccessStrategy( + parameters.getInt(ExampleUDFConstant.WINDOW_SIZE_KEY))); + } else { + configurations.setAccessStrategy( + parameters.hasAttribute(ExampleUDFConstant.SLIDING_STEP_KEY) + && parameters.hasAttribute(ExampleUDFConstant.DISPLAY_WINDOW_BEGIN_KEY) + && parameters.hasAttribute(ExampleUDFConstant.DISPLAY_WINDOW_END_KEY) + ? new SlidingTimeWindowAccessStrategy( + parameters.getLong(ExampleUDFConstant.TIME_INTERVAL_KEY), + parameters.getLong(ExampleUDFConstant.SLIDING_STEP_KEY), + parameters.getLong(ExampleUDFConstant.DISPLAY_WINDOW_BEGIN_KEY), + parameters.getLong(ExampleUDFConstant.DISPLAY_WINDOW_END_KEY)) + : new SlidingTimeWindowAccessStrategy( + parameters.getLong(ExampleUDFConstant.TIME_INTERVAL_KEY))); + } + } + + @Override + public void transform(RowWindow rowWindow, PointCollector collector) throws IOException { + collector.putLong(rowWindow.windowStartTime(), rowWindow.windowEndTime()); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/AbstractEnv.java b/integration-test/src/main/java/org/apache/iotdb/it/env/AbstractEnv.java new file mode 100644 index 000000000000..7c4a104b8bc9 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/AbstractEnv.java @@ -0,0 +1,353 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.it.env; + +import org.apache.iotdb.itbase.env.BaseEnv; +import org.apache.iotdb.itbase.runtime.ClusterTestConnection; +import org.apache.iotdb.itbase.runtime.NodeConnection; +import org.apache.iotdb.itbase.runtime.ParallelRequestDelegate; +import org.apache.iotdb.itbase.runtime.RequestDelegate; +import org.apache.iotdb.itbase.runtime.SerialRequestDelegate; +import org.apache.iotdb.jdbc.Config; +import org.apache.iotdb.jdbc.Constant; +import org.apache.iotdb.jdbc.IoTDBConnection; + +import org.apache.commons.lang3.SystemUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.apache.iotdb.jdbc.Config.VERSION; +import static org.junit.Assert.fail; + +public abstract class AbstractEnv implements BaseEnv { + private static final Logger logger = LoggerFactory.getLogger(AbstractEnv.class); + private final int NODE_START_TIMEOUT = 100; + private final int PROBE_TIMEOUT_MS = 2000; + private final int NODE_NETWORK_TIMEOUT_MS = 65_000; + private final String lockFilePath = + System.getProperty("user.dir") + File.separator + "target" + File.separator + "lock-"; + protected List configNodeWrapperList = Collections.emptyList(); + protected List dataNodeWrapperList = Collections.emptyList(); + private final Random rand = new Random(); + protected String testMethodName = null; + + protected void initEnvironment(int configNodesNum, int dataNodesNum) { + this.configNodeWrapperList = new ArrayList<>(); + this.dataNodeWrapperList = new ArrayList<>(); + + final String testClassName = getTestClassName(); + final String testMethodName = getTestMethodName(); + + ConfigNodeWrapper seedConfigNodeWrapper = + new ConfigNodeWrapper(true, "", testClassName, testMethodName, searchAvailablePorts()); + seedConfigNodeWrapper.createDir(); + seedConfigNodeWrapper.changeConfig(ConfigFactory.getConfig().getConfignodeProperties()); + seedConfigNodeWrapper.start(); + String targetConfigNode = seedConfigNodeWrapper.getIpAndPortString(); + this.configNodeWrapperList.add(seedConfigNodeWrapper); + + List configNodeEndpoints = new ArrayList<>(); + RequestDelegate configNodesDelegate = new SerialRequestDelegate<>(configNodeEndpoints); + for (int i = 1; i < configNodesNum; i++) { + ConfigNodeWrapper configNodeWrapper = + new ConfigNodeWrapper( + false, targetConfigNode, testClassName, testMethodName, searchAvailablePorts()); + this.configNodeWrapperList.add(configNodeWrapper); + configNodeEndpoints.add(configNodeWrapper.getIpAndPortString()); + configNodeWrapper.createDir(); + configNodeWrapper.changeConfig(null); + configNodesDelegate.addRequest( + () -> { + configNodeWrapper.start(); + return null; + }); + } + try { + configNodesDelegate.requestAll(); + } catch (SQLException e) { + logger.error("Start configNodes failed", e); + fail(); + } + + List dataNodeEndpoints = new ArrayList<>(); + RequestDelegate dataNodesDelegate = + new ParallelRequestDelegate<>(dataNodeEndpoints, NODE_START_TIMEOUT); + for (int i = 0; i < dataNodesNum; i++) { + DataNodeWrapper dataNodeWrapper = + new DataNodeWrapper( + targetConfigNode, testClassName, testMethodName, searchAvailablePorts()); + 
this.dataNodeWrapperList.add(dataNodeWrapper); + dataNodeEndpoints.add(dataNodeWrapper.getIpAndPortString()); + dataNodeWrapper.createDir(); + dataNodeWrapper.changeConfig(ConfigFactory.getConfig().getEngineProperties()); + dataNodesDelegate.addRequest( + () -> { + dataNodeWrapper.start(); + return null; + }); + } + + try { + dataNodesDelegate.requestAll(); + } catch (SQLException e) { + logger.error("Start dataNodes failed", e); + fail(); + } + + testWorking(); + } + + private void cleanupEnvironment() { + for (AbstractNodeWrapper nodeWrapper : + Stream.concat(this.dataNodeWrapperList.stream(), this.configNodeWrapperList.stream()) + .collect(Collectors.toList())) { + nodeWrapper.stop(); + nodeWrapper.waitingToShutDown(); + nodeWrapper.destroyDir(); + String lockPath = getLockFilePath(nodeWrapper.getPort()); + if (!new File(lockPath).delete()) { + logger.error("Delete lock file {} failed", lockPath); + } + } + testMethodName = null; + } + + public String getTestClassName() { + StackTraceElement[] stack = Thread.currentThread().getStackTrace(); + for (StackTraceElement stackTraceElement : stack) { + String className = stackTraceElement.getClassName(); + if (className.endsWith("IT")) { + return className.substring(className.lastIndexOf(".") + 1); + } + } + return "UNKNOWN-IT"; + } + + public void testWorking() { + List endpoints = + dataNodeWrapperList.stream() + .map(DataNodeWrapper::getIpAndPortString) + .collect(Collectors.toList()); + RequestDelegate testDelegate = + new ParallelRequestDelegate<>(endpoints, NODE_START_TIMEOUT); + for (DataNodeWrapper dataNode : dataNodeWrapperList) { + final String dataNodeEndpoint = dataNode.getIpAndPortString(); + testDelegate.addRequest( + () -> { + Exception lastException = null; + for (int i = 0; i < 30; i++) { + try (Connection ignored = getConnection(dataNodeEndpoint, PROBE_TIMEOUT_MS)) { + return null; + } catch (Exception e) { + lastException = e; + TimeUnit.SECONDS.sleep(1L); + } + } + throw lastException; + }); + } + try { + testDelegate.requestAll(); + } catch (Exception e) { + fail("After 30 times retry, the cluster can't work!"); + } + } + + @Override + public void cleanAfterClass() { + cleanupEnvironment(); + } + + @Override + public void cleanAfterTest() { + cleanupEnvironment(); + } + + public final int[] searchAvailablePorts() { + do { + int randomPortStart = 1000 + (int) (Math.random() * (1999 - 1000)); + randomPortStart = randomPortStart * 10 + 1; + File lockFile = new File(getLockFilePath(randomPortStart)); + if (lockFile.exists()) { + continue; + } + + List requiredPorts = + IntStream.rangeClosed(randomPortStart, randomPortStart + 9) + .boxed() + .collect(Collectors.toList()); + try { + if (checkPortsAvailable(requiredPorts) && lockFile.createNewFile()) { + return requiredPorts.stream().mapToInt(Integer::intValue).toArray(); + } + } catch (IOException e) { + // ignore + } + } while (true); + } + + private boolean checkPortsAvailable(List ports) { + String cmd = getSearchAvailablePortCmd(ports); + try { + Process proc = Runtime.getRuntime().exec(cmd); + return proc.waitFor() == 1; + } catch (IOException e) { + // ignore + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + return false; + } + + private String getSearchAvailablePortCmd(List ports) { + if (SystemUtils.IS_OS_WINDOWS) { + return getWindowsSearchPortCmd(ports); + } + return getUnixSearchPortCmd(ports); + } + + private String getWindowsSearchPortCmd(List ports) { + String cmd = "netstat -aon -p tcp | findStr "; + return cmd + + 
ports.stream().map(v -> "/C:'127.0.0.1:" + v + "'").collect(Collectors.joining(" ")); + } + + private String getUnixSearchPortCmd(List ports) { + String cmd = "lsof -iTCP -sTCP:LISTEN -P -n | awk '{print $9}' | grep -E "; + return cmd + ports.stream().map(String::valueOf).collect(Collectors.joining("|")) + "\""; + } + + @Override + public Connection getConnection() throws SQLException { + return new ClusterTestConnection(getWriteConnection(null), getReadConnections(null)); + } + + private Connection getConnection(String endpoint, int queryTimeout) throws SQLException { + IoTDBConnection connection = + (IoTDBConnection) + DriverManager.getConnection( + Config.IOTDB_URL_PREFIX + endpoint + getParam(null, queryTimeout), + System.getProperty("User", "root"), + System.getProperty("Password", "root")); + connection.setQueryTimeout(queryTimeout); + + return connection; + } + + @Override + public Connection getConnection(Constant.Version version) throws SQLException { + if (System.getProperty("ReadAndVerifyWithMultiNode", "true").equalsIgnoreCase("true")) { + return new ClusterTestConnection(getWriteConnection(version), getReadConnections(version)); + } else { + return getWriteConnection(version).getUnderlyingConnecton(); + } + } + + protected NodeConnection getWriteConnection(Constant.Version version) throws SQLException { + DataNodeWrapper dataNode; + + if (System.getProperty("RandomSelectWriteNode", "true").equalsIgnoreCase("true")) { + // Randomly choose a node for handling write requests + dataNode = this.dataNodeWrapperList.get(rand.nextInt(this.dataNodeWrapperList.size())); + } else { + dataNode = this.dataNodeWrapperList.get(0); + } + + String endpoint = dataNode.getIp() + ":" + dataNode.getPort(); + Connection writeConnection = + DriverManager.getConnection( + Config.IOTDB_URL_PREFIX + endpoint + getParam(version, NODE_NETWORK_TIMEOUT_MS), + System.getProperty("User", "root"), + System.getProperty("Password", "root")); + return new NodeConnection( + endpoint, + NodeConnection.NodeRole.DATA_NODE, + NodeConnection.ConnectionRole.WRITE, + writeConnection); + } + + protected List getReadConnections(Constant.Version version) throws SQLException { + List endpoints = new ArrayList<>(); + ParallelRequestDelegate readConnRequestDelegate = + new ParallelRequestDelegate<>(endpoints, NODE_START_TIMEOUT); + for (DataNodeWrapper dataNodeWrapper : this.dataNodeWrapperList) { + final String endpoint = dataNodeWrapper.getIpAndPortString(); + endpoints.add(endpoint); + readConnRequestDelegate.addRequest( + () -> { + Connection readConnection = + DriverManager.getConnection( + Config.IOTDB_URL_PREFIX + endpoint + getParam(version, NODE_NETWORK_TIMEOUT_MS), + System.getProperty("User", "root"), + System.getProperty("Password", "root")); + return new NodeConnection( + endpoint, + NodeConnection.NodeRole.DATA_NODE, + NodeConnection.ConnectionRole.READ, + readConnection); + }); + } + return readConnRequestDelegate.requestAll(); + } + + private String getParam(Constant.Version version, int timeout) { + StringBuilder sb = new StringBuilder("?"); + sb.append(Config.NETWORK_TIMEOUT).append("=").append(timeout); + if (version != null) { + sb.append("&").append(VERSION).append("=").append(version); + } + return sb.toString(); + } + + public String getTestMethodName() { + return testMethodName; + } + + @Override + public void setTestMethodName(String testMethodName) { + this.testMethodName = testMethodName; + } + + public void dumpTestJVMSnapshot() { + for (ConfigNodeWrapper configNodeWrapper : configNodeWrapperList) 
{ + configNodeWrapper.dumpJVMSnapshot(testMethodName); + } + for (DataNodeWrapper dataNodeWrapper : dataNodeWrapperList) { + dataNodeWrapper.dumpJVMSnapshot(testMethodName); + } + } + + private String getLockFilePath(int port) { + return lockFilePath + port; + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/AbstractNodeWrapper.java b/integration-test/src/main/java/org/apache/iotdb/it/env/AbstractNodeWrapper.java new file mode 100644 index 000000000000..6d02fd2f2885 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/AbstractNodeWrapper.java @@ -0,0 +1,383 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.it.env; + +import org.apache.iotdb.itbase.env.BaseNodeWrapper; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.file.PathUtils; +import org.apache.commons.lang3.SystemUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.management.MBeanServerConnection; +import javax.management.remote.JMXConnector; +import javax.management.remote.JMXConnectorFactory; +import javax.management.remote.JMXServiceURL; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.io.InputStream; +import java.io.PrintWriter; +import java.lang.management.ManagementFactory; +import java.lang.management.MonitorInfo; +import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; +import java.net.MalformedURLException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.LinkOption; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.time.LocalDateTime; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; + +import static org.junit.Assert.fail; + +public abstract class AbstractNodeWrapper implements BaseNodeWrapper { + private static final Logger logger = LoggerFactory.getLogger(AbstractNodeWrapper.class); + private final String templateNodePath = + System.getProperty("user.dir") + File.separator + "target" + File.separator + "template-node"; + private final String templateNodeLibPath = + System.getProperty("user.dir") + + File.separator + + "target" + + File.separator + + "template-node-share" + + File.separator + + "lib"; + private final File NULL_FILE = + SystemUtils.IS_OS_WINDOWS ? 
new File("nul") : new File("/dev/null"); + protected final String testClassName; + protected final String testMethodName; + protected final int[] portList; + private final int jmxPort; + private final String jmxUserName = "root"; + private final String jmxPassword = "passw!d"; + private final String TAB = " "; + private Process instance; + + public AbstractNodeWrapper(String testClassName, String testMethodName, int[] portList) { + this.testClassName = testClassName; + this.testMethodName = testMethodName; + this.portList = portList; + jmxPort = this.portList[portList.length - 1]; + } + + @Override + public void createDir() { + // Copy templateNodePath to nodePath + String destPath = getNodePath(); + try { + try { + PathUtils.deleteDirectory(Paths.get(destPath)); + } catch (NoSuchFileException e) { + // ignored + } + // Here we need to copy without follow symbolic links, so we can't use FileUtils directly. + try (Stream s = Files.walk(Paths.get(this.templateNodePath))) { + s.forEach( + source -> { + Path destination = + Paths.get(destPath, source.toString().substring(this.templateNodePath.length())); + try { + Files.copy( + source, + destination, + LinkOption.NOFOLLOW_LINKS, + StandardCopyOption.COPY_ATTRIBUTES); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + Path destLibPath = Paths.get(destPath, "lib"); + FileUtils.forceMkdir(destLibPath.toFile()); + // Create hard link for libs to decrease copy size + try (Stream s = Files.walk(Paths.get(this.templateNodeLibPath))) { + s.forEach( + source -> { + if (source.toFile().isFile()) { + Path destination = + Paths.get( + destLibPath.toString(), + source.toString().substring(this.templateNodeLibPath.length())); + try { + Files.createLink(destination, source); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + }); + } + String startScriptPath = getStartScriptPath(); + String stopScriptPath = getStopScriptPath(); + if (!new File(startScriptPath).setExecutable(true)) { + logger.error("Change {} to executable failed.", startScriptPath); + } + if (!new File(stopScriptPath).setExecutable(true)) { + logger.error("Change {} to executable failed.", stopScriptPath); + } + // Make sure the log dir exist, as the first file is output by starting script directly. + FileUtils.createParentDirectories(new File(getLogPath())); + } catch (IOException ex) { + logger.error("Copy node dir failed", ex); + fail(); + } + } + + @Override + public void destroyDir() { + for (int i = 0; i < 3; i++) { + try { + // DO NOT use FileUtils.forceDelete, as it will follow the symbolic link to make libs + // read-only, which causes permission denied in deletion. + PathUtils.deleteDirectory(Paths.get(getNodePath())); + return; + } catch (IOException ex) { + logger.warn("Delete node dir failed. RetryTimes={}", i + 1, ex); + try { + TimeUnit.SECONDS.sleep(3); + } catch (InterruptedException e) { + fail("Delete node dir failed. " + e); + } + } + } + fail("Delete node dir failed."); + } + + @Override + public void start() { + try { + File stdoutFile = new File(getLogPath()); + ProcessBuilder processBuilder = + new ProcessBuilder(getStartScriptPath()) + .redirectOutput(stdoutFile) + .redirectError(stdoutFile); + this.instance = processBuilder.start(); + logger.info("In test {} {} started.", getTestLogDirName(), getId()); + } catch (IOException ex) { + fail("Start node failed. 
" + ex); + } + } + + @Override + public void stop() { + if (this.instance == null) { + return; + } + this.instance.destroy(); + // In Windows, the IoTDB process is started as a subprocess of the original batch script with a + // new pid, so we need to kill the new subprocess as well. + if (SystemUtils.IS_OS_WINDOWS) { + ProcessBuilder processBuilder = + new ProcessBuilder(getStopScriptPath()) + .redirectOutput(NULL_FILE) + .redirectError(NULL_FILE); + processBuilder.environment().put("CONSOLE_LOG_LEVEL", "DEBUG"); + Process p = null; + try { + p = processBuilder.start(); + p.waitFor(5, TimeUnit.SECONDS); + } catch (IOException | InterruptedException e) { + logger.error("Stop instance in Windows failed", e); + if (p != null) { + p.destroyForcibly(); + } + } + } + } + + @Override + public void waitingToShutDown() { + while (this.instance.isAlive()) { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + logger.error("Waiting node to shutdown." + e); + } + } + } + + @Override + public void changeConfig(Properties properties) { + try { + String configPath = getConfigPath(); + Properties configProperties = new Properties(); + try (InputStream confInput = Files.newInputStream(Paths.get(configPath))) { + configProperties.load(confInput); + } + updateConfig(configProperties); + if (properties != null && !properties.isEmpty()) { + configProperties.putAll(properties); + } + try (FileWriter confOutput = new FileWriter(configPath)) { + configProperties.store(confOutput, null); + } + // Change JMX config + Path path = Paths.get(getEnvConfigPath()); + String content = new String(Files.readAllBytes(path), StandardCharsets.UTF_8); + content = content.replaceAll("JMX_LOCAL=\"true\"", "JMX_LOCAL=\"false\""); + content = content.replaceAll("JMX_PORT=\"\\d+\"", String.format("JMX_PORT=\"%d\"", jmxPort)); + Files.write(path, content.getBytes(StandardCharsets.UTF_8)); + } catch (IOException ex) { + fail("Change the config of data node failed. 
" + ex); + } + } + + @Override + public final String getIp() { + return "127.0.0.1"; + } + + @Override + public final int getPort() { + return portList[0]; + } + + @Override + public final String getIpAndPortString() { + return this.getIp() + ":" + this.getPort(); + } + + protected String workDirFilePath(String dirName, String fileName) { + return getNodePath() + File.separator + dirName + File.separator + fileName; + } + + protected abstract String getConfigPath(); + + protected abstract String getEnvConfigPath(); + + protected abstract void updateConfig(Properties properties); + + protected abstract String getStartScriptPath(); + + protected abstract String getStopScriptPath(); + + private String getLogPath() { + return getLogDirPath() + File.separator + getId() + ".log"; + } + + private String getLogDirPath() { + return System.getProperty("user.dir") + + File.separator + + "target" + + File.separator + + "cluster-logs" + + File.separator + + getTestLogDirName(); + } + + private String getNodePath() { + return System.getProperty("user.dir") + File.separator + "target" + File.separator + getId(); + } + + @Override + public void dumpJVMSnapshot(String testCaseName) { + JMXServiceURL url; + try { + url = + new JMXServiceURL( + String.format("service:jmx:rmi:///jndi/rmi://127.0.0.1:%d/jmxrmi", jmxPort)); + } catch (MalformedURLException e) { + logger.error("Construct JMX URL failed", e); + return; + } + Map environment = + Collections.singletonMap(JMXConnector.CREDENTIALS, new String[] {jmxUserName, jmxPassword}); + + try (JMXConnector connector = JMXConnectorFactory.connect(url, environment)) { + MBeanServerConnection mbsc = connector.getMBeanServerConnection(); + ThreadMXBean tmbean = + ManagementFactory.newPlatformMXBeanProxy( + mbsc, ManagementFactory.THREAD_MXBEAN_NAME, ThreadMXBean.class); + ThreadInfo[] threadInfos = tmbean.dumpAllThreads(true, true); + long[] deadlockIds = tmbean.findDeadlockedThreads(); + LocalDateTime currentTime = LocalDateTime.now(); + try (PrintWriter output = + new PrintWriter( + getLogDirPath() + File.separator + testCaseName + "_" + getId() + "-threads.dump")) { + output.printf("# Captured at %s\n", currentTime); + output.println("==================\n"); + if (deadlockIds != null && deadlockIds.length > 0) { + output.printf("Detect DEADLOCK threads!\n"); + for (long deadlockId : deadlockIds) { + ThreadInfo ti = tmbean.getThreadInfo(deadlockId); + output.printf("%s #%d\n", ti.getThreadName(), ti.getThreadId()); + } + output.println("==================\n"); + } + for (ThreadInfo threadInfo : threadInfos) { + dumpThread(output, threadInfo); + } + } + } catch (IOException e) { + logger.error("Connect with MBeanServer {} failed", url.getURLPath(), e); + } + } + + private void dumpThread(PrintWriter output, ThreadInfo ti) { + StringBuilder sb = new StringBuilder("\"" + ti.getThreadName() + "\""); + sb.append(" #").append(ti.getThreadId()); + sb.append(" ").append(ti.getThreadState()).append("\n"); + if (ti.getLockName() != null) { + sb.append("Waiting on: ").append(ti.getLockName()).append("\n"); + } + if (ti.getLockOwnerName() != null) { + sb.append("Locked by: ") + .append(ti.getLockOwnerName()) + .append(" #") + .append(ti.getLockOwnerId()) + .append("\n"); + } + Map lockInfoMap = new HashMap<>(); + for (MonitorInfo monitor : ti.getLockedMonitors()) { + lockInfoMap.put(monitor.getLockedStackFrame(), monitor); + } + for (StackTraceElement ele : ti.getStackTrace()) { + if (lockInfoMap.containsKey(ele)) { + MonitorInfo monitorInfo = lockInfoMap.get(ele); + 
sb.append(TAB) + .append("- lock ") + .append(monitorInfo.getClassName()) + .append("@") + .append(Integer.toHexString(monitorInfo.getIdentityHashCode())) + .append("\n"); + } + sb.append(TAB).append(ele).append("\n"); + } + sb.append("\n"); + output.print(sb); + } + + private String getTestLogDirName() { + if (testMethodName == null) { + return testClassName; + } + return testClassName + "_" + testMethodName; + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/Cluster1Env.java b/integration-test/src/main/java/org/apache/iotdb/it/env/Cluster1Env.java new file mode 100644 index 000000000000..55f841d2fee7 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/Cluster1Env.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.it.env; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class Cluster1Env extends AbstractEnv { + private static final Logger logger = LoggerFactory.getLogger(Cluster1Env.class); + + @Override + public void initBeforeClass() throws InterruptedException { + logger.debug("=======start init class======="); + super.initEnvironment(1, 3); + } + + @Override + public void initBeforeTest() throws InterruptedException { + logger.debug("=======start init test======="); + super.initEnvironment(1, 3); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/ConfigFactory.java b/integration-test/src/main/java/org/apache/iotdb/it/env/ConfigFactory.java new file mode 100644 index 000000000000..22f48193b5a8 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/ConfigFactory.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.it.env; + +import org.apache.iotdb.itbase.env.BaseConfig; + +import java.lang.reflect.InvocationTargetException; + +public class ConfigFactory { + private static BaseConfig config; + + public static BaseConfig getConfig() { + if (config == null) { + try { + switch (System.getProperty("TestEnv", "Standalone")) { + case "Standalone": + config = + (BaseConfig) + Class.forName("org.apache.iotdb.db.it.env.StandaloneEnvConfig") + .getDeclaredConstructor() + .newInstance(); + break; + case "LocalStandaloneOnMpp": + case "Cluster1": + config = new MppConfig(); + break; + case "Remote": + config = new RemoteServerConfig(); + break; + default: + throw new ClassNotFoundException("The Property class of TestEnv not found"); + } + } catch (ClassNotFoundException + | IllegalAccessException + | InstantiationException + | NoSuchMethodException + | InvocationTargetException e) { + e.printStackTrace(); + System.exit(-1); + } + } + return config; + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/ConfigNodeWrapper.java b/integration-test/src/main/java/org/apache/iotdb/it/env/ConfigNodeWrapper.java new file mode 100644 index 000000000000..05b6e51b77b5 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/ConfigNodeWrapper.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.it.env; + +import org.apache.commons.lang3.SystemUtils; + +import java.io.File; +import java.util.Properties; + +public class ConfigNodeWrapper extends AbstractNodeWrapper { + + private final int consensusPort; + private final String targetConfigNode; + private final boolean isSeed; + + public ConfigNodeWrapper( + boolean isSeed, + String targetConfigNode, + String testClassName, + String testMethodName, + int[] portList) { + super(testClassName, testMethodName, portList); + this.consensusPort = portList[1]; + this.isSeed = isSeed; + if (isSeed) { + this.targetConfigNode = getIpAndPortString(); + } else { + this.targetConfigNode = targetConfigNode; + } + } + + @Override + protected void updateConfig(Properties properties) { + properties.setProperty("rpc_address", super.getIp()); + properties.setProperty("rpc_port", String.valueOf(getPort())); + properties.setProperty("consensus_port", String.valueOf(this.consensusPort)); + properties.setProperty("config_nodes", this.targetConfigNode); + properties.setProperty( + "config_node_consensus_protocol_class", + "org.apache.iotdb.consensus.standalone.StandAloneConsensus"); + properties.setProperty( + "schema_region_consensus_protocol_class", + "org.apache.iotdb.consensus.standalone.StandAloneConsensus"); + properties.setProperty( + "data_region_consensus_protocol_class", + "org.apache.iotdb.consensus.standalone.StandAloneConsensus"); + properties.setProperty("schema_replication_factor", "1"); + properties.setProperty("data_replication_factor", "1"); + properties.setProperty("connection_timeout_ms", "30000"); + } + + @Override + protected String getConfigPath() { + return workDirFilePath("confignode" + File.separator + "conf", "iotdb-confignode.properties"); + } + + @Override + protected String getEnvConfigPath() { + if (SystemUtils.IS_OS_WINDOWS) { + return workDirFilePath("confignode" + File.separator + "conf", "confignode-env.bat"); + } + return workDirFilePath("confignode" + File.separator + "conf", "confignode-env.sh"); + } + + @Override + protected String getStartScriptPath() { + String scriptName = "start-confignode.sh"; + if (SystemUtils.IS_OS_WINDOWS) { + scriptName = "start-confignode.bat"; + } + return workDirFilePath("confignode" + File.separator + "sbin", scriptName); + } + + @Override + protected String getStopScriptPath() { + String scriptName = "stop-confignode.sh"; + if (SystemUtils.IS_OS_WINDOWS) { + scriptName = "stop-confignode.bat"; + } + return workDirFilePath("confignode" + File.separator + "sbin", scriptName); + } + + @Override + public final String getId() { + if (isSeed) { + return "SeedConfigNode" + getPort(); + } + return "ConfigNode" + getPort(); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/DataNodeWrapper.java b/integration-test/src/main/java/org/apache/iotdb/it/env/DataNodeWrapper.java new file mode 100644 index 000000000000..eb2f46380f25 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/DataNodeWrapper.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.it.env; + +import org.apache.commons.lang3.SystemUtils; + +import java.io.File; +import java.util.Properties; + +public class DataNodeWrapper extends AbstractNodeWrapper { + + private final String targetConfigNode; + private final int mppDataExchangePort; + private final int internalPort; + private final int dataRegionConsensusPort; + private final int schemaRegionConsensusPort; + + public DataNodeWrapper( + String targetConfigNode, String testClassName, String testMethodName, int[] portList) { + super(testClassName, testMethodName, portList); + this.targetConfigNode = targetConfigNode; + this.mppDataExchangePort = portList[1]; + this.internalPort = portList[2]; + this.dataRegionConsensusPort = portList[3]; + this.schemaRegionConsensusPort = portList[4]; + } + + @Override + protected void updateConfig(Properties properties) { + properties.setProperty("rpc_address", super.getIp()); + properties.setProperty("internal_address", "127.0.0.1"); + properties.setProperty("rpc_port", String.valueOf(getPort())); + properties.setProperty("mpp_data_exchange_port", String.valueOf(this.mppDataExchangePort)); + properties.setProperty("internal_port", String.valueOf(this.internalPort)); + properties.setProperty( + "data_region_consensus_port", String.valueOf(this.dataRegionConsensusPort)); + properties.setProperty( + "schema_region_consensus_port", String.valueOf(this.schemaRegionConsensusPort)); + properties.setProperty("connection_timeout_ms", "30000"); + if (this.targetConfigNode != null) { + properties.setProperty("config_nodes", this.targetConfigNode); + } + } + + @Override + protected String getConfigPath() { + return workDirFilePath("datanode" + File.separator + "conf", "iotdb-datanode.properties"); + } + + @Override + protected String getEnvConfigPath() { + if (SystemUtils.IS_OS_WINDOWS) { + return workDirFilePath("datanode" + File.separator + "conf", "datanode-env.bat"); + } + return workDirFilePath("datanode" + File.separator + "conf", "datanode-env.sh"); + } + + @Override + protected String getStartScriptPath() { + String scriptName = "start-datanode.sh"; + if (SystemUtils.IS_OS_WINDOWS) { + scriptName = "start-datanode.bat"; + } + return workDirFilePath("datanode" + File.separator + "sbin", scriptName); + } + + @Override + protected String getStopScriptPath() { + String scriptName = "stop-datanode.sh"; + if (SystemUtils.IS_OS_WINDOWS) { + scriptName = "stop-datanode.bat"; + } + return workDirFilePath("datanode" + File.separator + "sbin", scriptName); + } + + @Override + public final String getId() { + return "DataNode" + getPort(); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/EnvFactory.java b/integration-test/src/main/java/org/apache/iotdb/it/env/EnvFactory.java new file mode 100644 index 000000000000..0b065f3286bc --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/EnvFactory.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.it.env; + +import org.apache.iotdb.itbase.env.BaseEnv; +import org.apache.iotdb.jdbc.Config; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class EnvFactory { + private static BaseEnv env; + private static final Logger logger = LoggerFactory.getLogger(EnvFactory.class); + + public static BaseEnv getEnv() { + if (env == null) { + try { + Class.forName(Config.JDBC_DRIVER_NAME); + logger.debug(">>>>>>>" + System.getProperty("TestEnv")); + switch (System.getProperty("TestEnv", "Standalone")) { + case "Standalone": + env = (BaseEnv) Class.forName("org.apache.iotdb.db.it.env.StandaloneEnv").newInstance(); + break; + case "LocalStandaloneOnMpp": + env = new StandaloneOnMppEnv(); + break; + case "Cluster1": + env = new Cluster1Env(); + break; + case "Remote": + env = new RemoteServerEnv(); + break; + default: + throw new ClassNotFoundException("The Property class of TestEnv not found"); + } + } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) { + e.printStackTrace(); + System.exit(-1); + } + } + return env; + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/IoTDBTestRunner.java b/integration-test/src/main/java/org/apache/iotdb/it/env/IoTDBTestRunner.java new file mode 100644 index 000000000000..d865e736fcc0 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/IoTDBTestRunner.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.it.env; + +import org.junit.runner.Description; +import org.junit.runner.notification.RunNotifier; +import org.junit.runners.BlockJUnit4ClassRunner; +import org.junit.runners.model.FrameworkMethod; +import org.junit.runners.model.InitializationError; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class IoTDBTestRunner extends BlockJUnit4ClassRunner { + + private static final Logger logger = LoggerFactory.getLogger(IoTDBTestRunner.class); + + public IoTDBTestRunner(Class testClass) throws InitializationError { + super(testClass); + } + + @Override + protected void runChild(final FrameworkMethod method, RunNotifier notifier) { + Description description = describeChild(method); + logger.info("Run {}", description.getMethodName()); + long currentTime = System.currentTimeMillis(); + EnvFactory.getEnv().setTestMethodName(description.getMethodName()); + super.runChild(method, notifier); + logger.info( + "Done {}. Cost: {}s", + description.getMethodName(), + (System.currentTimeMillis() - currentTime) / 1000.0); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/MppConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/MppConfig.java new file mode 100644 index 000000000000..7d10461723b4 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/MppConfig.java @@ -0,0 +1,194 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.it.env; + +import org.apache.iotdb.itbase.env.BaseConfig; + +import java.util.Properties; + +public class MppConfig implements BaseConfig { + private final Properties engineProperties; + private final Properties confignodeProperties; + + public MppConfig() { + engineProperties = new Properties(); + confignodeProperties = new Properties(); + } + + @Override + public void clearAllProperties() { + engineProperties.clear(); + confignodeProperties.clear(); + } + + @Override + public Properties getEngineProperties() { + return this.engineProperties; + } + + @Override + public Properties getConfignodeProperties() { + return this.confignodeProperties; + } + + @Override + public BaseConfig setMaxNumberOfPointsInPage(int maxNumberOfPointsInPage) { + engineProperties.setProperty( + "max_number_of_points_in_page", String.valueOf(maxNumberOfPointsInPage)); + return this; + } + + @Override + public BaseConfig setPageSizeInByte(int pageSizeInByte) { + engineProperties.setProperty("page_size_in_byte", String.valueOf(pageSizeInByte)); + return this; + } + + @Override + public BaseConfig setGroupSizeInByte(int groupSizeInByte) { + engineProperties.setProperty("group_size_in_byte", String.valueOf(groupSizeInByte)); + return this; + } + + @Override + public BaseConfig setMemtableSizeThreshold(long memtableSizeThreshold) { + engineProperties.setProperty("memtable_size_threshold", String.valueOf(memtableSizeThreshold)); + return this; + } + + @Override + public BaseConfig setDataRegionNum(int dataRegionNum) { + engineProperties.setProperty("data_region_num", String.valueOf(dataRegionNum)); + return this; + } + + @Override + public BaseConfig setPartitionInterval(long partitionInterval) { + engineProperties.setProperty("partition_interval", String.valueOf(partitionInterval)); + return this; + } + + @Override + public BaseConfig setCompressor(String compressor) { + engineProperties.setProperty("compressor", compressor); + return this; + } + + @Override + public BaseConfig setMaxQueryDeduplicatedPathNum(int maxQueryDeduplicatedPathNum) { + engineProperties.setProperty( + "max_deduplicated_path_num", String.valueOf(maxQueryDeduplicatedPathNum)); + return this; + } + + @Override + public BaseConfig setRpcThriftCompressionEnable(boolean rpcThriftCompressionEnable) { + engineProperties.setProperty( + "rpc_thrift_compression_enable", String.valueOf(rpcThriftCompressionEnable)); + return this; + } + + @Override + public BaseConfig setRpcAdvancedCompressionEnable(boolean rpcAdvancedCompressionEnable) { + engineProperties.setProperty( + "rpc_advanced_compression_enable", String.valueOf(rpcAdvancedCompressionEnable)); + return this; + } + + @Override + public BaseConfig setEnablePartition(boolean enablePartition) { + engineProperties.setProperty("enable_partition", String.valueOf(enablePartition)); + return this; + } + + @Override + public BaseConfig setUdfCollectorMemoryBudgetInMB(float udfCollectorMemoryBudgetInMB) { + // udf_memory_budget_in_mb + // udf_reader_transformer_collector_memory_proportion + engineProperties.setProperty( + "udf_memory_budget_in_mb", String.valueOf(udfCollectorMemoryBudgetInMB * 3)); + return this; + } + + @Override + public BaseConfig setUdfTransformerMemoryBudgetInMB(float udfTransformerMemoryBudgetInMB) { + engineProperties.setProperty( + "udf_memory_budget_in_mb", String.valueOf(udfTransformerMemoryBudgetInMB * 3)); + return this; + } + + @Override + public BaseConfig setUdfReaderMemoryBudgetInMB(float udfReaderMemoryBudgetInMB) { + 
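+    // Assumption, mirroring the comment in setUdfCollectorMemoryBudgetInMB above: the single
+    // udf_memory_budget_in_mb property seems to cover the reader, transformer and collector
+    // together, which would explain why each per-component setter multiplies its budget by 3.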
engineProperties.setProperty( + "udf_memory_budget_in_mb", String.valueOf(udfReaderMemoryBudgetInMB * 3)); + return this; + } + + @Override + public BaseConfig setEnableSeqSpaceCompaction(boolean enableSeqSpaceCompaction) { + engineProperties.setProperty( + "enable_seq_space_compaction", String.valueOf(enableSeqSpaceCompaction)); + return this; + } + + @Override + public BaseConfig setEnableUnseqSpaceCompaction(boolean enableUnseqSpaceCompaction) { + engineProperties.setProperty( + "enable_unseq_space_compaction", String.valueOf(enableUnseqSpaceCompaction)); + return this; + } + + @Override + public BaseConfig setEnableCrossSpaceCompaction(boolean enableCrossSpaceCompaction) { + engineProperties.setProperty( + "enable_cross_space_compaction", String.valueOf(enableCrossSpaceCompaction)); + return this; + } + + @Override + public BaseConfig setEnableIDTable(boolean isEnableIDTable) { + engineProperties.setProperty("enable_id_table", String.valueOf(isEnableIDTable)); + return this; + } + + @Override + public BaseConfig setDeviceIDTransformationMethod(String deviceIDTransformationMethod) { + engineProperties.setProperty("device_id_transformation_method", deviceIDTransformationMethod); + return this; + } + + @Override + public BaseConfig setAutoCreateSchemaEnabled(boolean enableAutoCreateSchema) { + engineProperties.setProperty( + "enable_auto_create_schema", String.valueOf(enableAutoCreateSchema)); + return this; + } + + @Override + public BaseConfig setEnableLastCache(boolean lastCacheEnable) { + engineProperties.setProperty("enable_last_cache", String.valueOf(lastCacheEnable)); + return this; + } + + @Override + public BaseConfig setPrimitiveArraySize(int primitiveArraySize) { + engineProperties.setProperty("primitive_array_size", String.valueOf(primitiveArraySize)); + return this; + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/RemoteServerConfig.java b/integration-test/src/main/java/org/apache/iotdb/it/env/RemoteServerConfig.java new file mode 100644 index 000000000000..cfc3e58b28fb --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/RemoteServerConfig.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.it.env; + +import org.apache.iotdb.itbase.env.BaseConfig; + +public class RemoteServerConfig implements BaseConfig {} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/RemoteServerEnv.java b/integration-test/src/main/java/org/apache/iotdb/it/env/RemoteServerEnv.java new file mode 100644 index 000000000000..a9be62426f99 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/RemoteServerEnv.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.it.env; + +import org.apache.iotdb.itbase.env.BaseEnv; +import org.apache.iotdb.jdbc.Config; +import org.apache.iotdb.jdbc.Constant; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; + +import static org.apache.iotdb.jdbc.Config.VERSION; +import static org.junit.Assert.fail; + +public class RemoteServerEnv implements BaseEnv { + private String ip_addr = System.getProperty("RemoteIp", "127.0.0.1"); + private String port = System.getProperty("RemotePort", "6667"); + private String user = System.getProperty("RemoteUser", "root"); + private String password = System.getProperty("RemotePassword", "root"); + + @Override + public void initBeforeClass() { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + statement.execute("SET STORAGE GROUP TO root.init;"); + statement.execute("DELETE STORAGE GROUP root;"); + } catch (Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } + + @Override + public void cleanAfterClass() {} + + @Override + public void initBeforeTest() { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + statement.execute("SET STORAGE GROUP TO root.init;"); + statement.execute("DELETE STORAGE GROUP root;"); + } catch (Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } + + @Override + public void cleanAfterTest() {} + + @Override + public Connection getConnection() throws SQLException { + Connection connection = null; + try { + Class.forName(Config.JDBC_DRIVER_NAME); + connection = + DriverManager.getConnection( + Config.IOTDB_URL_PREFIX + ip_addr + ":" + port, user, password); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + fail(); + } + return connection; + } + + @Override + public Connection getConnection(Constant.Version version) throws SQLException { + Connection connection = null; + try { + Class.forName(Config.JDBC_DRIVER_NAME); + connection = + DriverManager.getConnection( + Config.IOTDB_URL_PREFIX + + ip_addr + + ":" + + port + + "?" 
+ + VERSION + + "=" + + version.toString(), + user, + password); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + fail(); + } + return connection; + } + + public void setTestMethodName(String testCaseName) { + // Do nothing + } + + @Override + public void dumpTestJVMSnapshot() { + // Do nothing + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/StandaloneDataNodeWrapper.java b/integration-test/src/main/java/org/apache/iotdb/it/env/StandaloneDataNodeWrapper.java new file mode 100644 index 000000000000..5bf6de27e850 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/StandaloneDataNodeWrapper.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.it.env; + +import org.apache.commons.lang3.SystemUtils; + +import java.io.File; + +public class StandaloneDataNodeWrapper extends DataNodeWrapper { + + public StandaloneDataNodeWrapper( + String targetConfigNode, String testClassName, String testMethodName, int[] portList) { + super(targetConfigNode, testClassName, testMethodName, portList); + } + + @Override + protected String getStartScriptPath() { + String scriptName = "start-new-server.sh"; + if (SystemUtils.IS_OS_WINDOWS) { + scriptName = "start-new-server.bat"; + } + return workDirFilePath("datanode" + File.separator + "sbin", scriptName); + } + + @Override + protected String getStopScriptPath() { + String scriptName = "stop-server.sh"; + if (SystemUtils.IS_OS_WINDOWS) { + scriptName = "stop-server.bat"; + } + return workDirFilePath("datanode" + File.separator + "sbin", scriptName); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/it/env/StandaloneOnMppEnv.java b/integration-test/src/main/java/org/apache/iotdb/it/env/StandaloneOnMppEnv.java new file mode 100644 index 000000000000..810a03ba7219 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/it/env/StandaloneOnMppEnv.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.it.env; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Collections; + +public class StandaloneOnMppEnv extends AbstractEnv { + + private static final Logger logger = LoggerFactory.getLogger(StandaloneOnMppEnv.class); + + private void initEnvironment() { + DataNodeWrapper dataNodeWrapper = + new StandaloneDataNodeWrapper( + null, super.getTestClassName(), super.getTestMethodName(), searchAvailablePorts()); + dataNodeWrapper.createDir(); + dataNodeWrapper.changeConfig(ConfigFactory.getConfig().getEngineProperties()); + dataNodeWrapper.start(); + super.dataNodeWrapperList = Collections.singletonList(dataNodeWrapper); + super.testWorking(); + } + + @Override + public void initBeforeClass() throws InterruptedException { + logger.debug("=======start init class======="); + initEnvironment(); + } + + @Override + public void initBeforeTest() { + logger.debug("=======start init test======="); + initEnvironment(); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/category/ClusterIT.java b/integration-test/src/main/java/org/apache/iotdb/itbase/category/ClusterIT.java new file mode 100644 index 000000000000..40d8fe5c8c13 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/category/ClusterIT.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.itbase.category; + +public interface ClusterIT {} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/category/LocalStandaloneIT.java b/integration-test/src/main/java/org/apache/iotdb/itbase/category/LocalStandaloneIT.java new file mode 100644 index 000000000000..74a631aa87d4 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/category/LocalStandaloneIT.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.itbase.category; + +public interface LocalStandaloneIT {} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/category/RemoteIT.java b/integration-test/src/main/java/org/apache/iotdb/itbase/category/RemoteIT.java new file mode 100644 index 000000000000..f303c0b2c448 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/category/RemoteIT.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.itbase.category; + +public interface RemoteIT {} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/constant/BuiltinAggregationFunctionEnum.java b/integration-test/src/main/java/org/apache/iotdb/itbase/constant/BuiltinAggregationFunctionEnum.java new file mode 100644 index 000000000000..1b6702c1507f --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/constant/BuiltinAggregationFunctionEnum.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.itbase.constant; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; + +public enum BuiltinAggregationFunctionEnum { + MIN_TIME("min_time"), + MAX_TIME("max_time"), + MAX_VALUE("max_value"), + MIN_VALUE("min_value"), + EXTREME("extreme"), + FIRST_VALUE("first_value"), + LAST_VALUE("last_value"), + COUNT("count"), + AVG("avg"), + SUM("sum"), + ; + + private final String functionName; + + BuiltinAggregationFunctionEnum(String functionName) { + this.functionName = functionName; + } + + public String getFunctionName() { + return functionName; + } + + private static final Set NATIVE_FUNCTION_NAMES = + new HashSet<>( + Arrays.stream(org.apache.iotdb.commons.udf.builtin.BuiltinAggregationFunction.values()) + .map(org.apache.iotdb.commons.udf.builtin.BuiltinAggregationFunction::getFunctionName) + .collect(Collectors.toList())); + + public static Set getNativeFunctionNames() { + return NATIVE_FUNCTION_NAMES; + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/constant/BuiltinTimeSeriesGeneratingFunctionEnum.java b/integration-test/src/main/java/org/apache/iotdb/itbase/constant/BuiltinTimeSeriesGeneratingFunctionEnum.java new file mode 100644 index 000000000000..dd78d79f7c1f --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/constant/BuiltinTimeSeriesGeneratingFunctionEnum.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.itbase.constant; + +/** All built-in UDFs need to register their function names and classes here. 
*/ +public enum BuiltinTimeSeriesGeneratingFunctionEnum { + CONST("CONST"), + E("E"), + PI("PI"), + SIN("SIN"), + COS("COS"), + TAN("TAN"), + ASIN("ASIN"), + ACOS("ACOS"), + ATAN("ATAN"), + SINH("SINH"), + COSH("COSH"), + TANH("TANH"), + DEGREES("DEGREES"), + RADIANS("RADIANS"), + ABS("ABS"), + SIGN("SIGN"), + CEIL("CEIL"), + FLOOR("FLOOR"), + ROUND("ROUND"), + EXP("EXP"), + LN("LN"), + LOG10("LOG10"), + SQRT("SQRT"), + STRING_CONTAINS("STRING_CONTAINS"), + STRING_MATCHES("STRING_MATCHES"), + STRING_LENGTH("LENGTH"), + STRING_LOCATE("LOCATE"), + STRING_STARTS_WITH("STARTSWITH"), + STRING_ENDS_WITH("ENDSWITH"), + STRING_CONCAT("CONCAT"), + STRING_SUBSTR("SUBSTR"), + STRING_UPPER("UPPER"), + STRING_LOWER("LOWER"), + STRING_TRIM("TRIM"), + STRING_CMP("STRCMP"), + DIFFERENCE("DIFFERENCE"), + NON_NEGATIVE_DIFFERENCE("NON_NEGATIVE_DIFFERENCE"), + TIME_DIFFERENCE("TIME_DIFFERENCE"), + DERIVATIVE("DERIVATIVE"), + NON_NEGATIVE_DERIVATIVE("NON_NEGATIVE_DERIVATIVE"), + TOP_K("TOP_K"), + BOTTOM_K("BOTTOM_K"), + CAST("CAST"), + IN_RANGE("IN_RANGE"), + ON_OFF("ON_OFF"), + ZERO_DURATION("ZERO_DURATION"), + NON_ZERO_DURATION("NON_ZERO_DURATION"), + ZERO_COUNT("ZERO_COUNT"), + NON_ZERO_COUNT("NON_ZERO_COUNT"), + EQUAL_SIZE_BUCKET_RANDOM_SAMPLE("EQUAL_SIZE_BUCKET_RANDOM_SAMPLE"), + EQUAL_SIZE_BUCKET_AGG_SAMPLE("EQUAL_SIZE_BUCKET_AGG_SAMPLE"), + EQUAL_SIZE_BUCKET_M4_SAMPLE("EQUAL_SIZE_BUCKET_M4_SAMPLE"), + EQUAL_SIZE_BUCKET_OUTLIER_SAMPLE("EQUAL_SIZE_BUCKET_OUTLIER_SAMPLE"), + JEXL("JEXL"); + + private final String functionName; + + BuiltinTimeSeriesGeneratingFunctionEnum(String functionName) { + this.functionName = functionName; + } + + public String getFunctionName() { + return functionName; + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/constant/TestConstant.java b/integration-test/src/main/java/org/apache/iotdb/itbase/constant/TestConstant.java new file mode 100644 index 000000000000..29361299ad41 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/constant/TestConstant.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.itbase.constant; + +import org.apache.iotdb.tsfile.utils.FilePathUtils; +import org.apache.iotdb.tsfile.write.record.TSRecord; +import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint; + +import java.io.File; + +public class TestConstant { + + public static final String BASE_OUTPUT_PATH = "target".concat(File.separator); + public static final String OUTPUT_DATA_DIR = + BASE_OUTPUT_PATH.concat("data").concat(File.separator); + public static final String PARTIAL_PATH_STRING = + "%s" + File.separator + "%d" + File.separator + "%d" + File.separator; + public static final String TEST_TSFILE_PATH = + BASE_OUTPUT_PATH + "testTsFile".concat(File.separator) + PARTIAL_PATH_STRING; + + public static final String d0 = "root.vehicle.d0"; + public static final String s0 = "s0"; + public static final String s1 = "s1"; + public static final String s2 = "s2"; + public static final String s3 = "s3"; + public static final String s4 = "s4"; + public static final String s5 = "s5"; + public static final String d1 = "root.vehicle.d1"; + public static final String TIMESTAMP_STR = "Time"; + public static boolean testFlag = true; + public static String[] stringValue = new String[] {"A", "B", "C", "D", "E"}; + public static String[] booleanValue = new String[] {"true", "false"}; + public static final String TIMESEIRES_STR = "timeseries"; + public static final String VALUE_STR = "value"; + public static final String DATA_TYPE_STR = "dataType"; + + public static String[] createSql = + new String[] { + "SET STORAGE GROUP TO root.vehicle", + "CREATE TIMESERIES root.vehicle.d0.s0 WITH DATATYPE=INT32, ENCODING=RLE", + "CREATE TIMESERIES root.vehicle.d0.s1 WITH DATATYPE=INT64, ENCODING=RLE", + "CREATE TIMESERIES root.vehicle.d0.s2 WITH DATATYPE=FLOAT, ENCODING=RLE", + "CREATE TIMESERIES root.vehicle.d0.s3 WITH DATATYPE=TEXT, ENCODING=PLAIN", + "CREATE TIMESERIES root.vehicle.d0.s4 WITH DATATYPE=BOOLEAN, ENCODING=PLAIN", + "CREATE TIMESERIES root.vehicle.d0.s5 WITH DATATYPE=DOUBLE, ENCODING=RLE", + "CREATE TIMESERIES root.vehicle.d1.s0 WITH DATATYPE=INT32, ENCODING=RLE", + "CREATE TIMESERIES root.vehicle.d1.s1 WITH DATATYPE=INT64, ENCODING=RLE", + }; + + public static String insertTemplate = "insert into %s(timestamp%s) values(%d%s)"; + + public static String firstValue(String path) { + return String.format("first_value(%s)", path); + } + + public static String lastValue(String path) { + return String.format("last_value(%s)", path); + } + + public static String sum(String path) { + return String.format("sum(%s)", path); + } + + public static String avg(String path) { + return String.format("avg(%s)", path); + } + + public static String count(String path) { + return String.format("count(%s)", path); + } + + public static String maxTime(String path) { + return String.format("max_time(%s)", path); + } + + public static String minTime(String path) { + return String.format("min_time(%s)", path); + } + + public static String maxValue(String path) { + return String.format("max_value(%s)", path); + } + + public static String extreme(String path) { + return String.format("extreme(%s)", path); + } + + public static String minValue(String path) { + return String.format("min_value(%s)", path); + } + + public static String recordToInsert(TSRecord record) { + StringBuilder measurements = new StringBuilder(); + StringBuilder values = new StringBuilder(); + for (DataPoint dataPoint : record.dataPointList) { + measurements.append(",").append(dataPoint.getMeasurementId()); + 
values.append(",").append(dataPoint.getValue()); + } + return String.format(insertTemplate, record.deviceId, measurements, record.time, values); + } + + public static String getTestTsFilePath( + String logicalStorageGroupName, + long VirtualStorageGroupId, + long TimePartitionId, + long tsFileVersion) { + String filePath = + String.format( + TEST_TSFILE_PATH, logicalStorageGroupName, VirtualStorageGroupId, TimePartitionId); + String fileName = + System.currentTimeMillis() + + FilePathUtils.FILE_NAME_SEPARATOR + + tsFileVersion + + "-0-0.tsfile"; + return filePath.concat(fileName); + } + + public static String getTestTsFileDir( + String logicalStorageGroupName, long VirtualStorageGroupId, long TimePartitionId) { + return String.format( + TestConstant.TEST_TSFILE_PATH, + logicalStorageGroupName, + VirtualStorageGroupId, + TimePartitionId); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/constant/UDFTestConstant.java b/integration-test/src/main/java/org/apache/iotdb/itbase/constant/UDFTestConstant.java new file mode 100644 index 000000000000..6613eed07755 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/constant/UDFTestConstant.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.itbase.constant; + +public class UDFTestConstant { + public static final String ACCESS_STRATEGY_KEY = "access"; + public static final String ACCESS_STRATEGY_ROW_BY_ROW = "row-by-row"; + public static final String ACCESS_STRATEGY_SLIDING_SIZE = "size"; + public static final String ACCESS_STRATEGY_SLIDING_TIME = "time"; + public static final String WINDOW_SIZE_KEY = "windowSize"; + public static final String TIME_INTERVAL_KEY = "timeInterval"; + public static final String SLIDING_STEP_KEY = "slidingStep"; + public static final String DISPLAY_WINDOW_BEGIN_KEY = "displayWindowBegin"; + public static final String DISPLAY_WINDOW_END_KEY = "displayWindowEnd"; +} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/env/BaseConfig.java b/integration-test/src/main/java/org/apache/iotdb/itbase/env/BaseConfig.java new file mode 100644 index 000000000000..10cb4f365adb --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/env/BaseConfig.java @@ -0,0 +1,156 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.itbase.env; + +import java.util.Properties; + +public interface BaseConfig { + + default void clearAllProperties() throws UnsupportedOperationException { + throw new UnsupportedOperationException("Method clearAllProperties not implement"); + } + + default Properties getEngineProperties() throws UnsupportedOperationException { + throw new UnsupportedOperationException("Method getEngineProperties not implement"); + } + + default Properties getConfignodeProperties() throws UnsupportedOperationException { + throw new UnsupportedOperationException("Method getConfignodeProperties not implement"); + } + + default BaseConfig setMaxNumberOfPointsInPage(int maxNumberOfPointsInPage) { + return this; + } + + default BaseConfig setPageSizeInByte(int pageSizeInByte) { + return this; + } + + default BaseConfig setGroupSizeInByte(int groupSizeInByte) { + return this; + } + + default BaseConfig setMemtableSizeThreshold(long memtableSizeThreshold) { + return this; + } + + default BaseConfig setDataRegionNum(int dataRegionNum) { + return this; + } + + default BaseConfig setPartitionInterval(long partitionInterval) { + return this; + } + + default long getPartitionInterval() { + return 604800; + } + + default BaseConfig setCompressor(String compressor) { + return this; + } + + default BaseConfig setMaxQueryDeduplicatedPathNum(int maxQueryDeduplicatedPathNum) { + return this; + } + + default BaseConfig setRpcThriftCompressionEnable(boolean rpcThriftCompressionEnable) { + return this; + } + + default BaseConfig setRpcAdvancedCompressionEnable(boolean rpcAdvancedCompressionEnable) { + return this; + } + + default BaseConfig setEnablePartition(boolean enablePartition) { + return this; + } + + default BaseConfig setUdfCollectorMemoryBudgetInMB(float udfCollectorMemoryBudgetInMB) { + return this; + } + + default BaseConfig setUdfTransformerMemoryBudgetInMB(float udfTransformerMemoryBudgetInMB) { + return this; + } + + default BaseConfig setUdfReaderMemoryBudgetInMB(float udfReaderMemoryBudgetInMB) { + return this; + } + + default BaseConfig setEnableSeqSpaceCompaction(boolean enableSeqSpaceCompaction) { + return this; + } + + default boolean isEnableSeqSpaceCompaction() { + return true; + } + + default BaseConfig setEnableUnseqSpaceCompaction(boolean enableUnseqSpaceCompaction) { + return this; + } + + default boolean isEnableUnseqSpaceCompaction() { + return true; + } + + default BaseConfig setEnableCrossSpaceCompaction(boolean enableCrossSpaceCompaction) { + return this; + } + + default boolean isEnableCrossSpaceCompaction() { + return true; + } + + default BaseConfig setEnableIDTable(boolean isEnableIDTable) { + return this; + } + + default BaseConfig setDeviceIDTransformationMethod(String deviceIDTransformationMethod) { + return this; + } + + default BaseConfig setAutoCreateSchemaEnabled(boolean enableAutoCreateSchema) { + return this; + } + + default BaseConfig setEnableLastCache(boolean lastCacheEnable) { + return this; + } + + default boolean isLastCacheEnabled() { + return true; + } + + default int getMaxNumberOfPointsInPage() { + return 1024 * 1024; + } + + 
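+  // A typical IT case tunes these knobs before the test environment starts and clears them when
+  // it finishes. A rough sketch (ConfigFactory/EnvFactory are assumed accessor names, used here
+  // only for illustration):
+  //
+  //   ConfigFactory.getConfig()
+  //       .setMaxNumberOfPointsInPage(4)
+  //       .setEnableSeqSpaceCompaction(false);
+  //   EnvFactory.getEnv().initBeforeClass();
+  //   // ... run the queries under test ...
+  //   ConfigFactory.getConfig().clearAllProperties();
+  //   EnvFactory.getEnv().cleanAfterClass();
+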
default boolean isAutoCreateSchemaEnabled() { + return true; + } + + default BaseConfig setPrimitiveArraySize(int primitiveArraySize) { + return this; + } + + default int getPrimitiveArraySize() { + return 32; + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/env/BaseEnv.java b/integration-test/src/main/java/org/apache/iotdb/itbase/env/BaseEnv.java new file mode 100644 index 000000000000..8e5650d44b26 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/env/BaseEnv.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.itbase.env; + +import org.apache.iotdb.jdbc.Constant; + +import java.sql.Connection; +import java.sql.SQLException; + +public interface BaseEnv { + + void initBeforeClass() throws InterruptedException; + + void cleanAfterClass(); + + void initBeforeTest() throws InterruptedException; + + void cleanAfterTest(); + + Connection getConnection() throws SQLException; + + Connection getConnection(Constant.Version version) throws SQLException; + + void setTestMethodName(String testCaseName); + + void dumpTestJVMSnapshot(); +} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/env/BaseNodeWrapper.java b/integration-test/src/main/java/org/apache/iotdb/itbase/env/BaseNodeWrapper.java new file mode 100644 index 000000000000..c1efb1f35112 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/env/BaseNodeWrapper.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.itbase.env; + +import java.util.Properties; + +public interface BaseNodeWrapper { + + void createDir(); + + void destroyDir(); + + void changeConfig(Properties properties); + + void start(); + + void stop(); + + void waitingToShutDown(); + + String getIp(); + + int getPort(); + + String getId(); + + String getIpAndPortString(); + + void dumpJVMSnapshot(String testCaseName); +} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/exception/InconsistentDataException.java b/integration-test/src/main/java/org/apache/iotdb/itbase/exception/InconsistentDataException.java new file mode 100644 index 000000000000..51a29adb149e --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/exception/InconsistentDataException.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.itbase.exception; + +import java.util.List; + +public class InconsistentDataException extends RuntimeException { + + public InconsistentDataException(List data, List endpoints) { + super(String.format("Datasets are inconsistent: %s with data %s", endpoints, data)); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/exception/ParallelRequestTimeoutException.java b/integration-test/src/main/java/org/apache/iotdb/itbase/exception/ParallelRequestTimeoutException.java new file mode 100644 index 000000000000..7c6537e7db07 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/exception/ParallelRequestTimeoutException.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.iotdb.itbase.exception; + +public class ParallelRequestTimeoutException extends RuntimeException { + public ParallelRequestTimeoutException(String message, Throwable throwable) { + super(message, throwable); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/ClusterResultSetMetaData.java b/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/ClusterResultSetMetaData.java new file mode 100644 index 000000000000..b66b817a32e8 --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/ClusterResultSetMetaData.java @@ -0,0 +1,239 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.itbase.runtime; + +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.util.List; + +/** The implementation of {@link ResultSetMetaData} in cluster test. */ +public class ClusterResultSetMetaData implements ResultSetMetaData { + + private final List metaDataList; + private final List endpoints; + + public ClusterResultSetMetaData(List metadataList, List endpoints) { + this.metaDataList = metadataList; + this.endpoints = endpoints; + } + + @Override + public int getColumnCount() throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(rs::getColumnCount); + } + return delegate.requestAllAndCompare(); + } + + @Override + public boolean isAutoIncrement(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.isAutoIncrement(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public boolean isCaseSensitive(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.isCaseSensitive(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public boolean isSearchable(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.isSearchable(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public boolean isCurrency(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.isCurrency(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public int isNullable(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> 
rs.isNullable(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public boolean isSigned(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.isSigned(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public int getColumnDisplaySize(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.getColumnDisplaySize(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public String getColumnLabel(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.getColumnLabel(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public String getColumnName(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.getColumnName(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public String getSchemaName(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.getSchemaName(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public int getPrecision(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.getPrecision(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public int getScale(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.getScale(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public String getTableName(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.getTableName(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public String getCatalogName(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.getCatalogName(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public int getColumnType(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.getColumnType(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public String getColumnTypeName(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.getColumnTypeName(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public boolean isReadOnly(int column) throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.isReadOnly(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public boolean isWritable(int column) throws SQLException { + RequestDelegate 
delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.isWritable(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public boolean isDefinitelyWritable(int column) throws SQLException { + RequestDelegate<Boolean> delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.isDefinitelyWritable(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public String getColumnClassName(int column) throws SQLException { + RequestDelegate<String> delegate = createRequestDelegate(); + for (ResultSetMetaData rs : metaDataList) { + delegate.addRequest(() -> rs.getColumnClassName(column)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public <T> T unwrap(Class<T> iface) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isWrapperFor(Class<?> iface) { + throw new UnsupportedOperationException(); + } + + /** As all the ResultSetMetaData instances are local, there is no need to request them in parallel. */ + private <T> RequestDelegate<T> createRequestDelegate() { + return new SerialRequestDelegate<>(endpoints); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/ClusterTestConnection.java b/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/ClusterTestConnection.java new file mode 100644 index 000000000000..77a5136e696c --- /dev/null +++ b/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/ClusterTestConnection.java @@ -0,0 +1,337 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.itbase.runtime; + +import org.apache.commons.lang3.Validate; + +import java.sql.Array; +import java.sql.Blob; +import java.sql.CallableStatement; +import java.sql.Clob; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.NClob; +import java.sql.PreparedStatement; +import java.sql.SQLClientInfoException; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Savepoint; +import java.sql.Statement; +import java.sql.Struct; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Executor; + +/** The implementation of {@link Connection} in cluster test.
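+ *
+ * <p>It wraps one write connection and a list of read connections; statements created from it run
+ * each query on every read connection and compare the result sets across endpoints (see {@link
+ * ClusterTestResultSet}). A rough usage sketch, assuming the ITs obtain this connection from their
+ * environment factory (the factory name and the SQL are illustrative only):
+ *
+ * <pre>{@code
+ * try (Connection conn = EnvFactory.getEnv().getConnection();
+ *     Statement st = conn.createStatement();
+ *     ResultSet rs = st.executeQuery("SHOW VERSION")) {
+ *   // every value read from rs is checked for consistency across the cluster nodes
+ * }
+ * }</pre>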
*/ +public class ClusterTestConnection implements Connection { + + private final NodeConnection writeConnection; + private final List readConnections; + private boolean isClosed; + + public ClusterTestConnection( + NodeConnection writeConnection, List readConnections) { + Validate.notNull(readConnections); + this.writeConnection = writeConnection; + this.readConnections = readConnections; + } + + @Override + public Statement createStatement() throws SQLException { + return new ClusterTestStatement(writeConnection, readConnections); + } + + @Override + public PreparedStatement prepareStatement(String sql) { + throw new UnsupportedOperationException(); + } + + @Override + public CallableStatement prepareCall(String sql) { + throw new UnsupportedOperationException(); + } + + @Override + public String nativeSQL(String sql) { + throw new UnsupportedOperationException(); + } + + @Override + public void setAutoCommit(boolean autoCommit) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean getAutoCommit() { + throw new UnsupportedOperationException(); + } + + @Override + public void commit() { + throw new UnsupportedOperationException(); + } + + @Override + public void rollback() { + throw new UnsupportedOperationException(); + } + + @Override + public void close() { + writeConnection.close(); + for (NodeConnection conn : readConnections) { + conn.close(); + } + isClosed = true; + } + + @Override + public boolean isClosed() { + return isClosed; + } + + @Override + public DatabaseMetaData getMetaData() throws SQLException { + return writeConnection.getUnderlyingConnecton().getMetaData(); + } + + @Override + public void setReadOnly(boolean readOnly) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isReadOnly() { + throw new UnsupportedOperationException(); + } + + @Override + public void setCatalog(String catalog) { + throw new UnsupportedOperationException(); + } + + @Override + public String getCatalog() { + throw new UnsupportedOperationException(); + } + + @Override + public void setTransactionIsolation(int level) { + throw new UnsupportedOperationException(); + } + + @Override + public int getTransactionIsolation() { + throw new UnsupportedOperationException(); + } + + @Override + public SQLWarning getWarnings() { + throw new UnsupportedOperationException(); + } + + @Override + public void clearWarnings() { + throw new UnsupportedOperationException(); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) { + throw new UnsupportedOperationException(); + } + + @Override + public PreparedStatement prepareStatement( + String sql, int resultSetType, int resultSetConcurrency) { + throw new UnsupportedOperationException(); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) { + throw new UnsupportedOperationException(); + } + + @Override + public Map> getTypeMap() { + throw new UnsupportedOperationException(); + } + + @Override + public void setTypeMap(Map> map) { + throw new UnsupportedOperationException(); + } + + @Override + public void setHoldability(int holdability) { + throw new UnsupportedOperationException(); + } + + @Override + public int getHoldability() { + throw new UnsupportedOperationException(); + } + + @Override + public Savepoint setSavepoint() { + throw new UnsupportedOperationException(); + } + + @Override + public Savepoint setSavepoint(String name) { + throw new UnsupportedOperationException(); + } + + @Override + 
public void rollback(Savepoint savepoint) { + throw new UnsupportedOperationException(); + } + + @Override + public void releaseSavepoint(Savepoint savepoint) { + throw new UnsupportedOperationException(); + } + + @Override + public Statement createStatement( + int resultSetType, int resultSetConcurrency, int resultSetHoldability) { + throw new UnsupportedOperationException(); + } + + @Override + public PreparedStatement prepareStatement( + String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) { + throw new UnsupportedOperationException(); + } + + @Override + public CallableStatement prepareCall( + String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) { + throw new UnsupportedOperationException(); + } + + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) { + throw new UnsupportedOperationException(); + } + + @Override + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) { + throw new UnsupportedOperationException(); + } + + @Override + public PreparedStatement prepareStatement(String sql, String[] columnNames) { + throw new UnsupportedOperationException(); + } + + @Override + public Clob createClob() { + throw new UnsupportedOperationException(); + } + + @Override + public Blob createBlob() { + throw new UnsupportedOperationException(); + } + + @Override + public NClob createNClob() { + throw new UnsupportedOperationException(); + } + + @Override + public SQLXML createSQLXML() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isValid(int timeout) { + throw new UnsupportedOperationException(); + } + + @Override + public void setClientInfo(String name, String value) throws SQLClientInfoException { + writeConnection.getUnderlyingConnecton().setClientInfo(name, value); + for (NodeConnection conn : readConnections) { + conn.getUnderlyingConnecton().setClientInfo(name, value); + } + } + + @Override + public void setClientInfo(Properties properties) { + throw new UnsupportedOperationException(); + } + + @Override + public String getClientInfo(String name) { + throw new UnsupportedOperationException(); + } + + @Override + public Properties getClientInfo() { + throw new UnsupportedOperationException(); + } + + @Override + public Array createArrayOf(String typeName, Object[] elements) { + throw new UnsupportedOperationException(); + } + + @Override + public Struct createStruct(String typeName, Object[] attributes) { + throw new UnsupportedOperationException(); + } + + @Override + public void setSchema(String schema) { + throw new UnsupportedOperationException(); + } + + @Override + public String getSchema() { + throw new UnsupportedOperationException(); + } + + @Override + public void abort(Executor executor) { + throw new UnsupportedOperationException(); + } + + @Override + public void setNetworkTimeout(Executor executor, int milliseconds) { + throw new UnsupportedOperationException(); + } + + @Override + public int getNetworkTimeout() { + throw new UnsupportedOperationException(); + } + + @Override + public T unwrap(Class iface) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isWrapperFor(Class iface) { + throw new UnsupportedOperationException(); + } +} diff --git a/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/ClusterTestResultSet.java b/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/ClusterTestResultSet.java new file mode 100644 index 000000000000..e98ff7e7c144 --- /dev/null +++ 
b/integration-test/src/main/java/org/apache/iotdb/itbase/runtime/ClusterTestResultSet.java @@ -0,0 +1,1178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.iotdb.itbase.runtime; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.NClob; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.List; +import java.util.Map; + +/** The implementation of {@link ClusterTestResultSet} in cluster test. */ +public class ClusterTestResultSet implements ResultSet { + + private final List resultSets; + private final List endpoints; + private final int queryTimeoutSeconds; + + public ClusterTestResultSet( + List statements, List endpoints, String sql, int queryTimeoutSeconds) + throws SQLException { + this.queryTimeoutSeconds = queryTimeoutSeconds; + this.endpoints = endpoints; + RequestDelegate delegate = createRequestDelegate(); + for (Statement st : statements) { + delegate.addRequest(() -> st.executeQuery(sql)); + } + resultSets = delegate.requestAll(); + } + + @Override + public boolean next() throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSet rs : resultSets) { + delegate.addRequest(rs::next); + } + return delegate.requestAllAndCompare(); + } + + @Override + public void close() throws SQLException { + RequestDelegate delegate = createRequestDelegate(); + for (ResultSet rs : resultSets) { + delegate.addRequest( + () -> { + rs.close(); + return null; + }); + } + delegate.requestAll(); + } + + @Override + public boolean wasNull() throws SQLException { + RequestDelegate delegate = createLocalRequestDelegate(); + for (ResultSet rs : resultSets) { + delegate.addRequest(rs::wasNull); + } + return delegate.requestAllAndCompare(); + } + + @Override + public String getString(int columnIndex) throws SQLException { + RequestDelegate delegate = createLocalRequestDelegate(); + for (ResultSet rs : resultSets) { + delegate.addRequest(() -> rs.getString(columnIndex)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public boolean getBoolean(int columnIndex) throws SQLException { + RequestDelegate delegate = createLocalRequestDelegate(); + for (ResultSet rs : resultSets) { + delegate.addRequest(() -> rs.getBoolean(columnIndex)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public byte getByte(int columnIndex) throws SQLException { + 
RequestDelegate delegate = createLocalRequestDelegate(); + for (ResultSet rs : resultSets) { + delegate.addRequest(() -> rs.getByte(columnIndex)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public short getShort(int columnIndex) throws SQLException { + RequestDelegate delegate = createLocalRequestDelegate(); + for (ResultSet rs : resultSets) { + delegate.addRequest(() -> rs.getShort(columnIndex)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public int getInt(int columnIndex) throws SQLException { + RequestDelegate delegate = createLocalRequestDelegate(); + for (ResultSet rs : resultSets) { + delegate.addRequest(() -> rs.getInt(columnIndex)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public long getLong(int columnIndex) throws SQLException { + RequestDelegate delegate = createLocalRequestDelegate(); + for (ResultSet rs : resultSets) { + delegate.addRequest(() -> rs.getLong(columnIndex)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public float getFloat(int columnIndex) throws SQLException { + RequestDelegate delegate = createLocalRequestDelegate(); + for (ResultSet rs : resultSets) { + delegate.addRequest(() -> rs.getFloat(columnIndex)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public double getDouble(int columnIndex) throws SQLException { + RequestDelegate delegate = createLocalRequestDelegate(); + for (ResultSet rs : resultSets) { + delegate.addRequest(() -> rs.getDouble(columnIndex)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + RequestDelegate delegate = createLocalRequestDelegate(); + for (ResultSet rs : resultSets) { + delegate.addRequest(() -> rs.getBigDecimal(columnIndex)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + RequestDelegate delegate = createLocalRequestDelegate(); + for (ResultSet rs : resultSets) { + delegate.addRequest(() -> rs.getBytes(columnIndex)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + RequestDelegate delegate = createLocalRequestDelegate(); + for (ResultSet rs : resultSets) { + delegate.addRequest(() -> rs.getDate(columnIndex)); + } + return delegate.requestAllAndCompare(); + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + RequestDelegate